]> git.ipfire.org Git - thirdparty/qemu.git/blame - cpu-exec.c
icount: Add align option to icount
[thirdparty/qemu.git] / cpu-exec.c
CommitLineData
7d13299d 1/*
e965fc38 2 * emulator main execution loop
5fafdf24 3 *
66321a11 4 * Copyright (c) 2003-2005 Fabrice Bellard
7d13299d 5 *
3ef693a0
FB
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
7d13299d 10 *
3ef693a0
FB
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
7d13299d 15 *
3ef693a0 16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
7d13299d 18 */
e4533c7a 19#include "config.h"
cea5f9a2 20#include "cpu.h"
76cad711 21#include "disas/disas.h"
7cb69cae 22#include "tcg.h"
1de7afc9 23#include "qemu/atomic.h"
9c17d615 24#include "sysemu/qtest.h"
7d13299d 25
/* Abandon execution of the current translation block and return
 * control to the sigsetjmp() point in cpu_exec().  The caller is
 * expected to have set cpu->exception_index (or other per-CPU state)
 * beforehand so that cpu_exec() knows why the loop was exited.
 * This function does not return.
 */
void cpu_loop_exit(CPUState *cpu)
{
    /* No TB is executing any more once we leave.  */
    cpu->current_tb = NULL;
    siglongjmp(cpu->jmp_env, 1);
}
bfed01fc 31
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUState *cpu, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */
    /* NOTE(review): 'puc' (the signal ucontext) is currently unused in
       this softmmu variant; the jump alone is sufficient here.  */

    /* Clear any pending exception so the main loop resumes guest
       execution instead of dispatching an exception.  */
    cpu->exception_index = -1;
    siglongjmp(cpu->jmp_env, 1);
}
#endif
fbf9eeb3 44
/* Execute a TB, and fix up the CPU state afterwards if necessary.
 *
 * Returns the value produced by tcg_qemu_tb_exec(): a pointer to the
 * last executed TB with the low TB_EXIT_MASK bits encoding why the
 * generated code returned to the main loop.
 */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t next_tb;

#if defined(DEBUG_DISAS)
    /* Optionally dump CPU state before entering the generated code.  */
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
        /* ??? Should not modify env state for dumping.  */
        cpu_m68k_flush_flags(env, env->cc_op);
        env->cc_op = CC_OP_FLAGS;
        env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
        log_cpu_state(cpu, 0);
#else
        log_cpu_state(cpu, 0);
#endif
    }
#endif /* DEBUG_DISAS */

    next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        if (cc->synchronize_from_tb) {
            /* Target provides a hook to recover full state from the TB.  */
            cc->synchronize_from_tb(cpu, tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, tb->pc);
        }
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt. We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}
90
2e70f6ef
PB
91/* Execute the code without caching the generated code. An interpreter
92 could be used if available. */
9349b4f9 93static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
cea5f9a2 94 TranslationBlock *orig_tb)
2e70f6ef 95{
d77953b9 96 CPUState *cpu = ENV_GET_CPU(env);
2e70f6ef
PB
97 TranslationBlock *tb;
98
99 /* Should never happen.
100 We only end up here when an existing TB is too long. */
101 if (max_cycles > CF_COUNT_MASK)
102 max_cycles = CF_COUNT_MASK;
103
648f034c 104 tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
2e70f6ef 105 max_cycles);
d77953b9 106 cpu->current_tb = tb;
2e70f6ef 107 /* execute the generated code */
77211379 108 cpu_tb_exec(cpu, tb->tc_ptr);
d77953b9 109 cpu->current_tb = NULL;
2e70f6ef
PB
110 tb_phys_invalidate(tb, -1);
111 tb_free(tb);
112}
113
/* Slow-path TB lookup: search the physical hash chain for a TB matching
 * (pc, cs_base, flags); if none exists, translate one now.  The winning
 * TB is moved to the head of its hash chain and installed in the per-CPU
 * virtual-PC cache before returning.
 */
static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    /* ptb1 always points at the link that points at the candidate tb,
       so the winner can be unlinked in O(1) below.  */
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(cpu, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list.  When we arrive
       via not_found, *ptb1 is the NULL chain tail, so a freshly
       generated TB (already linked in by tb_gen_code) is left alone.  */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
170
9349b4f9 171static inline TranslationBlock *tb_find_fast(CPUArchState *env)
8a40a180 172{
8cd70437 173 CPUState *cpu = ENV_GET_CPU(env);
8a40a180
FB
174 TranslationBlock *tb;
175 target_ulong cs_base, pc;
6b917547 176 int flags;
8a40a180
FB
177
178 /* we record a subset of the CPU state. It will
179 always be the same before a given translated block
180 is executed. */
6b917547 181 cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
8cd70437 182 tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
551bd27f
TS
183 if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
184 tb->flags != flags)) {
cea5f9a2 185 tb = tb_find_slow(env, pc, cs_base, flags);
8a40a180
FB
186 }
187 return tb;
188}
189
/* Optional callback invoked by cpu_handle_debug_exception() after the
   watchpoint bookkeeping has been done; NULL when unset.  */
static CPUDebugExcpHandler *debug_excp_handler;

/* Install @handler as the debug-exception callback, replacing any
   previously registered handler.  Passing NULL disables the hook.  */
void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}
196
9349b4f9 197static void cpu_handle_debug_exception(CPUArchState *env)
1009d2ed 198{
ff4700b0 199 CPUState *cpu = ENV_GET_CPU(env);
1009d2ed
JK
200 CPUWatchpoint *wp;
201
ff4700b0
AF
202 if (!cpu->watchpoint_hit) {
203 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
1009d2ed
JK
204 wp->flags &= ~BP_WATCHPOINT_HIT;
205 }
206 }
207 if (debug_excp_handler) {
208 debug_excp_handler(env);
209 }
210}
211
/* main execution loop */

/* Global, async-signal-safe request for all CPUs to leave cpu_exec();
   consulted once per entry, before per-CPU exit_request takes over.  */
volatile sig_atomic_t exit_request;

/* Run the TCG execution loop for @env's CPU until something (exception,
 * interrupt, halt, debug event, exit request) forces a return.
 *
 * Returns the exception index that terminated the loop (e.g. EXCP_HLT,
 * EXCP_DEBUG, EXCP_INTERRUPT, or a target-specific value).
 *
 * Structure: an outer for(;;) wrapping a sigsetjmp() landing pad, and an
 * inner for(;;) that finds, chains and executes TBs.  cpu_loop_exit()
 * longjmp()s back to the landing pad; the else-branch of the sigsetjmp
 * reloads locals that the longjmp may have clobbered.
 */
int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
    CPUClass *cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
    X86CPU *x86_cpu = X86_CPU(cpu);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    uintptr_t next_tb;
    /* This must be volatile so it is not trashed by longjmp() */
    volatile bool have_tb_lock = false;

    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    current_cpu = cpu;

    /* As long as current_cpu is null, up to the assignment just above,
     * requests by other threads to exit the execution loop are expected to
     * be issued using the exit_request global. We must make sure that our
     * evaluation of the global value is performed past the current_cpu
     * value transition point, which requires a memory barrier as well as
     * an instruction scheduling constraint on modern architectures. */
    smp_mb();

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }

    /* Per-target entry fixups: convert architectural state into the
       lazily evaluated form the generated code expects.  */
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    env->df = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    cpu->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (sigsetjmp(cpu->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (cpu->exception_index >= 0) {
                if (cpu->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = cpu->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = cpu->exception_index;
                    break;
#else
                    cc->do_interrupt(cpu);
                    cpu->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = cpu->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        cpu->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(cpu);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        cpu->exception_index = EXCP_HLT;
                        cpu_loop_exit(cpu);
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0);
                        do_cpu_init(x86_cpu);
                        cpu->exception_index = EXCP_HALTED;
                        cpu_loop_exit(cpu);
                    }
#else
                    if (interrupt_request & CPU_INTERRUPT_RESET) {
                        cpu_reset(cpu);
                    }
#endif
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(x86_cpu->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_cpu);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        /* Interrupts only when the global interrupt flag
                           (SVM GIF) is set; priority: SMI, NMI, MCE,
                           external INTR, then virtual INTR.  */
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(x86_cpu);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(cpu->as,
                                             env->vm_vmcb
                                             + offsetof(struct vmcb,
                                                        control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        cpu->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                cpu->exception_index = env->interrupt_index;
                                cc->do_interrupt(cpu);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->daif & PSTATE_F)) {
                        cpu->exception_index = EXCP_FIQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->daif & PSTATE_I))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        cpu->exception_index = UC32_EXCP_INTR;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            env->error_code = 0;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            cpu->exception_index = EXCP_NMI;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        cpu->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cpu->exception_index = EXC_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    cpu->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(cpu);
                }
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = true;
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                have_tb_lock = false;
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (eg exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        insns_left = cpu->icount_decr.u32;
                        if (cpu->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            cpu->icount_extra += insns_left;
                            if (cpu->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = cpu->icount_extra;
                            }
                            cpu->icount_extra -= insns_left;
                            cpu->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            cpu->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(cpu);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                cpu->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            cpu = current_cpu;
            env = cpu->env_ptr;
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
            cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
            x86_cpu = X86_CPU(cpu);
#endif
            /* The longjmp may have fired while we held the TB lock;
               release it so the next iteration starts clean.  */
            if (have_tb_lock) {
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = false;
            }
        }
    } /* for(;;) */


    /* Per-target exit fixups: fold the lazily evaluated state back into
       the architectural registers before returning to the caller.  */
#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (env->df & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?.  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe : never use current_cpu outside cpu_exec() */
    current_cpu = NULL;
    return ret;
}