]> git.ipfire.org Git - thirdparty/qemu.git/blame - cpu-exec.c
Silence compiler warning.
[thirdparty/qemu.git] / cpu-exec.c
CommitLineData
7d13299d
FB
1/*
2 * i386 emulator main execution loop
5fafdf24 3 *
66321a11 4 * Copyright (c) 2003-2005 Fabrice Bellard
7d13299d 5 *
3ef693a0
FB
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
7d13299d 10 *
3ef693a0
FB
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
7d13299d 15 *
3ef693a0
FB
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
7d13299d 19 */
e4533c7a 20#include "config.h"
7cb69cae 21#define CPU_NO_GLOBAL_REGS
93ac68bc 22#include "exec.h"
956034d7 23#include "disas.h"
7cb69cae 24#include "tcg.h"
7d13299d 25
fbf9eeb3
FB
26#if !defined(CONFIG_SOFTMMU)
27#undef EAX
28#undef ECX
29#undef EDX
30#undef EBX
31#undef ESP
32#undef EBP
33#undef ESI
34#undef EDI
35#undef EIP
36#include <signal.h>
37#include <sys/ucontext.h>
38#endif
39
572a9d4a
BS
40#if defined(__sparc__) && !defined(HOST_SOLARIS)
41// Work around ugly bugs in glibc that mangle global register contents
42#undef env
43#define env cpu_single_env
44#endif
45
36bdbe54
FB
46int tb_invalidated_flag;
47
dc99065b 48//#define DEBUG_EXEC
9de5e440 49//#define DEBUG_SIGNAL
7d13299d 50
/* Abort execution of the current translated block and return to the
   setjmp() point established in cpu_exec().
   NOTE: the registers held in host globals must be written back by hand
   because longjmp() does not restore them. */
void cpu_loop_exit(void)
{
    /* flush CPU state cached in host registers back to env */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}
bfed01fc 58
e6e5906b 59#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
3475187d
FB
60#define reg_T2
61#endif
e4533c7a 62
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* We longjmp() out of the signal handler, so the normal
           sigreturn path never runs; restore the pre-signal mask by
           hand.  XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    /* unwind back to the setjmp() point in cpu_exec() */
    longjmp(env->jmp_env, 1);
}
84
/* Execute the code without caching the generated code. An interpreter
   could be used if available.
   Used when only 'max_cycles' instructions may run before an icount
   event: a throw-away TB bounded to that cycle count is generated,
   executed once, and freed. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);

    /* (next_tb & 3) == 2 flags an asynchronous exit request */
    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        CPU_PC_FROM_TB(env, tb);
    }
    /* the TB was single-use: unlink and release it */
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
111
/* Slow-path TB lookup: walk the physical-address hash chain for a
   block matching (pc, cs_base, flags); translate a new block if none
   is found.  On return the result is also installed in the per-CPU
   virtual-pc jump cache consulted by tb_find_fast(). */
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed: a TB spanning two virtual
               pages only matches if the second physical page agrees */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
   /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
160
/* Fast-path TB lookup: compute the per-target (pc, cs_base, flags)
   key and probe the virtual-pc jump cache; fall back to
   tb_find_slow() on a miss.  The flags encodings below are
   target-specific and must match what the translator recorded. */
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint64_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    flags |= (env->condexec_bits << 8);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->active_tc.PC;
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6 */
            | (env->sr & SR_S)            /* Bit  13 */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = env->flags;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_CRIS)
    flags = env->pregs[PR_CCS] & (P_FLAG | U_FLAG | X_FLAG);
    flags |= env->dslot;
    cs_base = 0;
    pc = env->pc;
#else
#error unsupported CPU
#endif
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    /* all three key fields must match, otherwise take the slow path */
    if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                         tb->flags != flags, 0)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}
233
/* main execution loop */

/* Run translated code for CPU 'env1' until an exit condition is hit.
   Returns an EXCP_* code (exception index, EXCP_INTERRUPT, ...).
   Structure: an outer for(;;) whose body is guarded by setjmp() --
   cpu_loop_exit() and faults longjmp() back here -- and an inner
   for(;;) that services pending interrupts, looks up the next TB,
   chains it to its predecessor when possible, and executes it. */
int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            /* hand execution to the kqemu accelerator when possible;
               eflags must be round-tripped between the canonical and
               the CPU-temporary formats around the call */
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (__builtin_expect(interrupt_request, 0) &&
                    likely(!(env->singlestep_enabled & SSTEP_NOIRQ))) {
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    /* interrupts are only taken when the (SVM) global
                       interrupt flag is set; priority: SMI > NMI >
                       hardware INT > virtual INT */
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            if (loglevel & CPU_LOG_TB_IN_ASM) {
                                fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                            }
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            if (loglevel & CPU_LOG_TB_IN_ASM)
                                fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occured at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                   /* Don't use the cached interupt_request value,
                      do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (next_tb != 0 &&
#ifdef USE_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                    }
                }
                spin_unlock(&tb_lock);
                env->current_tb = tb;
                /* cpu_interrupt might clear env->current_tb
                   asynchronously; the loop re-checks it after each run */
                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        CPU_PC_FROM_TB(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            /* longjmp() landed here: re-sync env from host registers */
            env_to_regs();
        }
    } /* for(;;) */


#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?.  */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
6dbad63e 694
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
    /* NOTE: body is deliberately disabled (#if 0); the function is
       currently a no-op */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
707
1a18c71b 708#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
e4533c7a 709
6dbad63e
FB
710void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
711{
712 CPUX86State *saved_env;
713
714 saved_env = env;
715 env = s;
a412ac57 716 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
a513fe19 717 selector &= 0xffff;
5fafdf24 718 cpu_x86_load_seg_cache(env, seg_reg, selector,
c27004ec 719 (selector << 4), 0xffff, 0);
a513fe19 720 } else {
5d97559d 721 helper_load_seg(seg_reg, selector);
a513fe19 722 }
6dbad63e
FB
723 env = saved_env;
724}
9de5e440 725
6f12a2a6 726void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
d0a1ffc9
FB
727{
728 CPUX86State *saved_env;
729
730 saved_env = env;
731 env = s;
3b46e624 732
6f12a2a6 733 helper_fsave(ptr, data32);
d0a1ffc9
FB
734
735 env = saved_env;
736}
737
6f12a2a6 738void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
d0a1ffc9
FB
739{
740 CPUX86State *saved_env;
741
742 saved_env = env;
743 env = s;
3b46e624 744
6f12a2a6 745 helper_frstor(ptr, data32);
d0a1ffc9
FB
746
747 env = saved_env;
748}
749
e4533c7a
FB
750#endif /* TARGET_I386 */
751
67b915a5
FB
752#if !defined(CONFIG_SOFTMMU)
753
3fb2ded1
FB
754#if defined(TARGET_I386)
755
/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0'. 'old_set' is the
   signal set which should be restored.
   Returns 1 if the fault was handled (or does not return at all,
   via raise_exception_err/cpu_resume_from_signal), 0 if it was not
   an MMU fault and the caller must treat it as a real host fault. */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
808
e4533c7a 809#elif defined(TARGET_ARM)
/* ARM variant: see the i386 handle_cpu_signal above for the meaning of
   'pc', 'address', 'is_write', 'old_set' and 'puc'.  Returns 0 for a
   non-MMU fault; otherwise handles the fault (possibly not returning,
   via cpu_loop_exit). */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
93ac68bc
FB
847#elif defined(TARGET_SPARC)
/* SPARC variant: see the i386 handle_cpu_signal above for the meaning
   of 'pc', 'address', 'is_write', 'old_set' and 'puc'.  Returns 0 for
   a non-MMU fault; otherwise handles the fault (possibly not
   returning, via cpu_loop_exit). */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
67867308
FB
885#elif defined (TARGET_PPC)
/* PowerPC variant: see the i386 handle_cpu_signal above for the
   meaning of 'pc', 'address', 'is_write', 'old_set' and 'puc'.
   Returns 0 for a non-MMU fault; otherwise handles the fault
   (possibly not returning, via do_raise_exception_err or
   cpu_resume_from_signal). */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
934
935#elif defined(TARGET_M68K)
936static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
937 int is_write, sigset_t *old_set,
938 void *puc)
939{
940 TranslationBlock *tb;
941 int ret;
942
943 if (cpu_single_env)
944 env = cpu_single_env; /* XXX: find a correct solution for multithread */
945#if defined(DEBUG_SIGNAL)
5fafdf24 946 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
e6e5906b
PB
947 pc, address, is_write, *(unsigned long *)old_set);
948#endif
949 /* XXX: locking issue */
950 if (is_write && page_unprotect(address, pc, puc)) {
951 return 1;
952 }
953 /* see if it is an MMU fault */
6ebbf390 954 ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
e6e5906b
PB
955 if (ret < 0)
956 return 0; /* not an MMU fault */
957 if (ret == 0)
958 return 1; /* the MMU fault was handled without causing real CPU fault */
959 /* now we have a real cpu fault */
960 tb = tb_find_pc(pc);
961 if (tb) {
962 /* the PC is inside the translated code. It means that we have
963 a virtual CPU fault */
964 cpu_restore_state(tb, env, pc, puc);
965 }
966 /* we restore the process signal mask as the sigreturn should
967 do it (XXX: use sigsetjmp) */
968 sigprocmask(SIG_SETMASK, old_set, NULL);
969 cpu_loop_exit();
970 /* never comes here */
67867308
FB
971 return 1;
972}
6af0bf9c
FB
973
974#elif defined (TARGET_MIPS)
975static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
976 int is_write, sigset_t *old_set,
977 void *puc)
978{
979 TranslationBlock *tb;
980 int ret;
3b46e624 981
6af0bf9c
FB
982 if (cpu_single_env)
983 env = cpu_single_env; /* XXX: find a correct solution for multithread */
984#if defined(DEBUG_SIGNAL)
5fafdf24 985 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
6af0bf9c
FB
986 pc, address, is_write, *(unsigned long *)old_set);
987#endif
988 /* XXX: locking issue */
53a5960a 989 if (is_write && page_unprotect(h2g(address), pc, puc)) {
6af0bf9c
FB
990 return 1;
991 }
992
993 /* see if it is an MMU fault */
6ebbf390 994 ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
6af0bf9c
FB
995 if (ret < 0)
996 return 0; /* not an MMU fault */
997 if (ret == 0)
998 return 1; /* the MMU fault was handled without causing real CPU fault */
999
1000 /* now we have a real cpu fault */
1001 tb = tb_find_pc(pc);
1002 if (tb) {
1003 /* the PC is inside the translated code. It means that we have
1004 a virtual CPU fault */
1005 cpu_restore_state(tb, env, pc, puc);
1006 }
1007 if (ret == 1) {
1008#if 0
5fafdf24 1009 printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
1eb5207b 1010 env->PC, env->error_code, tb);
6af0bf9c
FB
1011#endif
1012 /* we restore the process signal mask as the sigreturn should
1013 do it (XXX: use sigsetjmp) */
1014 sigprocmask(SIG_SETMASK, old_set, NULL);
1015 do_raise_exception_err(env->exception_index, env->error_code);
1016 } else {
1017 /* activate soft MMU for this block */
1018 cpu_resume_from_signal(env, puc);
1019 }
1020 /* never comes here */
1021 return 1;
1022}
1023
fdf9b3e8
FB
1024#elif defined (TARGET_SH4)
1025static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1026 int is_write, sigset_t *old_set,
1027 void *puc)
1028{
1029 TranslationBlock *tb;
1030 int ret;
3b46e624 1031
fdf9b3e8
FB
1032 if (cpu_single_env)
1033 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1034#if defined(DEBUG_SIGNAL)
5fafdf24 1035 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
fdf9b3e8
FB
1036 pc, address, is_write, *(unsigned long *)old_set);
1037#endif
1038 /* XXX: locking issue */
1039 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1040 return 1;
1041 }
1042
1043 /* see if it is an MMU fault */
6ebbf390 1044 ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
fdf9b3e8
FB
1045 if (ret < 0)
1046 return 0; /* not an MMU fault */
1047 if (ret == 0)
1048 return 1; /* the MMU fault was handled without causing real CPU fault */
1049
1050 /* now we have a real cpu fault */
eddf68a6
JM
1051 tb = tb_find_pc(pc);
1052 if (tb) {
1053 /* the PC is inside the translated code. It means that we have
1054 a virtual CPU fault */
1055 cpu_restore_state(tb, env, pc, puc);
1056 }
1057#if 0
5fafdf24 1058 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
eddf68a6
JM
1059 env->nip, env->error_code, tb);
1060#endif
1061 /* we restore the process signal mask as the sigreturn should
1062 do it (XXX: use sigsetjmp) */
1063 sigprocmask(SIG_SETMASK, old_set, NULL);
1064 cpu_loop_exit();
1065 /* never comes here */
1066 return 1;
1067}
1068
1069#elif defined (TARGET_ALPHA)
1070static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1071 int is_write, sigset_t *old_set,
1072 void *puc)
1073{
1074 TranslationBlock *tb;
1075 int ret;
3b46e624 1076
eddf68a6
JM
1077 if (cpu_single_env)
1078 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1079#if defined(DEBUG_SIGNAL)
5fafdf24 1080 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
eddf68a6
JM
1081 pc, address, is_write, *(unsigned long *)old_set);
1082#endif
1083 /* XXX: locking issue */
1084 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1085 return 1;
1086 }
1087
1088 /* see if it is an MMU fault */
6ebbf390 1089 ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
eddf68a6
JM
1090 if (ret < 0)
1091 return 0; /* not an MMU fault */
1092 if (ret == 0)
1093 return 1; /* the MMU fault was handled without causing real CPU fault */
1094
1095 /* now we have a real cpu fault */
fdf9b3e8
FB
1096 tb = tb_find_pc(pc);
1097 if (tb) {
1098 /* the PC is inside the translated code. It means that we have
1099 a virtual CPU fault */
1100 cpu_restore_state(tb, env, pc, puc);
1101 }
fdf9b3e8 1102#if 0
5fafdf24 1103 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
fdf9b3e8
FB
1104 env->nip, env->error_code, tb);
1105#endif
1106 /* we restore the process signal mask as the sigreturn should
1107 do it (XXX: use sigsetjmp) */
355fb23d
PB
1108 sigprocmask(SIG_SETMASK, old_set, NULL);
1109 cpu_loop_exit();
fdf9b3e8
FB
1110 /* never comes here */
1111 return 1;
1112}
f1ccf904
TS
1113#elif defined (TARGET_CRIS)
1114static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1115 int is_write, sigset_t *old_set,
1116 void *puc)
1117{
1118 TranslationBlock *tb;
1119 int ret;
1120
1121 if (cpu_single_env)
1122 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1123#if defined(DEBUG_SIGNAL)
1124 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1125 pc, address, is_write, *(unsigned long *)old_set);
1126#endif
1127 /* XXX: locking issue */
1128 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1129 return 1;
1130 }
1131
1132 /* see if it is an MMU fault */
6ebbf390 1133 ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
f1ccf904
TS
1134 if (ret < 0)
1135 return 0; /* not an MMU fault */
1136 if (ret == 0)
1137 return 1; /* the MMU fault was handled without causing real CPU fault */
1138
1139 /* now we have a real cpu fault */
1140 tb = tb_find_pc(pc);
1141 if (tb) {
1142 /* the PC is inside the translated code. It means that we have
1143 a virtual CPU fault */
1144 cpu_restore_state(tb, env, pc, puc);
1145 }
f1ccf904
TS
1146 /* we restore the process signal mask as the sigreturn should
1147 do it (XXX: use sigsetjmp) */
1148 sigprocmask(SIG_SETMASK, old_set, NULL);
1149 cpu_loop_exit();
1150 /* never comes here */
1151 return 1;
1152}
1153
e4533c7a
FB
1154#else
1155#error unsupported target CPU
1156#endif
9de5e440 1157
2b413144
FB
1158#if defined(__i386__)
1159
d8ecc0b9
FB
1160#if defined(__APPLE__)
1161# include <sys/ucontext.h>
1162
1163# define EIP_sig(context) (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
1164# define TRAP_sig(context) ((context)->uc_mcontext->es.trapno)
1165# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
1166#else
1167# define EIP_sig(context) ((context)->uc_mcontext.gregs[REG_EIP])
1168# define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
1169# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
1170#endif
1171
5fafdf24 1172int cpu_signal_handler(int host_signum, void *pinfo,
e4533c7a 1173 void *puc)
9de5e440 1174{
5a7b542b 1175 siginfo_t *info = pinfo;
9de5e440
FB
1176 struct ucontext *uc = puc;
1177 unsigned long pc;
bf3e8bf1 1178 int trapno;
97eb5b14 1179
d691f669
FB
1180#ifndef REG_EIP
1181/* for glibc 2.1 */
fd6ce8f6
FB
1182#define REG_EIP EIP
1183#define REG_ERR ERR
1184#define REG_TRAPNO TRAPNO
d691f669 1185#endif
d8ecc0b9
FB
1186 pc = EIP_sig(uc);
1187 trapno = TRAP_sig(uc);
ec6338ba
FB
1188 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1189 trapno == 0xe ?
1190 (ERROR_sig(uc) >> 1) & 1 : 0,
1191 &uc->uc_sigmask, puc);
2b413144
FB
1192}
1193
bc51c5c9
FB
1194#elif defined(__x86_64__)
1195
5a7b542b 1196int cpu_signal_handler(int host_signum, void *pinfo,
bc51c5c9
FB
1197 void *puc)
1198{
5a7b542b 1199 siginfo_t *info = pinfo;
bc51c5c9
FB
1200 struct ucontext *uc = puc;
1201 unsigned long pc;
1202
1203 pc = uc->uc_mcontext.gregs[REG_RIP];
5fafdf24
TS
1204 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1205 uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
bc51c5c9
FB
1206 (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
1207 &uc->uc_sigmask, puc);
1208}
1209
83fb7adf 1210#elif defined(__powerpc__)
2b413144 1211
83fb7adf
FB
1212/***********************************************************************
1213 * signal context platform-specific definitions
1214 * From Wine
1215 */
1216#ifdef linux
1217/* All Registers access - only for local access */
1218# define REG_sig(reg_name, context) ((context)->uc_mcontext.regs->reg_name)
1219/* Gpr Registers access */
1220# define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context)
1221# define IAR_sig(context) REG_sig(nip, context) /* Program counter */
1222# define MSR_sig(context) REG_sig(msr, context) /* Machine State Register (Supervisor) */
1223# define CTR_sig(context) REG_sig(ctr, context) /* Count register */
1224# define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
1225# define LR_sig(context) REG_sig(link, context) /* Link register */
1226# define CR_sig(context) REG_sig(ccr, context) /* Condition register */
1227/* Float Registers access */
1228# define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
1229# define FPSCR_sig(context) (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
1230/* Exception Registers access */
1231# define DAR_sig(context) REG_sig(dar, context)
1232# define DSISR_sig(context) REG_sig(dsisr, context)
1233# define TRAP_sig(context) REG_sig(trap, context)
1234#endif /* linux */
1235
1236#ifdef __APPLE__
1237# include <sys/ucontext.h>
1238typedef struct ucontext SIGCONTEXT;
1239/* All Registers access - only for local access */
1240# define REG_sig(reg_name, context) ((context)->uc_mcontext->ss.reg_name)
1241# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
1242# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
1243# define VECREG_sig(reg_name, context) ((context)->uc_mcontext->vs.reg_name)
1244/* Gpr Registers access */
1245# define GPR_sig(reg_num, context) REG_sig(r##reg_num, context)
1246# define IAR_sig(context) REG_sig(srr0, context) /* Program counter */
1247# define MSR_sig(context) REG_sig(srr1, context) /* Machine State Register (Supervisor) */
1248# define CTR_sig(context) REG_sig(ctr, context)
1249# define XER_sig(context) REG_sig(xer, context) /* Link register */
1250# define LR_sig(context) REG_sig(lr, context) /* User's integer exception register */
1251# define CR_sig(context) REG_sig(cr, context) /* Condition register */
1252/* Float Registers access */
1253# define FLOAT_sig(reg_num, context) FLOATREG_sig(fpregs[reg_num], context)
1254# define FPSCR_sig(context) ((double)FLOATREG_sig(fpscr, context))
1255/* Exception Registers access */
1256# define DAR_sig(context) EXCEPREG_sig(dar, context) /* Fault registers for coredump */
1257# define DSISR_sig(context) EXCEPREG_sig(dsisr, context)
1258# define TRAP_sig(context) EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
1259#endif /* __APPLE__ */
1260
5fafdf24 1261int cpu_signal_handler(int host_signum, void *pinfo,
e4533c7a 1262 void *puc)
2b413144 1263{
5a7b542b 1264 siginfo_t *info = pinfo;
25eb4484 1265 struct ucontext *uc = puc;
25eb4484 1266 unsigned long pc;
25eb4484
FB
1267 int is_write;
1268
83fb7adf 1269 pc = IAR_sig(uc);
25eb4484
FB
1270 is_write = 0;
1271#if 0
1272 /* ppc 4xx case */
83fb7adf 1273 if (DSISR_sig(uc) & 0x00800000)
25eb4484
FB
1274 is_write = 1;
1275#else
83fb7adf 1276 if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
25eb4484
FB
1277 is_write = 1;
1278#endif
5fafdf24 1279 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
bf3e8bf1 1280 is_write, &uc->uc_sigmask, puc);
2b413144
FB
1281}
1282
2f87c607
FB
1283#elif defined(__alpha__)
1284
5fafdf24 1285int cpu_signal_handler(int host_signum, void *pinfo,
2f87c607
FB
1286 void *puc)
1287{
5a7b542b 1288 siginfo_t *info = pinfo;
2f87c607
FB
1289 struct ucontext *uc = puc;
1290 uint32_t *pc = uc->uc_mcontext.sc_pc;
1291 uint32_t insn = *pc;
1292 int is_write = 0;
1293
8c6939c0 1294 /* XXX: need kernel patch to get write flag faster */
2f87c607
FB
1295 switch (insn >> 26) {
1296 case 0x0d: // stw
1297 case 0x0e: // stb
1298 case 0x0f: // stq_u
1299 case 0x24: // stf
1300 case 0x25: // stg
1301 case 0x26: // sts
1302 case 0x27: // stt
1303 case 0x2c: // stl
1304 case 0x2d: // stq
1305 case 0x2e: // stl_c
1306 case 0x2f: // stq_c
1307 is_write = 1;
1308 }
1309
5fafdf24 1310 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
bf3e8bf1 1311 is_write, &uc->uc_sigmask, puc);
2f87c607 1312}
8c6939c0
FB
1313#elif defined(__sparc__)
1314
5fafdf24 1315int cpu_signal_handler(int host_signum, void *pinfo,
e4533c7a 1316 void *puc)
8c6939c0 1317{
5a7b542b 1318 siginfo_t *info = pinfo;
8c6939c0
FB
1319 int is_write;
1320 uint32_t insn;
6b4c11cd 1321#if !defined(__arch64__) || defined(HOST_SOLARIS)
c9e1e2b0
BS
1322 uint32_t *regs = (uint32_t *)(info + 1);
1323 void *sigmask = (regs + 20);
8c6939c0 1324 /* XXX: is there a standard glibc define ? */
c9e1e2b0
BS
1325 unsigned long pc = regs[1];
1326#else
1327 struct sigcontext *sc = puc;
1328 unsigned long pc = sc->sigc_regs.tpc;
1329 void *sigmask = (void *)sc->sigc_mask;
1330#endif
1331
8c6939c0
FB
1332 /* XXX: need kernel patch to get write flag faster */
1333 is_write = 0;
1334 insn = *(uint32_t *)pc;
1335 if ((insn >> 30) == 3) {
1336 switch((insn >> 19) & 0x3f) {
1337 case 0x05: // stb
1338 case 0x06: // sth
1339 case 0x04: // st
1340 case 0x07: // std
1341 case 0x24: // stf
1342 case 0x27: // stdf
1343 case 0x25: // stfsr
1344 is_write = 1;
1345 break;
1346 }
1347 }
5fafdf24 1348 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
bf3e8bf1 1349 is_write, sigmask, NULL);
8c6939c0
FB
1350}
1351
1352#elif defined(__arm__)
1353
5fafdf24 1354int cpu_signal_handler(int host_signum, void *pinfo,
e4533c7a 1355 void *puc)
8c6939c0 1356{
5a7b542b 1357 siginfo_t *info = pinfo;
8c6939c0
FB
1358 struct ucontext *uc = puc;
1359 unsigned long pc;
1360 int is_write;
3b46e624 1361
5c49b363
AZ
1362#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ =< 3))
1363 pc = uc->uc_mcontext.gregs[R15];
1364#else
4eee57f5 1365 pc = uc->uc_mcontext.arm_pc;
5c49b363 1366#endif
8c6939c0
FB
1367 /* XXX: compute is_write */
1368 is_write = 0;
5fafdf24 1369 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
8c6939c0 1370 is_write,
f3a9676a 1371 &uc->uc_sigmask, puc);
8c6939c0
FB
1372}
1373
38e584a0
FB
1374#elif defined(__mc68000)
1375
5fafdf24 1376int cpu_signal_handler(int host_signum, void *pinfo,
38e584a0
FB
1377 void *puc)
1378{
5a7b542b 1379 siginfo_t *info = pinfo;
38e584a0
FB
1380 struct ucontext *uc = puc;
1381 unsigned long pc;
1382 int is_write;
3b46e624 1383
38e584a0
FB
1384 pc = uc->uc_mcontext.gregs[16];
1385 /* XXX: compute is_write */
1386 is_write = 0;
5fafdf24 1387 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
38e584a0 1388 is_write,
bf3e8bf1 1389 &uc->uc_sigmask, puc);
38e584a0
FB
1390}
1391
b8076a74
FB
1392#elif defined(__ia64)
1393
1394#ifndef __ISR_VALID
1395 /* This ought to be in <bits/siginfo.h>... */
1396# define __ISR_VALID 1
b8076a74
FB
1397#endif
1398
5a7b542b 1399int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
b8076a74 1400{
5a7b542b 1401 siginfo_t *info = pinfo;
b8076a74
FB
1402 struct ucontext *uc = puc;
1403 unsigned long ip;
1404 int is_write = 0;
1405
1406 ip = uc->uc_mcontext.sc_ip;
1407 switch (host_signum) {
1408 case SIGILL:
1409 case SIGFPE:
1410 case SIGSEGV:
1411 case SIGBUS:
1412 case SIGTRAP:
fd4a43e4 1413 if (info->si_code && (info->si_segvflags & __ISR_VALID))
b8076a74
FB
1414 /* ISR.W (write-access) is bit 33: */
1415 is_write = (info->si_isr >> 33) & 1;
1416 break;
1417
1418 default:
1419 break;
1420 }
1421 return handle_cpu_signal(ip, (unsigned long)info->si_addr,
1422 is_write,
1423 &uc->uc_sigmask, puc);
1424}
1425
90cb9493
FB
1426#elif defined(__s390__)
1427
5fafdf24 1428int cpu_signal_handler(int host_signum, void *pinfo,
90cb9493
FB
1429 void *puc)
1430{
5a7b542b 1431 siginfo_t *info = pinfo;
90cb9493
FB
1432 struct ucontext *uc = puc;
1433 unsigned long pc;
1434 int is_write;
3b46e624 1435
90cb9493
FB
1436 pc = uc->uc_mcontext.psw.addr;
1437 /* XXX: compute is_write */
1438 is_write = 0;
5fafdf24 1439 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
c4b89d18
TS
1440 is_write, &uc->uc_sigmask, puc);
1441}
1442
1443#elif defined(__mips__)
1444
5fafdf24 1445int cpu_signal_handler(int host_signum, void *pinfo,
c4b89d18
TS
1446 void *puc)
1447{
9617efe8 1448 siginfo_t *info = pinfo;
c4b89d18
TS
1449 struct ucontext *uc = puc;
1450 greg_t pc = uc->uc_mcontext.pc;
1451 int is_write;
3b46e624 1452
c4b89d18
TS
1453 /* XXX: compute is_write */
1454 is_write = 0;
5fafdf24 1455 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
c4b89d18 1456 is_write, &uc->uc_sigmask, puc);
90cb9493
FB
1457}
1458
f54b3f92
AJ
1459#elif defined(__hppa__)
1460
1461int cpu_signal_handler(int host_signum, void *pinfo,
1462 void *puc)
1463{
1464 struct siginfo *info = pinfo;
1465 struct ucontext *uc = puc;
1466 unsigned long pc;
1467 int is_write;
1468
1469 pc = uc->uc_mcontext.sc_iaoq[0];
1470 /* FIXME: compute is_write */
1471 is_write = 0;
1472 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1473 is_write,
1474 &uc->uc_sigmask, puc);
1475}
1476
9de5e440 1477#else
2b413144 1478
3fb2ded1 1479#error host CPU specific signal handler needed
2b413144 1480
9de5e440 1481#endif
67b915a5
FB
1482
1483#endif /* !defined(CONFIG_SOFTMMU) */