/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation. */
#undef DEBUG_TB_CHECK
#endif

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END 0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;

int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static ram_addr_t phys_ram_alloc_offset = 0;

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    uint32_t phys_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 * In the future, this is to be replaced by a multi-level table
 * to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
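
/* A page index is split in two: the high L1_BITS select an entry of
   l1_map, which points to a lazily allocated array of L2_SIZE PageDesc
   entries indexed by the low L2_BITS (see page_find_alloc() below). */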

static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
#if defined(CONFIG_SOFTMMU)
static int io_mem_watch;
#endif

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
}

static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

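/* Look up (and optionally allocate) the PhysPageDesc of a physical page
   index. When the physical address space is wider than 32 bits, an extra
   top level of the table is walked first; otherwise l1_phys_map is
   indexed directly. */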
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#endif

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->nb_watchpoints = 0;
    *penv = env;
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

353
bc98a7ef 354static void tb_invalidate_check(target_ulong address)
fd6ce8f6
FB
355{
356 TranslationBlock *tb;
357 int i;
358 address &= TARGET_PAGE_MASK;
99773bd4
PB
359 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
360 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
361 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
362 address >= tb->pc + tb->size)) {
363 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
99773bd4 364 address, (long)tb->pc, tb->size);
fd6ce8f6
FB
365 }
366 }
367 }
368}
369
370/* verify that all the pages have correct rights for code */
371static void tb_page_check(void)
372{
373 TranslationBlock *tb;
374 int i, flags1, flags2;
375
99773bd4
PB
376 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
377 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
378 flags1 = page_get_flags(tb->pc);
379 flags2 = page_get_flags(tb->pc + tb->size - 1);
380 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
381 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
99773bd4 382 (long)tb->pc, tb->size, flags1, flags2);
fd6ce8f6
FB
383 }
384 }
385 }
386}
387
d4e8164f
FB
388void tb_jmp_check(TranslationBlock *tb)
389{
390 TranslationBlock *tb1;
391 unsigned int n1;
392
393 /* suppress any remaining jumps to this TB */
394 tb1 = tb->jmp_first;
395 for(;;) {
396 n1 = (long)tb1 & 3;
397 tb1 = (TranslationBlock *)((long)tb1 & ~3);
398 if (n1 == 2)
399 break;
400 tb1 = tb1->jmp_next[n1];
401 }
402 /* check end of list */
403 if (tb1 != tb) {
404 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
405 }
406}
407
fd6ce8f6
FB
408#endif
409
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

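/* A TB may span two pages, so the per-page lists link TBs through
   page_next[0] or page_next[1]. Which slot a link belongs to is encoded
   in the low 2 bits of the stored pointer (0 or 1); the value 2 marks
   the head/end of the circular jump list (see tb->jmp_first). */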
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_ulong phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the per-CPU jump caches */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

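/* Generate a fresh TB for the given (pc, cs_base, flags): allocate a TB
   (flushing everything if the pool is full), translate the code and link
   the new TB into the physical page tables. Only used by the precise
   self-modifying-code handling below. */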
#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                    cpu_single_env->mem_write_vaddr, len,
                    cpu_single_env->eip,
                    cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
#ifdef USE_CODE_COPY
    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;
#endif

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif

/* Add a watchpoint. */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr)
            return 0;
    }
    if (env->nb_watchpoints >= MAX_WATCHPOINTS)
        return -1;

    i = env->nb_watchpoints++;
    env->watchpoint[i].vaddr = addr;
    tlb_flush_page(env, addr);
    /* FIXME: This flush is needed because of the hack to make memory ops
       terminate the TB. It can be removed once the proper IO trap and
       re-execute bits are in. */
    tb_flush(env);
    return i;
}

/* Remove a watchpoint. */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr) {
            env->nb_watchpoints--;
            env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
            tlb_flush_page(env, addr);
            return 0;
        }
    }
    return -1;
}

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

1197
f193c797
FB
1198CPULogItem cpu_log_items[] = {
1199 { CPU_LOG_TB_OUT_ASM, "out_asm",
1200 "show generated host assembly code for each compiled TB" },
1201 { CPU_LOG_TB_IN_ASM, "in_asm",
1202 "show target assembly code for each compiled TB" },
1203 { CPU_LOG_TB_OP, "op",
1204 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1205#ifdef TARGET_I386
1206 { CPU_LOG_TB_OP_OPT, "op_opt",
1207 "show micro ops after optimization for each compiled TB" },
1208#endif
1209 { CPU_LOG_INT, "int",
1210 "show interrupts/exceptions in short format" },
1211 { CPU_LOG_EXEC, "exec",
1212 "show trace before each executed TB (lots of logs)" },
9fddaa0c
FB
1213 { CPU_LOG_TB_CPU, "cpu",
1214 "show CPU state before bloc translation" },
f193c797
FB
1215#ifdef TARGET_I386
1216 { CPU_LOG_PCALL, "pcall",
1217 "show protected mode far calls/returns/exceptions" },
1218#endif
8e3a9fd2 1219#ifdef DEBUG_IOPORT
fd872598
FB
1220 { CPU_LOG_IOPORT, "ioport",
1221 "show all i/o ports accesses" },
8e3a9fd2 1222#endif
f193c797
FB
1223 { 0, NULL, NULL },
1224};
1225
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    va_end(ap);
    abort();
}

CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init();
    /* preserve chaining and index */
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
    memcpy(new_env, env, sizeof(CPUState));
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;
    return new_env;
}

#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#endif
#endif
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);
#endif
#endif

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page. */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
#endif
#endif
    }

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}

static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
#if (NB_MMU_MODES >= 3)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[2][i]);
#if (NB_MMU_MODES == 4)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[3][i]);
#endif
#endif
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], addr);
    tlb_set_dirty1(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_set_dirty1(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_set_dirty1(&env->tlb_table[3][i], addr);
#endif
#endif
}

/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int is_user, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;
    int i;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        /* Make accesses to pages with watchpoints go via the
           watchpoint trap routines. */
        for (i = 0; i < env->nb_watchpoints; i++) {
            if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
                if (address & ~TARGET_PAGE_MASK) {
                    env->watchpoint[i].is_ram = 0;
                    address = vaddr | io_mem_watch;
                } else {
                    env->watchpoint[i].is_ram = 1;
                    /* TODO: Figure out how to make read watchpoints coexist
                       with code. */
                    pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
                }
            }
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        te = &env->tlb_table[is_user][index];
        te->addend = addend;
        if (prot & PAGE_READ) {
            te->addr_read = address;
        } else {
            te->addr_read = -1;
        }
        if (prot & PAGE_EXEC) {
            te->addr_code = address;
        } else {
            te->addr_code = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
                (pd & IO_MEM_ROMD)) {
                /* write access calls the I/O callback */
                te->addr_write = vaddr |
                    (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
            } else {
                te->addr_write = address;
            }
        } else {
            te->addr_write = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int is_user, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0;j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

53a5960a 1803int page_get_flags(target_ulong address)
33417e70 1804{
9fa3e853
FB
1805 PageDesc *p;
1806
1807 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 1808 if (!p)
9fa3e853
FB
1809 return 0;
1810 return p->flags;
1811}
1812
1813/* modify the flags of a page and invalidate the code if
 1814   necessary. The flag PAGE_WRITE_ORG is set automatically
1815 depending on PAGE_WRITE */
53a5960a 1816void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853
FB
1817{
1818 PageDesc *p;
53a5960a 1819 target_ulong addr;
9fa3e853
FB
1820
1821 start = start & TARGET_PAGE_MASK;
1822 end = TARGET_PAGE_ALIGN(end);
1823 if (flags & PAGE_WRITE)
1824 flags |= PAGE_WRITE_ORG;
1825 spin_lock(&tb_lock);
1826 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1827 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1828 /* if the write protection is set, then we invalidate the code
1829 inside */
1830 if (!(p->flags & PAGE_WRITE) &&
1831 (flags & PAGE_WRITE) &&
1832 p->first_tb) {
d720b93d 1833 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
1834 }
1835 p->flags = flags;
1836 }
1837 spin_unlock(&tb_lock);
33417e70
FB
1838}
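/* Illustrative sketch (not part of the original file): the user-mode
   mmap()/mprotect() emulation is the typical caller of page_set_flags().
   After creating a mapping on behalf of the guest, the emulator records the
   protections so that page_get_flags() and page_unprotect() see them later;
   the range below is an assumed example:

       target_ulong start = 0x40000000;
       target_ulong len   = 16 * TARGET_PAGE_SIZE;
       page_set_flags(start, start + len,
                      PAGE_VALID | PAGE_READ | PAGE_WRITE);  // PAGE_WRITE_ORG added internally
*/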
1839
9fa3e853
FB
1840/* called from signal handler: invalidate the code and unprotect the
 1841   page. Return TRUE if the fault was successfully handled. */
53a5960a 1842int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853
FB
1843{
1844 unsigned int page_index, prot, pindex;
1845 PageDesc *p, *p1;
53a5960a 1846 target_ulong host_start, host_end, addr;
9fa3e853 1847
83fb7adf 1848 host_start = address & qemu_host_page_mask;
9fa3e853
FB
1849 page_index = host_start >> TARGET_PAGE_BITS;
1850 p1 = page_find(page_index);
1851 if (!p1)
1852 return 0;
83fb7adf 1853 host_end = host_start + qemu_host_page_size;
9fa3e853
FB
1854 p = p1;
1855 prot = 0;
1856 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1857 prot |= p->flags;
1858 p++;
1859 }
1860 /* if the page was really writable, then we change its
1861 protection back to writable */
1862 if (prot & PAGE_WRITE_ORG) {
1863 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1864 if (!(p1[pindex].flags & PAGE_WRITE)) {
53a5960a 1865 mprotect((void *)g2h(host_start), qemu_host_page_size,
9fa3e853
FB
1866 (prot & PAGE_BITS) | PAGE_WRITE);
1867 p1[pindex].flags |= PAGE_WRITE;
1868 /* and since the content will be modified, we must invalidate
1869 the corresponding translated code. */
d720b93d 1870 tb_invalidate_phys_page(address, pc, puc);
9fa3e853
FB
1871#ifdef DEBUG_TB_CHECK
1872 tb_invalidate_check(address);
1873#endif
1874 return 1;
1875 }
1876 }
1877 return 0;
1878}
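/* Illustrative sketch (not part of the original file): page_unprotect() is
   meant to be called from the host SIGSEGV handler.  When a guest write
   faults on a page that was write-protected only to catch self-modifying
   code, the handler invalidates the affected TBs, restores PAGE_WRITE and
   lets the faulting instruction be restarted:

       // inside a hypothetical handle_cpu_signal(pc, address, puc) helper
       if (page_unprotect(address, pc, puc))
           return 1;    // fault handled, just retry the access
       // otherwise the signal is forwarded to the guest
*/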
1879
1880/* call this function when system calls directly modify a memory area */
53a5960a
PB
 1881/* ??? This should be redundant now that we have lock_user. */
1882void page_unprotect_range(target_ulong data, target_ulong data_size)
9fa3e853 1883{
53a5960a 1884 target_ulong start, end, addr;
9fa3e853 1885
53a5960a 1886 start = data;
9fa3e853
FB
1887 end = start + data_size;
1888 start &= TARGET_PAGE_MASK;
1889 end = TARGET_PAGE_ALIGN(end);
1890 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
d720b93d 1891 page_unprotect(addr, 0, NULL);
9fa3e853
FB
1892 }
1893}
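/* Illustrative sketch (not part of the original file): page_unprotect_range()
   is intended for syscall emulation paths that write directly into guest
   memory and would otherwise fault on pages write-protected for translated
   code, e.g. before a read(2)-style syscall fills a guest buffer:

       target_ulong guest_buf = arg2;              // assumed syscall argument
       page_unprotect_range(guest_buf, count);     // count = bytes about to be written
*/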
1894
6a00d601
FB
1895static inline void tlb_set_dirty(CPUState *env,
1896 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
1897{
1898}
9fa3e853
FB
1899#endif /* defined(CONFIG_USER_ONLY) */
1900
33417e70
FB
1901/* register physical memory. 'size' must be a multiple of the target
1902 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
1903 io memory page */
2e12669a
FB
1904void cpu_register_physical_memory(target_phys_addr_t start_addr,
1905 unsigned long size,
1906 unsigned long phys_offset)
33417e70 1907{
108c49b8 1908 target_phys_addr_t addr, end_addr;
92e873b9 1909 PhysPageDesc *p;
9d42037b 1910 CPUState *env;
33417e70 1911
5fd386f6 1912 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
33417e70 1913 end_addr = start_addr + size;
5fd386f6 1914 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
108c49b8 1915 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
9fa3e853 1916 p->phys_offset = phys_offset;
2a4188a3
FB
1917 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
1918 (phys_offset & IO_MEM_ROMD))
33417e70
FB
1919 phys_offset += TARGET_PAGE_SIZE;
1920 }
9d42037b
FB
1921
1922 /* since each CPU stores ram addresses in its TLB cache, we must
1923 reset the modified entries */
1924 /* XXX: slow ! */
1925 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1926 tlb_flush(env, 1);
1927 }
33417e70
FB
1928}
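/* Illustrative sketch (not part of the original file): a machine model
   typically allocates its memory with qemu_ram_alloc() and then registers it
   here.  Assuming a hypothetical board with 128MB of RAM at address 0 and a
   64KB ROM at 0xfff00000:

       ram_addr_t ram_offset = qemu_ram_alloc(128 * 1024 * 1024);
       cpu_register_physical_memory(0x00000000, 128 * 1024 * 1024,
                                    ram_offset | IO_MEM_RAM);

       ram_addr_t rom_offset = qemu_ram_alloc(64 * 1024);
       cpu_register_physical_memory(0xfff00000, 64 * 1024,
                                    rom_offset | IO_MEM_ROM);
*/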
1929
ba863458
FB
1930/* XXX: temporary until new memory mapping API */
1931uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
1932{
1933 PhysPageDesc *p;
1934
1935 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1936 if (!p)
1937 return IO_MEM_UNASSIGNED;
1938 return p->phys_offset;
1939}
1940
e9a1ab19
FB
1941/* XXX: better than nothing */
1942ram_addr_t qemu_ram_alloc(unsigned int size)
1943{
1944 ram_addr_t addr;
1945 if ((phys_ram_alloc_offset + size) >= phys_ram_size) {
1946 fprintf(stderr, "Not enough memory (requested_size = %u, max memory = %d)\n",
1947 size, phys_ram_size);
1948 abort();
1949 }
1950 addr = phys_ram_alloc_offset;
1951 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
1952 return addr;
1953}
1954
1955void qemu_ram_free(ram_addr_t addr)
1956{
1957}
1958
a4193c8a 1959static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
33417e70 1960{
67d3b957 1961#ifdef DEBUG_UNASSIGNED
6c36d3fa 1962 printf("Unassigned mem read " TARGET_FMT_lx "\n", addr);
b4f0a316
BS
1963#endif
1964#ifdef TARGET_SPARC
6c36d3fa 1965 do_unassigned_access(addr, 0, 0, 0);
67d3b957 1966#endif
33417e70
FB
1967 return 0;
1968}
1969
a4193c8a 1970static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
33417e70 1971{
67d3b957 1972#ifdef DEBUG_UNASSIGNED
6c36d3fa 1973 printf("Unassigned mem write " TARGET_FMT_lx " = 0x%x\n", addr, val);
67d3b957 1974#endif
b4f0a316 1975#ifdef TARGET_SPARC
6c36d3fa 1976 do_unassigned_access(addr, 1, 0, 0);
b4f0a316 1977#endif
33417e70
FB
1978}
1979
1980static CPUReadMemoryFunc *unassigned_mem_read[3] = {
1981 unassigned_mem_readb,
1982 unassigned_mem_readb,
1983 unassigned_mem_readb,
1984};
1985
1986static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
1987 unassigned_mem_writeb,
1988 unassigned_mem_writeb,
1989 unassigned_mem_writeb,
1990};
1991
3a7d929e 1992static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 1993{
3a7d929e
FB
1994 unsigned long ram_addr;
1995 int dirty_flags;
1996 ram_addr = addr - (unsigned long)phys_ram_base;
1997 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1998 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 1999#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2000 tb_invalidate_phys_page_fast(ram_addr, 1);
2001 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2002#endif
3a7d929e 2003 }
c27004ec 2004 stb_p((uint8_t *)(long)addr, val);
f32fc648
FB
2005#ifdef USE_KQEMU
2006 if (cpu_single_env->kqemu_enabled &&
2007 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2008 kqemu_modify_page(cpu_single_env, ram_addr);
2009#endif
f23db169
FB
2010 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2011 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2012 /* we remove the notdirty callback only if the code has been
2013 flushed */
2014 if (dirty_flags == 0xff)
6a00d601 2015 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
2016}
2017
3a7d929e 2018static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 2019{
3a7d929e
FB
2020 unsigned long ram_addr;
2021 int dirty_flags;
2022 ram_addr = addr - (unsigned long)phys_ram_base;
2023 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2024 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2025#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2026 tb_invalidate_phys_page_fast(ram_addr, 2);
2027 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2028#endif
3a7d929e 2029 }
c27004ec 2030 stw_p((uint8_t *)(long)addr, val);
f32fc648
FB
2031#ifdef USE_KQEMU
2032 if (cpu_single_env->kqemu_enabled &&
2033 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2034 kqemu_modify_page(cpu_single_env, ram_addr);
2035#endif
f23db169
FB
2036 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2037 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2038 /* we remove the notdirty callback only if the code has been
2039 flushed */
2040 if (dirty_flags == 0xff)
6a00d601 2041 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
2042}
2043
3a7d929e 2044static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 2045{
3a7d929e
FB
2046 unsigned long ram_addr;
2047 int dirty_flags;
2048 ram_addr = addr - (unsigned long)phys_ram_base;
2049 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2050 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2051#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2052 tb_invalidate_phys_page_fast(ram_addr, 4);
2053 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2054#endif
3a7d929e 2055 }
c27004ec 2056 stl_p((uint8_t *)(long)addr, val);
f32fc648
FB
2057#ifdef USE_KQEMU
2058 if (cpu_single_env->kqemu_enabled &&
2059 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2060 kqemu_modify_page(cpu_single_env, ram_addr);
2061#endif
f23db169
FB
2062 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2063 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2064 /* we remove the notdirty callback only if the code has been
2065 flushed */
2066 if (dirty_flags == 0xff)
6a00d601 2067 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
2068}
2069
3a7d929e 2070static CPUReadMemoryFunc *error_mem_read[3] = {
9fa3e853
FB
2071 NULL, /* never used */
2072 NULL, /* never used */
2073 NULL, /* never used */
2074};
2075
1ccde1cb
FB
2076static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2077 notdirty_mem_writeb,
2078 notdirty_mem_writew,
2079 notdirty_mem_writel,
2080};
2081
6658ffb8
PB
2082#if defined(CONFIG_SOFTMMU)
2083/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2084 so these check for a hit then pass through to the normal out-of-line
2085 phys routines. */
2086static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2087{
2088 return ldub_phys(addr);
2089}
2090
2091static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2092{
2093 return lduw_phys(addr);
2094}
2095
2096static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2097{
2098 return ldl_phys(addr);
2099}
2100
2101/* Generate a debug exception if a watchpoint has been hit.
2102 Returns the real physical address of the access. addr will be a host
2103 address in the is_ram case. */
2104static target_ulong check_watchpoint(target_phys_addr_t addr)
2105{
2106 CPUState *env = cpu_single_env;
2107 target_ulong watch;
2108 target_ulong retaddr;
2109 int i;
2110
2111 retaddr = addr;
2112 for (i = 0; i < env->nb_watchpoints; i++) {
2113 watch = env->watchpoint[i].vaddr;
2114 if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
2115 if (env->watchpoint[i].is_ram)
2116 retaddr = addr - (unsigned long)phys_ram_base;
2117 if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
2118 cpu_single_env->watchpoint_hit = i + 1;
2119 cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
2120 break;
2121 }
2122 }
2123 }
2124 return retaddr;
2125}
2126
2127static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2128 uint32_t val)
2129{
2130 addr = check_watchpoint(addr);
2131 stb_phys(addr, val);
2132}
2133
2134static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2135 uint32_t val)
2136{
2137 addr = check_watchpoint(addr);
2138 stw_phys(addr, val);
2139}
2140
2141static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2142 uint32_t val)
2143{
2144 addr = check_watchpoint(addr);
2145 stl_phys(addr, val);
2146}
2147
2148static CPUReadMemoryFunc *watch_mem_read[3] = {
2149 watch_mem_readb,
2150 watch_mem_readw,
2151 watch_mem_readl,
2152};
2153
2154static CPUWriteMemoryFunc *watch_mem_write[3] = {
2155 watch_mem_writeb,
2156 watch_mem_writew,
2157 watch_mem_writel,
2158};
2159#endif
2160
33417e70
FB
2161static void io_mem_init(void)
2162{
3a7d929e 2163 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
a4193c8a 2164 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3a7d929e 2165 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
1ccde1cb
FB
2166 io_mem_nb = 5;
2167
6658ffb8
PB
2168#if defined(CONFIG_SOFTMMU)
2169 io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
2170 watch_mem_write, NULL);
2171#endif
1ccde1cb 2172 /* alloc dirty bits array */
0a962c02 2173 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
3a7d929e 2174 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
33417e70
FB
2175}
2176
 2177/* mem_read and mem_write are arrays of functions containing the
 2178   functions to access bytes (index 0), words (index 1) and dwords (index
 2179   2). All functions must be supplied. If io_index is non-zero, the
 2180   corresponding io zone is modified. If it is zero, a new io zone is
 2181   allocated. The return value can be used with
 2182   cpu_register_physical_memory(). (-1) is returned on error. */
2183int cpu_register_io_memory(int io_index,
2184 CPUReadMemoryFunc **mem_read,
a4193c8a
FB
2185 CPUWriteMemoryFunc **mem_write,
2186 void *opaque)
33417e70
FB
2187{
2188 int i;
2189
2190 if (io_index <= 0) {
b5ff1b31 2191 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
33417e70
FB
2192 return -1;
2193 io_index = io_mem_nb++;
2194 } else {
2195 if (io_index >= IO_MEM_NB_ENTRIES)
2196 return -1;
2197 }
b5ff1b31 2198
33417e70
FB
2199 for(i = 0;i < 3; i++) {
2200 io_mem_read[io_index][i] = mem_read[i];
2201 io_mem_write[io_index][i] = mem_write[i];
2202 }
a4193c8a 2203 io_mem_opaque[io_index] = opaque;
33417e70
FB
2204 return io_index << IO_MEM_SHIFT;
2205}
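/* Illustrative sketch (not part of the original file): a device model
   supplies one read and one write callback per access size, registers them
   here and then maps the returned token with cpu_register_physical_memory().
   The mydev_* names and the 0x10000000 base are assumptions for the example:

       static CPUReadMemoryFunc *mydev_read[3] = {
           mydev_readb, mydev_readw, mydev_readl,
       };
       static CPUWriteMemoryFunc *mydev_write[3] = {
           mydev_writeb, mydev_writew, mydev_writel,
       };

       int iomemtype = cpu_register_io_memory(0, mydev_read, mydev_write, s);
       cpu_register_physical_memory(0x10000000, 0x1000, iomemtype);  // s = device state opaque
*/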
61382a50 2206
8926b517
FB
2207CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2208{
2209 return io_mem_write[io_index >> IO_MEM_SHIFT];
2210}
2211
2212CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2213{
2214 return io_mem_read[io_index >> IO_MEM_SHIFT];
2215}
2216
13eb76e0
FB
2217/* physical memory access (slow version, mainly for debug) */
2218#if defined(CONFIG_USER_ONLY)
2e12669a 2219void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2220 int len, int is_write)
2221{
2222 int l, flags;
2223 target_ulong page;
53a5960a 2224 void * p;
13eb76e0
FB
2225
2226 while (len > 0) {
2227 page = addr & TARGET_PAGE_MASK;
2228 l = (page + TARGET_PAGE_SIZE) - addr;
2229 if (l > len)
2230 l = len;
2231 flags = page_get_flags(page);
2232 if (!(flags & PAGE_VALID))
2233 return;
2234 if (is_write) {
2235 if (!(flags & PAGE_WRITE))
2236 return;
53a5960a
PB
2237 p = lock_user(addr, len, 0);
2238 memcpy(p, buf, len);
2239 unlock_user(p, addr, len);
13eb76e0
FB
2240 } else {
2241 if (!(flags & PAGE_READ))
2242 return;
53a5960a
PB
2243 p = lock_user(addr, len, 1);
2244 memcpy(buf, p, len);
2245 unlock_user(p, addr, 0);
13eb76e0
FB
2246 }
2247 len -= l;
2248 buf += l;
2249 addr += l;
2250 }
2251}
8df1cd07 2252
13eb76e0 2253#else
2e12669a 2254void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2255 int len, int is_write)
2256{
2257 int l, io_index;
2258 uint8_t *ptr;
2259 uint32_t val;
2e12669a
FB
2260 target_phys_addr_t page;
2261 unsigned long pd;
92e873b9 2262 PhysPageDesc *p;
13eb76e0
FB
2263
2264 while (len > 0) {
2265 page = addr & TARGET_PAGE_MASK;
2266 l = (page + TARGET_PAGE_SIZE) - addr;
2267 if (l > len)
2268 l = len;
92e873b9 2269 p = phys_page_find(page >> TARGET_PAGE_BITS);
13eb76e0
FB
2270 if (!p) {
2271 pd = IO_MEM_UNASSIGNED;
2272 } else {
2273 pd = p->phys_offset;
2274 }
2275
2276 if (is_write) {
3a7d929e 2277 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
13eb76e0 2278 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
6a00d601
FB
2279 /* XXX: could force cpu_single_env to NULL to avoid
2280 potential bugs */
13eb76e0 2281 if (l >= 4 && ((addr & 3) == 0)) {
1c213d19 2282 /* 32 bit write access */
c27004ec 2283 val = ldl_p(buf);
a4193c8a 2284 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2285 l = 4;
2286 } else if (l >= 2 && ((addr & 1) == 0)) {
1c213d19 2287 /* 16 bit write access */
c27004ec 2288 val = lduw_p(buf);
a4193c8a 2289 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2290 l = 2;
2291 } else {
1c213d19 2292 /* 8 bit write access */
c27004ec 2293 val = ldub_p(buf);
a4193c8a 2294 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2295 l = 1;
2296 }
2297 } else {
b448f2f3
FB
2298 unsigned long addr1;
2299 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 2300 /* RAM case */
b448f2f3 2301 ptr = phys_ram_base + addr1;
13eb76e0 2302 memcpy(ptr, buf, l);
3a7d929e
FB
2303 if (!cpu_physical_memory_is_dirty(addr1)) {
2304 /* invalidate code */
2305 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2306 /* set dirty bit */
f23db169
FB
2307 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2308 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2309 }
13eb76e0
FB
2310 }
2311 } else {
2a4188a3
FB
2312 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2313 !(pd & IO_MEM_ROMD)) {
13eb76e0
FB
2314 /* I/O case */
2315 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2316 if (l >= 4 && ((addr & 3) == 0)) {
2317 /* 32 bit read access */
a4193c8a 2318 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
c27004ec 2319 stl_p(buf, val);
13eb76e0
FB
2320 l = 4;
2321 } else if (l >= 2 && ((addr & 1) == 0)) {
2322 /* 16 bit read access */
a4193c8a 2323 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
c27004ec 2324 stw_p(buf, val);
13eb76e0
FB
2325 l = 2;
2326 } else {
1c213d19 2327 /* 8 bit read access */
a4193c8a 2328 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
c27004ec 2329 stb_p(buf, val);
13eb76e0
FB
2330 l = 1;
2331 }
2332 } else {
2333 /* RAM case */
2334 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2335 (addr & ~TARGET_PAGE_MASK);
2336 memcpy(buf, ptr, l);
2337 }
2338 }
2339 len -= l;
2340 buf += l;
2341 addr += l;
2342 }
2343}
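/* Illustrative sketch (not part of the original file): most callers go
   through the cpu_physical_memory_read()/cpu_physical_memory_write()
   wrappers around cpu_physical_memory_rw(), e.g. a DMA-capable device model
   fetching a descriptor from guest RAM and writing back a status byte (the
   mydesc layout and offsets are assumptions):

       struct mydesc d;
       cpu_physical_memory_read(desc_paddr, (uint8_t *)&d, sizeof(d));
       ...
       uint8_t status = 0x01;
       cpu_physical_memory_write(desc_paddr + 8, &status, 1);
*/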
8df1cd07 2344
d0ecd2aa
FB
 2345/* used for ROM loading: can write in RAM and ROM */
2346void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2347 const uint8_t *buf, int len)
2348{
2349 int l;
2350 uint8_t *ptr;
2351 target_phys_addr_t page;
2352 unsigned long pd;
2353 PhysPageDesc *p;
2354
2355 while (len > 0) {
2356 page = addr & TARGET_PAGE_MASK;
2357 l = (page + TARGET_PAGE_SIZE) - addr;
2358 if (l > len)
2359 l = len;
2360 p = phys_page_find(page >> TARGET_PAGE_BITS);
2361 if (!p) {
2362 pd = IO_MEM_UNASSIGNED;
2363 } else {
2364 pd = p->phys_offset;
2365 }
2366
2367 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2a4188a3
FB
2368 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2369 !(pd & IO_MEM_ROMD)) {
d0ecd2aa
FB
2370 /* do nothing */
2371 } else {
2372 unsigned long addr1;
2373 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2374 /* ROM/RAM case */
2375 ptr = phys_ram_base + addr1;
2376 memcpy(ptr, buf, l);
2377 }
2378 len -= l;
2379 buf += l;
2380 addr += l;
2381 }
2382}
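/* Illustrative sketch (not part of the original file): firmware loaders use
   this helper instead of cpu_physical_memory_write() so that an image can
   land in a region registered as ROM.  Assuming a BIOS blob already read
   from disk into bios_buf:

       cpu_physical_memory_write_rom(0xfff00000, bios_buf, bios_size);
*/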
2383
2384
8df1cd07
FB
2385/* warning: addr must be aligned */
2386uint32_t ldl_phys(target_phys_addr_t addr)
2387{
2388 int io_index;
2389 uint8_t *ptr;
2390 uint32_t val;
2391 unsigned long pd;
2392 PhysPageDesc *p;
2393
2394 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2395 if (!p) {
2396 pd = IO_MEM_UNASSIGNED;
2397 } else {
2398 pd = p->phys_offset;
2399 }
2400
2a4188a3
FB
2401 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2402 !(pd & IO_MEM_ROMD)) {
8df1cd07
FB
2403 /* I/O case */
2404 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2405 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2406 } else {
2407 /* RAM case */
2408 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2409 (addr & ~TARGET_PAGE_MASK);
2410 val = ldl_p(ptr);
2411 }
2412 return val;
2413}
2414
84b7b8e7
FB
2415/* warning: addr must be aligned */
2416uint64_t ldq_phys(target_phys_addr_t addr)
2417{
2418 int io_index;
2419 uint8_t *ptr;
2420 uint64_t val;
2421 unsigned long pd;
2422 PhysPageDesc *p;
2423
2424 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2425 if (!p) {
2426 pd = IO_MEM_UNASSIGNED;
2427 } else {
2428 pd = p->phys_offset;
2429 }
2430
2a4188a3
FB
2431 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2432 !(pd & IO_MEM_ROMD)) {
84b7b8e7
FB
2433 /* I/O case */
2434 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2435#ifdef TARGET_WORDS_BIGENDIAN
2436 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2437 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2438#else
2439 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2440 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2441#endif
2442 } else {
2443 /* RAM case */
2444 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2445 (addr & ~TARGET_PAGE_MASK);
2446 val = ldq_p(ptr);
2447 }
2448 return val;
2449}
2450
aab33094
FB
2451/* XXX: optimize */
2452uint32_t ldub_phys(target_phys_addr_t addr)
2453{
2454 uint8_t val;
2455 cpu_physical_memory_read(addr, &val, 1);
2456 return val;
2457}
2458
2459/* XXX: optimize */
2460uint32_t lduw_phys(target_phys_addr_t addr)
2461{
2462 uint16_t val;
2463 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2464 return tswap16(val);
2465}
2466
8df1cd07
FB
 2467/* warning: addr must be aligned. The ram page is not marked as dirty
2468 and the code inside is not invalidated. It is useful if the dirty
2469 bits are used to track modified PTEs */
2470void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2471{
2472 int io_index;
2473 uint8_t *ptr;
2474 unsigned long pd;
2475 PhysPageDesc *p;
2476
2477 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2478 if (!p) {
2479 pd = IO_MEM_UNASSIGNED;
2480 } else {
2481 pd = p->phys_offset;
2482 }
2483
3a7d929e 2484 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07
FB
2485 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2486 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2487 } else {
2488 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2489 (addr & ~TARGET_PAGE_MASK);
2490 stl_p(ptr, val);
2491 }
2492}
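/* Illustrative sketch (not part of the original file): MMU helpers of targets
   that keep accessed/dirty bits in memory-resident page tables use the
   _notdirty store so that updating a PTE does not itself mark the page dirty
   and force code invalidation.  PG_ACCESSED_MASK is an assumed x86-style
   flag name:

       pte |= PG_ACCESSED_MASK;
       stl_phys_notdirty(pte_addr, pte);   // update the PTE without dirtying the page
*/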
2493
bc98a7ef
JM
2494void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
2495{
2496 int io_index;
2497 uint8_t *ptr;
2498 unsigned long pd;
2499 PhysPageDesc *p;
2500
2501 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2502 if (!p) {
2503 pd = IO_MEM_UNASSIGNED;
2504 } else {
2505 pd = p->phys_offset;
2506 }
2507
2508 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2509 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2510#ifdef TARGET_WORDS_BIGENDIAN
2511 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
2512 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
2513#else
2514 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2515 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
2516#endif
2517 } else {
2518 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2519 (addr & ~TARGET_PAGE_MASK);
2520 stq_p(ptr, val);
2521 }
2522}
2523
8df1cd07 2524/* warning: addr must be aligned */
8df1cd07
FB
2525void stl_phys(target_phys_addr_t addr, uint32_t val)
2526{
2527 int io_index;
2528 uint8_t *ptr;
2529 unsigned long pd;
2530 PhysPageDesc *p;
2531
2532 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2533 if (!p) {
2534 pd = IO_MEM_UNASSIGNED;
2535 } else {
2536 pd = p->phys_offset;
2537 }
2538
3a7d929e 2539 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07
FB
2540 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2541 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2542 } else {
2543 unsigned long addr1;
2544 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2545 /* RAM case */
2546 ptr = phys_ram_base + addr1;
2547 stl_p(ptr, val);
3a7d929e
FB
2548 if (!cpu_physical_memory_is_dirty(addr1)) {
2549 /* invalidate code */
2550 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2551 /* set dirty bit */
f23db169
FB
2552 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2553 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2554 }
8df1cd07
FB
2555 }
2556}
2557
aab33094
FB
2558/* XXX: optimize */
2559void stb_phys(target_phys_addr_t addr, uint32_t val)
2560{
2561 uint8_t v = val;
2562 cpu_physical_memory_write(addr, &v, 1);
2563}
2564
2565/* XXX: optimize */
2566void stw_phys(target_phys_addr_t addr, uint32_t val)
2567{
2568 uint16_t v = tswap16(val);
2569 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2570}
2571
2572/* XXX: optimize */
2573void stq_phys(target_phys_addr_t addr, uint64_t val)
2574{
2575 val = tswap64(val);
2576 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2577}
2578
13eb76e0
FB
2579#endif
2580
2581/* virtual memory access for debug */
b448f2f3
FB
2582int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2583 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2584{
2585 int l;
9b3c35e0
JM
2586 target_phys_addr_t phys_addr;
2587 target_ulong page;
13eb76e0
FB
2588
2589 while (len > 0) {
2590 page = addr & TARGET_PAGE_MASK;
2591 phys_addr = cpu_get_phys_page_debug(env, page);
2592 /* if no physical page mapped, return an error */
2593 if (phys_addr == -1)
2594 return -1;
2595 l = (page + TARGET_PAGE_SIZE) - addr;
2596 if (l > len)
2597 l = len;
b448f2f3
FB
2598 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2599 buf, l, is_write);
13eb76e0
FB
2600 len -= l;
2601 buf += l;
2602 addr += l;
2603 }
2604 return 0;
2605}
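/* Illustrative sketch (not part of the original file): the gdb stub and the
   monitor use this helper to access guest virtual memory without going
   through the TLB, e.g. reading 16 bytes at a guest virtual address for a
   memory-examine style command:

       uint8_t buf[16];
       if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0)
           return;     // no physical page mapped at vaddr
*/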
2606
e3db7226
FB
2607void dump_exec_info(FILE *f,
2608 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2609{
2610 int i, target_code_size, max_target_code_size;
2611 int direct_jmp_count, direct_jmp2_count, cross_page;
2612 TranslationBlock *tb;
2613
2614 target_code_size = 0;
2615 max_target_code_size = 0;
2616 cross_page = 0;
2617 direct_jmp_count = 0;
2618 direct_jmp2_count = 0;
2619 for(i = 0; i < nb_tbs; i++) {
2620 tb = &tbs[i];
2621 target_code_size += tb->size;
2622 if (tb->size > max_target_code_size)
2623 max_target_code_size = tb->size;
2624 if (tb->page_addr[1] != -1)
2625 cross_page++;
2626 if (tb->tb_next_offset[0] != 0xffff) {
2627 direct_jmp_count++;
2628 if (tb->tb_next_offset[1] != 0xffff) {
2629 direct_jmp2_count++;
2630 }
2631 }
2632 }
2633 /* XXX: avoid using doubles ? */
2634 cpu_fprintf(f, "TB count %d\n", nb_tbs);
2635 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
2636 nb_tbs ? target_code_size / nb_tbs : 0,
2637 max_target_code_size);
2638 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
2639 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
2640 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
2641 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
2642 cross_page,
2643 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
2644 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
2645 direct_jmp_count,
2646 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
2647 direct_jmp2_count,
2648 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
2649 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
2650 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
2651 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
2652}
2653
61382a50
FB
2654#if !defined(CONFIG_USER_ONLY)
2655
2656#define MMUSUFFIX _cmmu
2657#define GETPC() NULL
2658#define env cpu_single_env
b769d8fe 2659#define SOFTMMU_CODE_ACCESS
61382a50
FB
2660
2661#define SHIFT 0
2662#include "softmmu_template.h"
2663
2664#define SHIFT 1
2665#include "softmmu_template.h"
2666
2667#define SHIFT 2
2668#include "softmmu_template.h"
2669
2670#define SHIFT 3
2671#include "softmmu_template.h"
2672
2673#undef env
2674
2675#endif