/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation. */
#undef DEBUG_TB_CHECK
#endif

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START        0x00000000
#define MMAP_AREA_END          0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;

int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    uint32_t phys_offset;
} PhysPageDesc;

#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

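/* Illustration of how the two-level tables below are indexed: with the
   common TARGET_PAGE_BITS value of 12, a 32-bit address splits into a
   10-bit L1 index (into l1_map), a 10-bit L2 index (into the PageDesc
   array) and a 12-bit offset inside the page.  The helper below is a
   stand-alone sketch for illustration only, not part of QEMU. */
#if 0
static void example_split_address(unsigned long addr)
{
    unsigned long page_index = addr >> TARGET_PAGE_BITS;
    unsigned long l1_index = page_index >> L2_BITS;       /* index into l1_map */
    unsigned long l2_index = page_index & (L2_SIZE - 1);  /* index into the PageDesc array */
    unsigned long offset = addr & ~TARGET_PAGE_MASK;      /* offset within the page */

    printf("addr=%08lx -> l1=%lu l2=%lu off=%lu\n",
           addr, l1_index, l2_index, offset);
}
#endif
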
static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
}

static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
          pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

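/* The phys_offset stored in each PhysPageDesc packs two things: for RAM
   and ROM pages it is the page-aligned offset into phys_ram_base, while
   for MMIO pages it carries the io_mem index in the bits below
   TARGET_PAGE_MASK.  tlb_set_page_exec() and the memory access code
   further down use that encoding to tell the two cases apart.  The two
   helpers below are a stand-alone sketch of that convention, not part
   of QEMU. */
#if 0
static int example_is_mmio(uint32_t phys_offset)
{
    /* IO_MEM_RAM, IO_MEM_ROM, ... are small indices stored in the low bits */
    return (phys_offset & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
           !(phys_offset & IO_MEM_ROMD);
}

static uint8_t *example_host_ptr(uint32_t phys_offset, target_phys_addr_t addr)
{
    /* valid for RAM pages only */
    return phys_ram_base + (phys_offset & TARGET_PAGE_MASK)
           + (addr & ~TARGET_PAGE_MASK);
}
#endif
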
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#endif

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    *penv = env;
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

332#ifdef DEBUG_TB_CHECK
333
334static void tb_invalidate_check(unsigned long address)
335{
336 TranslationBlock *tb;
337 int i;
338 address &= TARGET_PAGE_MASK;
99773bd4
PB
339 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
340 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
341 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
342 address >= tb->pc + tb->size)) {
343 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
99773bd4 344 address, (long)tb->pc, tb->size);
fd6ce8f6
FB
345 }
346 }
347 }
348}
349
350/* verify that all the pages have correct rights for code */
351static void tb_page_check(void)
352{
353 TranslationBlock *tb;
354 int i, flags1, flags2;
355
99773bd4
PB
356 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
357 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
358 flags1 = page_get_flags(tb->pc);
359 flags2 = page_get_flags(tb->pc + tb->size - 1);
360 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
361 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
99773bd4 362 (long)tb->pc, tb->size, flags1, flags2);
fd6ce8f6
FB
363 }
364 }
365 }
366}
367
d4e8164f
FB
368void tb_jmp_check(TranslationBlock *tb)
369{
370 TranslationBlock *tb1;
371 unsigned int n1;
372
373 /* suppress any remaining jumps to this TB */
374 tb1 = tb->jmp_first;
375 for(;;) {
376 n1 = (long)tb1 & 3;
377 tb1 = (TranslationBlock *)((long)tb1 & ~3);
378 if (n1 == 2)
379 break;
380 tb1 = tb1->jmp_next[n1];
381 }
382 /* check end of list */
383 if (tb1 != tb) {
384 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
385 }
386}
387
fd6ce8f6
FB
388#endif
389
390/* invalidate one TB */
391static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
392 int next_offset)
393{
394 TranslationBlock *tb1;
395 for(;;) {
396 tb1 = *ptb;
397 if (tb1 == tb) {
398 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
399 break;
400 }
401 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
402 }
403}
404
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

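/* The lists walked above store a small tag in the low two bits of each
   TranslationBlock pointer: values 0 and 1 say which of the TB's two
   page slots (page_next[0]/page_next[1]) the link belongs to, and the
   value 2 marks the head of the circular jump list (see the jmp_first
   handling elsewhere in this file).  The stand-alone sketch below only
   illustrates that encoding; the tb_tag()/tb_untag() helpers are
   hypothetical and not part of QEMU. */
#if 0
#include <assert.h>

typedef struct ExampleTB ExampleTB;   /* stands in for TranslationBlock */

static ExampleTB *tb_tag(ExampleTB *tb, unsigned int n)
{
    /* pointers are at least 4-byte aligned, so bits 0-1 are free */
    return (ExampleTB *)((long)tb | n);
}

static ExampleTB *tb_untag(ExampleTB *tagged, unsigned int *n)
{
    *n = (long)tagged & 3;
    return (ExampleTB *)((long)tagged & ~3);
}

static void example_tag_usage(void)
{
    ExampleTB *some_tb = (ExampleTB *)0x1000;   /* aligned dummy pointer */
    unsigned int n;
    ExampleTB *p = tb_tag(some_tb, 1);          /* link lives in page slot 1 */

    assert(tb_untag(p, &n) == some_tb && n == 1);
}
#endif
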
d4e8164f
FB
422static inline void tb_jmp_remove(TranslationBlock *tb, int n)
423{
424 TranslationBlock *tb1, **ptb;
425 unsigned int n1;
426
427 ptb = &tb->jmp_next[n];
428 tb1 = *ptb;
429 if (tb1) {
430 /* find tb(n) in circular list */
431 for(;;) {
432 tb1 = *ptb;
433 n1 = (long)tb1 & 3;
434 tb1 = (TranslationBlock *)((long)tb1 & ~3);
435 if (n1 == n && tb1 == tb)
436 break;
437 if (n1 == 2) {
438 ptb = &tb1->jmp_first;
439 } else {
440 ptb = &tb1->jmp_next[n1];
441 }
442 }
443 /* now we can suppress tb(n) from the list */
444 *ptb = tb->jmp_next[n];
445
446 tb->jmp_next[n] = NULL;
447 }
448}
449
450/* reset the jump entry 'n' of a TB so that it is not chained to
451 another TB */
452static inline void tb_reset_jump(TranslationBlock *tb, int n)
453{
454 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
455}
456
8a40a180 457static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
fd6ce8f6 458{
6a00d601 459 CPUState *env;
8a40a180 460 PageDesc *p;
d4e8164f 461 unsigned int h, n1;
8a40a180
FB
462 target_ulong phys_pc;
463 TranslationBlock *tb1, *tb2;
d4e8164f 464
8a40a180
FB
465 /* remove the TB from the hash list */
466 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
467 h = tb_phys_hash_func(phys_pc);
468 tb_remove(&tb_phys_hash[h], tb,
469 offsetof(TranslationBlock, phys_hash_next));
470
471 /* remove the TB from the page list */
472 if (tb->page_addr[0] != page_addr) {
473 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
474 tb_page_remove(&p->first_tb, tb);
475 invalidate_page_bitmap(p);
476 }
477 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
478 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
479 tb_page_remove(&p->first_tb, tb);
480 invalidate_page_bitmap(p);
481 }
482
36bdbe54 483 tb_invalidated_flag = 1;
59817ccb 484
fd6ce8f6 485 /* remove the TB from the hash list */
8a40a180 486 h = tb_jmp_cache_hash_func(tb->pc);
6a00d601
FB
487 for(env = first_cpu; env != NULL; env = env->next_cpu) {
488 if (env->tb_jmp_cache[h] == tb)
489 env->tb_jmp_cache[h] = NULL;
490 }
d4e8164f
FB
491
492 /* suppress this TB from the two jump lists */
493 tb_jmp_remove(tb, 0);
494 tb_jmp_remove(tb, 1);
495
496 /* suppress any remaining jumps to this TB */
497 tb1 = tb->jmp_first;
498 for(;;) {
499 n1 = (long)tb1 & 3;
500 if (n1 == 2)
501 break;
502 tb1 = (TranslationBlock *)((long)tb1 & ~3);
503 tb2 = tb1->jmp_next[n1];
504 tb_reset_jump(tb1, n1);
505 tb1->jmp_next[n1] = NULL;
506 tb1 = tb2;
507 }
508 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
9fa3e853 509
e3db7226 510 tb_phys_invalidate_count++;
9fa3e853
FB
511}
512
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

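/* Minimal usage sketch for set_bits(): in build_page_bitmap() below the
   bitmap has one bit per byte of the guest page, so marking a TB that
   covers bytes 3..12 of the page sets bits 3..12.  The expected result
   is tab[0] == 0xf8 and tab[1] == 0x1f.  Illustration only; it relies
   on the set_bits() definition above. */
#if 0
#include <assert.h>

static void example_set_bits(void)
{
    uint8_t tab[TARGET_PAGE_SIZE / 8];

    memset(tab, 0, sizeof(tab));
    set_bits(tab, 3, 10);               /* mark bits 3..12 */
    assert(tab[0] == 0xf8 && tab[1] == 0x1f);
}
#endif
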
540static void build_page_bitmap(PageDesc *p)
541{
542 int n, tb_start, tb_end;
543 TranslationBlock *tb;
544
59817ccb 545 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
9fa3e853
FB
546 if (!p->code_bitmap)
547 return;
548 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
549
550 tb = p->first_tb;
551 while (tb != NULL) {
552 n = (long)tb & 3;
553 tb = (TranslationBlock *)((long)tb & ~3);
554 /* NOTE: this is subtle as a TB may span two physical pages */
555 if (n == 0) {
556 /* NOTE: tb_end may be after the end of the page, but
557 it is not a problem */
558 tb_start = tb->pc & ~TARGET_PAGE_MASK;
559 tb_end = tb_start + tb->size;
560 if (tb_end > TARGET_PAGE_SIZE)
561 tb_end = TARGET_PAGE_SIZE;
562 } else {
563 tb_start = 0;
564 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
565 }
566 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
567 tb = tb->page_next[n];
568 }
569}
570
d720b93d
FB
571#ifdef TARGET_HAS_PRECISE_SMC
572
573static void tb_gen_code(CPUState *env,
574 target_ulong pc, target_ulong cs_base, int flags,
575 int cflags)
576{
577 TranslationBlock *tb;
578 uint8_t *tc_ptr;
579 target_ulong phys_pc, phys_page2, virt_page2;
580 int code_gen_size;
581
c27004ec
FB
582 phys_pc = get_phys_addr_code(env, pc);
583 tb = tb_alloc(pc);
d720b93d
FB
584 if (!tb) {
585 /* flush must be done */
586 tb_flush(env);
587 /* cannot fail at this point */
c27004ec 588 tb = tb_alloc(pc);
d720b93d
FB
589 }
590 tc_ptr = code_gen_ptr;
591 tb->tc_ptr = tc_ptr;
592 tb->cs_base = cs_base;
593 tb->flags = flags;
594 tb->cflags = cflags;
595 cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
596 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
597
598 /* check next page if needed */
c27004ec 599 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
d720b93d 600 phys_page2 = -1;
c27004ec 601 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
d720b93d
FB
602 phys_page2 = get_phys_addr_code(env, virt_page2);
603 }
604 tb_link_phys(tb, phys_pc, phys_page2);
605}
606#endif
607
9fa3e853
FB
608/* invalidate all TBs which intersect with the target physical page
609 starting in range [start;end[. NOTE: start and end must refer to
d720b93d
FB
610 the same physical page. 'is_cpu_write_access' should be true if called
611 from a real cpu write access: the virtual CPU will exit the current
612 TB if code is modified inside this TB. */
613void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
614 int is_cpu_write_access)
615{
616 int n, current_tb_modified, current_tb_not_found, current_flags;
d720b93d 617 CPUState *env = cpu_single_env;
9fa3e853 618 PageDesc *p;
ea1c1802 619 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
9fa3e853 620 target_ulong tb_start, tb_end;
d720b93d 621 target_ulong current_pc, current_cs_base;
9fa3e853
FB
622
623 p = page_find(start >> TARGET_PAGE_BITS);
624 if (!p)
625 return;
626 if (!p->code_bitmap &&
d720b93d
FB
627 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
628 is_cpu_write_access) {
9fa3e853
FB
629 /* build code bitmap */
630 build_page_bitmap(p);
631 }
632
633 /* we remove all the TBs in the range [start, end[ */
634 /* XXX: see if in some cases it could be faster to invalidate all the code */
d720b93d
FB
635 current_tb_not_found = is_cpu_write_access;
636 current_tb_modified = 0;
637 current_tb = NULL; /* avoid warning */
638 current_pc = 0; /* avoid warning */
639 current_cs_base = 0; /* avoid warning */
640 current_flags = 0; /* avoid warning */
9fa3e853
FB
641 tb = p->first_tb;
642 while (tb != NULL) {
643 n = (long)tb & 3;
644 tb = (TranslationBlock *)((long)tb & ~3);
645 tb_next = tb->page_next[n];
646 /* NOTE: this is subtle as a TB may span two physical pages */
647 if (n == 0) {
648 /* NOTE: tb_end may be after the end of the page, but
649 it is not a problem */
650 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
651 tb_end = tb_start + tb->size;
652 } else {
653 tb_start = tb->page_addr[1];
654 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
655 }
656 if (!(tb_end <= start || tb_start >= end)) {
d720b93d
FB
657#ifdef TARGET_HAS_PRECISE_SMC
658 if (current_tb_not_found) {
659 current_tb_not_found = 0;
660 current_tb = NULL;
661 if (env->mem_write_pc) {
662 /* now we have a real cpu fault */
663 current_tb = tb_find_pc(env->mem_write_pc);
664 }
665 }
666 if (current_tb == tb &&
667 !(current_tb->cflags & CF_SINGLE_INSN)) {
668 /* If we are modifying the current TB, we must stop
669 its execution. We could be more precise by checking
670 that the modification is after the current PC, but it
671 would require a specialized function to partially
672 restore the CPU state */
673
674 current_tb_modified = 1;
675 cpu_restore_state(current_tb, env,
676 env->mem_write_pc, NULL);
677#if defined(TARGET_I386)
678 current_flags = env->hflags;
679 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
680 current_cs_base = (target_ulong)env->segs[R_CS].base;
681 current_pc = current_cs_base + env->eip;
682#else
683#error unsupported CPU
684#endif
685 }
686#endif /* TARGET_HAS_PRECISE_SMC */
6f5a9f7e
FB
687 /* we need to do that to handle the case where a signal
688 occurs while doing tb_phys_invalidate() */
689 saved_tb = NULL;
690 if (env) {
691 saved_tb = env->current_tb;
692 env->current_tb = NULL;
693 }
9fa3e853 694 tb_phys_invalidate(tb, -1);
6f5a9f7e
FB
695 if (env) {
696 env->current_tb = saved_tb;
697 if (env->interrupt_request && env->current_tb)
698 cpu_interrupt(env, env->interrupt_request);
699 }
9fa3e853
FB
700 }
701 tb = tb_next;
702 }
703#if !defined(CONFIG_USER_ONLY)
704 /* if no code remaining, no need to continue to use slow writes */
705 if (!p->first_tb) {
706 invalidate_page_bitmap(p);
d720b93d
FB
707 if (is_cpu_write_access) {
708 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
709 }
710 }
711#endif
712#ifdef TARGET_HAS_PRECISE_SMC
713 if (current_tb_modified) {
714 /* we generate a block containing just the instruction
715 modifying the memory. It will ensure that it cannot modify
716 itself */
ea1c1802 717 env->current_tb = NULL;
d720b93d
FB
718 tb_gen_code(env, current_pc, current_cs_base, current_flags,
719 CF_SINGLE_INSN);
720 cpu_resume_from_signal(env, NULL);
9fa3e853 721 }
fd6ce8f6 722#endif
9fa3e853 723}
fd6ce8f6 724
9fa3e853 725/* len must be <= 8 and start must be a multiple of len */
d720b93d 726static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
9fa3e853
FB
727{
728 PageDesc *p;
729 int offset, b;
59817ccb 730#if 0
a4193c8a
FB
731 if (1) {
732 if (loglevel) {
733 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
734 cpu_single_env->mem_write_vaddr, len,
735 cpu_single_env->eip,
736 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
737 }
59817ccb
FB
738 }
739#endif
9fa3e853
FB
740 p = page_find(start >> TARGET_PAGE_BITS);
741 if (!p)
742 return;
743 if (p->code_bitmap) {
744 offset = start & ~TARGET_PAGE_MASK;
745 b = p->code_bitmap[offset >> 3] >> (offset & 7);
746 if (b & ((1 << len) - 1))
747 goto do_invalidate;
748 } else {
749 do_invalidate:
d720b93d 750 tb_invalidate_phys_page_range(start, start + len, 1);
9fa3e853
FB
751 }
752}
753
9fa3e853 754#if !defined(CONFIG_SOFTMMU)
d720b93d
FB
755static void tb_invalidate_phys_page(target_ulong addr,
756 unsigned long pc, void *puc)
9fa3e853 757{
d720b93d
FB
758 int n, current_flags, current_tb_modified;
759 target_ulong current_pc, current_cs_base;
9fa3e853 760 PageDesc *p;
d720b93d
FB
761 TranslationBlock *tb, *current_tb;
762#ifdef TARGET_HAS_PRECISE_SMC
763 CPUState *env = cpu_single_env;
764#endif
9fa3e853
FB
765
766 addr &= TARGET_PAGE_MASK;
767 p = page_find(addr >> TARGET_PAGE_BITS);
768 if (!p)
769 return;
770 tb = p->first_tb;
d720b93d
FB
771 current_tb_modified = 0;
772 current_tb = NULL;
773 current_pc = 0; /* avoid warning */
774 current_cs_base = 0; /* avoid warning */
775 current_flags = 0; /* avoid warning */
776#ifdef TARGET_HAS_PRECISE_SMC
777 if (tb && pc != 0) {
778 current_tb = tb_find_pc(pc);
779 }
780#endif
9fa3e853
FB
781 while (tb != NULL) {
782 n = (long)tb & 3;
783 tb = (TranslationBlock *)((long)tb & ~3);
d720b93d
FB
784#ifdef TARGET_HAS_PRECISE_SMC
785 if (current_tb == tb &&
786 !(current_tb->cflags & CF_SINGLE_INSN)) {
787 /* If we are modifying the current TB, we must stop
788 its execution. We could be more precise by checking
789 that the modification is after the current PC, but it
790 would require a specialized function to partially
791 restore the CPU state */
792
793 current_tb_modified = 1;
794 cpu_restore_state(current_tb, env, pc, puc);
795#if defined(TARGET_I386)
796 current_flags = env->hflags;
797 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
798 current_cs_base = (target_ulong)env->segs[R_CS].base;
799 current_pc = current_cs_base + env->eip;
800#else
801#error unsupported CPU
802#endif
803 }
804#endif /* TARGET_HAS_PRECISE_SMC */
9fa3e853
FB
805 tb_phys_invalidate(tb, addr);
806 tb = tb->page_next[n];
807 }
fd6ce8f6 808 p->first_tb = NULL;
d720b93d
FB
809#ifdef TARGET_HAS_PRECISE_SMC
810 if (current_tb_modified) {
811 /* we generate a block containing just the instruction
812 modifying the memory. It will ensure that it cannot modify
813 itself */
ea1c1802 814 env->current_tb = NULL;
d720b93d
FB
815 tb_gen_code(env, current_pc, current_cs_base, current_flags,
816 CF_SINGLE_INSN);
817 cpu_resume_from_signal(env, puc);
818 }
819#endif
fd6ce8f6 820}
9fa3e853 821#endif
fd6ce8f6
FB
822
823/* add the tb in the target page and protect it if necessary */
9fa3e853 824static inline void tb_alloc_page(TranslationBlock *tb,
53a5960a 825 unsigned int n, target_ulong page_addr)
fd6ce8f6
FB
826{
827 PageDesc *p;
9fa3e853
FB
828 TranslationBlock *last_first_tb;
829
830 tb->page_addr[n] = page_addr;
3a7d929e 831 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
9fa3e853
FB
832 tb->page_next[n] = p->first_tb;
833 last_first_tb = p->first_tb;
834 p->first_tb = (TranslationBlock *)((long)tb | n);
835 invalidate_page_bitmap(p);
fd6ce8f6 836
107db443 837#if defined(TARGET_HAS_SMC) || 1
d720b93d 838
9fa3e853 839#if defined(CONFIG_USER_ONLY)
fd6ce8f6 840 if (p->flags & PAGE_WRITE) {
53a5960a
PB
841 target_ulong addr;
842 PageDesc *p2;
9fa3e853
FB
843 int prot;
844
fd6ce8f6
FB
845 /* force the host page as non writable (writes will have a
846 page fault + mprotect overhead) */
53a5960a 847 page_addr &= qemu_host_page_mask;
fd6ce8f6 848 prot = 0;
53a5960a
PB
849 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
850 addr += TARGET_PAGE_SIZE) {
851
852 p2 = page_find (addr >> TARGET_PAGE_BITS);
853 if (!p2)
854 continue;
855 prot |= p2->flags;
856 p2->flags &= ~PAGE_WRITE;
857 page_get_flags(addr);
858 }
859 mprotect(g2h(page_addr), qemu_host_page_size,
fd6ce8f6
FB
860 (prot & PAGE_BITS) & ~PAGE_WRITE);
861#ifdef DEBUG_TB_INVALIDATE
862 printf("protecting code page: 0x%08lx\n",
53a5960a 863 page_addr);
fd6ce8f6 864#endif
fd6ce8f6 865 }
9fa3e853
FB
866#else
867 /* if some code is already present, then the pages are already
868 protected. So we handle the case where only the first TB is
869 allocated in a physical page */
870 if (!last_first_tb) {
6a00d601 871 tlb_protect_code(page_addr);
9fa3e853
FB
872 }
873#endif
d720b93d
FB
874
875#endif /* TARGET_HAS_SMC */
fd6ce8f6
FB
876}
877
878/* Allocate a new translation block. Flush the translation buffer if
879 too many translation blocks or too much generated code. */
c27004ec 880TranslationBlock *tb_alloc(target_ulong pc)
fd6ce8f6
FB
881{
882 TranslationBlock *tb;
fd6ce8f6
FB
883
884 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
885 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
d4e8164f 886 return NULL;
fd6ce8f6
FB
887 tb = &tbs[nb_tbs++];
888 tb->pc = pc;
b448f2f3 889 tb->cflags = 0;
d4e8164f
FB
890 return tb;
891}
892
9fa3e853
FB
893/* add a new TB and link it to the physical page tables. phys_page2 is
894 (-1) to indicate that only one page contains the TB. */
895void tb_link_phys(TranslationBlock *tb,
896 target_ulong phys_pc, target_ulong phys_page2)
d4e8164f 897{
9fa3e853
FB
898 unsigned int h;
899 TranslationBlock **ptb;
900
901 /* add in the physical hash table */
902 h = tb_phys_hash_func(phys_pc);
903 ptb = &tb_phys_hash[h];
904 tb->phys_hash_next = *ptb;
905 *ptb = tb;
fd6ce8f6
FB
906
907 /* add in the page list */
9fa3e853
FB
908 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
909 if (phys_page2 != -1)
910 tb_alloc_page(tb, 1, phys_page2);
911 else
912 tb->page_addr[1] = -1;
9fa3e853 913
d4e8164f
FB
914 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
915 tb->jmp_next[0] = NULL;
916 tb->jmp_next[1] = NULL;
b448f2f3
FB
917#ifdef USE_CODE_COPY
918 tb->cflags &= ~CF_FP_USED;
919 if (tb->cflags & CF_TB_FP_USED)
920 tb->cflags |= CF_FP_USED;
921#endif
d4e8164f
FB
922
923 /* init original jump addresses */
924 if (tb->tb_next_offset[0] != 0xffff)
925 tb_reset_jump(tb, 0);
926 if (tb->tb_next_offset[1] != 0xffff)
927 tb_reset_jump(tb, 1);
8a40a180
FB
928
929#ifdef DEBUG_TB_CHECK
930 tb_page_check();
931#endif
fd6ce8f6
FB
932}
933
9fa3e853
FB
934/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
935 tb[1].tc_ptr. Return NULL if not found */
936TranslationBlock *tb_find_pc(unsigned long tc_ptr)
fd6ce8f6 937{
9fa3e853
FB
938 int m_min, m_max, m;
939 unsigned long v;
940 TranslationBlock *tb;
a513fe19
FB
941
942 if (nb_tbs <= 0)
943 return NULL;
944 if (tc_ptr < (unsigned long)code_gen_buffer ||
945 tc_ptr >= (unsigned long)code_gen_ptr)
946 return NULL;
947 /* binary search (cf Knuth) */
948 m_min = 0;
949 m_max = nb_tbs - 1;
950 while (m_min <= m_max) {
951 m = (m_min + m_max) >> 1;
952 tb = &tbs[m];
953 v = (unsigned long)tb->tc_ptr;
954 if (v == tc_ptr)
955 return tb;
956 else if (tc_ptr < v) {
957 m_max = m - 1;
958 } else {
959 m_min = m + 1;
960 }
961 }
962 return &tbs[m_max];
963}
7501267e 964
ea041c0e
FB
965static void tb_reset_jump_recursive(TranslationBlock *tb);
966
967static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
968{
969 TranslationBlock *tb1, *tb_next, **ptb;
970 unsigned int n1;
971
972 tb1 = tb->jmp_next[n];
973 if (tb1 != NULL) {
974 /* find head of list */
975 for(;;) {
976 n1 = (long)tb1 & 3;
977 tb1 = (TranslationBlock *)((long)tb1 & ~3);
978 if (n1 == 2)
979 break;
980 tb1 = tb1->jmp_next[n1];
981 }
        /* we are now sure that tb jumps to tb1 */
983 tb_next = tb1;
984
985 /* remove tb from the jmp_first list */
986 ptb = &tb_next->jmp_first;
987 for(;;) {
988 tb1 = *ptb;
989 n1 = (long)tb1 & 3;
990 tb1 = (TranslationBlock *)((long)tb1 & ~3);
991 if (n1 == n && tb1 == tb)
992 break;
993 ptb = &tb1->jmp_next[n1];
994 }
995 *ptb = tb->jmp_next[n];
996 tb->jmp_next[n] = NULL;
997
998 /* suppress the jump to next tb in generated code */
999 tb_reset_jump(tb, n);
1000
0124311e 1001 /* suppress jumps in the tb on which we could have jumped */
ea041c0e
FB
1002 tb_reset_jump_recursive(tb_next);
1003 }
1004}
1005
1006static void tb_reset_jump_recursive(TranslationBlock *tb)
1007{
1008 tb_reset_jump_recursive2(tb, 0);
1009 tb_reset_jump_recursive2(tb, 1);
1010}
1011
1fddef4b 1012#if defined(TARGET_HAS_ICE)
d720b93d
FB
1013static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1014{
c2f07f81
PB
1015 target_ulong addr, pd;
1016 ram_addr_t ram_addr;
1017 PhysPageDesc *p;
d720b93d 1018
c2f07f81
PB
1019 addr = cpu_get_phys_page_debug(env, pc);
1020 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1021 if (!p) {
1022 pd = IO_MEM_UNASSIGNED;
1023 } else {
1024 pd = p->phys_offset;
1025 }
1026 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
706cd4b5 1027 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
d720b93d 1028}
c27004ec 1029#endif
d720b93d 1030
/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

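/* Illustrative use of the breakpoint API, e.g. from a debugger stub:
   insert a breakpoint, run the CPU loop until it reports EXCP_DEBUG,
   then remove the breakpoint again.  The example_debug_run() wrapper
   and the direct cpu_exec() loop are illustrative only. */
#if 0
static void example_debug_run(CPUState *env, target_ulong pc)
{
    if (cpu_breakpoint_insert(env, pc) < 0)
        return;                      /* table full or not supported */
    for (;;) {
        int ret = cpu_exec(env);     /* run translated code */
        if (ret == EXCP_DEBUG)
            break;                   /* breakpoint (or single step) hit */
    }
    cpu_breakpoint_remove(env, pc);
}
#endif
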
1054/* remove a breakpoint */
2e12669a 1055int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
4c3a88a2 1056{
1fddef4b 1057#if defined(TARGET_HAS_ICE)
4c3a88a2
FB
1058 int i;
1059 for(i = 0; i < env->nb_breakpoints; i++) {
1060 if (env->breakpoints[i] == pc)
1061 goto found;
1062 }
1063 return -1;
1064 found:
4c3a88a2 1065 env->nb_breakpoints--;
1fddef4b
FB
1066 if (i < env->nb_breakpoints)
1067 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
d720b93d
FB
1068
1069 breakpoint_invalidate(env, pc);
4c3a88a2
FB
1070 return 0;
1071#else
1072 return -1;
1073#endif
1074}
1075
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

34865134
FB
1090/* enable or disable low levels log */
1091void cpu_set_log(int log_flags)
1092{
1093 loglevel = log_flags;
1094 if (loglevel && !logfile) {
1095 logfile = fopen(logfilename, "w");
1096 if (!logfile) {
1097 perror(logfilename);
1098 _exit(1);
1099 }
9fa3e853
FB
1100#if !defined(CONFIG_SOFTMMU)
1101 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1102 {
1103 static uint8_t logfile_buf[4096];
1104 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1105 }
1106#else
34865134 1107 setvbuf(logfile, NULL, _IOLBF, 0);
9fa3e853 1108#endif
34865134
FB
1109 }
1110}
1111
1112void cpu_set_log_filename(const char *filename)
1113{
1114 logfilename = strdup(filename);
1115}
c33a346e 1116
0124311e 1117/* mask must never be zero, except for A20 change call */
68a79315 1118void cpu_interrupt(CPUState *env, int mask)
ea041c0e
FB
1119{
1120 TranslationBlock *tb;
ee8b7021 1121 static int interrupt_lock;
59817ccb 1122
68a79315 1123 env->interrupt_request |= mask;
ea041c0e
FB
1124 /* if the cpu is currently executing code, we must unlink it and
1125 all the potentially executing TB */
1126 tb = env->current_tb;
ee8b7021
FB
1127 if (tb && !testandset(&interrupt_lock)) {
1128 env->current_tb = NULL;
ea041c0e 1129 tb_reset_jump_recursive(tb);
ee8b7021 1130 interrupt_lock = 0;
ea041c0e
FB
1131 }
1132}
1133
b54ad049
FB
1134void cpu_reset_interrupt(CPUState *env, int mask)
1135{
1136 env->interrupt_request &= ~mask;
1137}
1138
f193c797
FB
1139CPULogItem cpu_log_items[] = {
1140 { CPU_LOG_TB_OUT_ASM, "out_asm",
1141 "show generated host assembly code for each compiled TB" },
1142 { CPU_LOG_TB_IN_ASM, "in_asm",
1143 "show target assembly code for each compiled TB" },
1144 { CPU_LOG_TB_OP, "op",
1145 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1146#ifdef TARGET_I386
1147 { CPU_LOG_TB_OP_OPT, "op_opt",
1148 "show micro ops after optimization for each compiled TB" },
1149#endif
1150 { CPU_LOG_INT, "int",
1151 "show interrupts/exceptions in short format" },
1152 { CPU_LOG_EXEC, "exec",
1153 "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
f193c797
FB
1156#ifdef TARGET_I386
1157 { CPU_LOG_PCALL, "pcall",
1158 "show protected mode far calls/returns/exceptions" },
1159#endif
8e3a9fd2 1160#ifdef DEBUG_IOPORT
fd872598
FB
1161 { CPU_LOG_IOPORT, "ioport",
1162 "show all i/o ports accesses" },
8e3a9fd2 1163#endif
f193c797
FB
1164 { 0, NULL, NULL },
1165};
1166
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma-separated list of log masks. Returns 0 on error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

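/* Minimal sketch of how a command line option such as "-d in_asm,exec"
   is typically wired to the functions above: parse the list into a
   mask, then enable logging.  The option-handling wrapper itself is
   illustrative only. */
#if 0
static void example_handle_d_option(const char *arg)
{
    int mask = cpu_str_to_log_mask(arg);

    if (!mask) {
        fprintf(stderr, "unknown log item in '%s'\n", arg);
        return;
    }
    cpu_set_log(mask);            /* opens /tmp/qemu.log on first use */
}
#endif
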
7501267e
FB
1207void cpu_abort(CPUState *env, const char *fmt, ...)
1208{
1209 va_list ap;
1210
1211 va_start(ap, fmt);
1212 fprintf(stderr, "qemu: fatal: ");
1213 vfprintf(stderr, fmt, ap);
1214 fprintf(stderr, "\n");
1215#ifdef TARGET_I386
7fe48483
FB
1216 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1217#else
1218 cpu_dump_state(env, stderr, fprintf, 0);
7501267e
FB
1219#endif
1220 va_end(ap);
1221 abort();
1222}
1223
0124311e
FB
1224#if !defined(CONFIG_USER_ONLY)
1225
ee8b7021
FB
1226/* NOTE: if flush_global is true, also flush global entries (not
1227 implemented yet) */
1228void tlb_flush(CPUState *env, int flush_global)
33417e70 1229{
33417e70 1230 int i;
0124311e 1231
9fa3e853
FB
1232#if defined(DEBUG_TLB)
1233 printf("tlb_flush:\n");
1234#endif
0124311e
FB
1235 /* must reset current TB so that interrupts cannot modify the
1236 links while we are modifying them */
1237 env->current_tb = NULL;
1238
33417e70 1239 for(i = 0; i < CPU_TLB_SIZE; i++) {
84b7b8e7
FB
1240 env->tlb_table[0][i].addr_read = -1;
1241 env->tlb_table[0][i].addr_write = -1;
1242 env->tlb_table[0][i].addr_code = -1;
1243 env->tlb_table[1][i].addr_read = -1;
1244 env->tlb_table[1][i].addr_write = -1;
1245 env->tlb_table[1][i].addr_code = -1;
33417e70 1246 }
9fa3e853 1247
8a40a180 1248 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
9fa3e853
FB
1249
1250#if !defined(CONFIG_SOFTMMU)
1251 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
0a962c02
FB
1252#endif
1253#ifdef USE_KQEMU
1254 if (env->kqemu_enabled) {
1255 kqemu_flush(env, flush_global);
1256 }
9fa3e853 1257#endif
e3db7226 1258 tlb_flush_count++;
33417e70
FB
1259}
1260
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}

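/* Simplified sketch of how the fields cleared above are used on the
   fast path: a load indexes the TLB with the low bits of the virtual
   page number, compares the page-aligned address against addr_read,
   and on a hit reads through vaddr + addend (the addend is computed in
   tlb_set_page_exec() below).  The real code lives in the softmmu
   templates and also handles I/O, invalid and unaligned cases; this is
   an illustration only. */
#if 0
static inline uint32_t example_ldl_fast(CPUState *env, target_ulong vaddr,
                                        int is_user)
{
    int index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    CPUTLBEntry *te = &env->tlb_table[is_user][index];

    if ((vaddr & TARGET_PAGE_MASK) ==
        (te->addr_read & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        /* RAM hit: addend maps the guest virtual page to host memory */
        return ldl_p((uint8_t *)(long)(vaddr + te->addend));
    }
    /* miss or I/O page: would fall back to the slow path */
    return 0;
}
#endif
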
2e12669a 1275void tlb_flush_page(CPUState *env, target_ulong addr)
33417e70 1276{
8a40a180 1277 int i;
9fa3e853 1278 TranslationBlock *tb;
0124311e 1279
9fa3e853 1280#if defined(DEBUG_TLB)
108c49b8 1281 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
9fa3e853 1282#endif
0124311e
FB
1283 /* must reset current TB so that interrupts cannot modify the
1284 links while we are modifying them */
1285 env->current_tb = NULL;
61382a50
FB
1286
1287 addr &= TARGET_PAGE_MASK;
1288 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
84b7b8e7
FB
1289 tlb_flush_entry(&env->tlb_table[0][i], addr);
1290 tlb_flush_entry(&env->tlb_table[1][i], addr);
0124311e 1291
b362e5e0
PB
1292 /* Discard jump cache entries for any tb which might potentially
1293 overlap the flushed page. */
1294 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1295 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1296
1297 i = tb_jmp_cache_hash_page(addr);
1298 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
9fa3e853 1299
0124311e 1300#if !defined(CONFIG_SOFTMMU)
9fa3e853 1301 if (addr < MMAP_AREA_END)
0124311e 1302 munmap((void *)addr, TARGET_PAGE_SIZE);
61382a50 1303#endif
0a962c02
FB
1304#ifdef USE_KQEMU
1305 if (env->kqemu_enabled) {
1306 kqemu_flush_page(env, addr);
1307 }
1308#endif
9fa3e853
FB
1309}
1310
9fa3e853
FB
1311/* update the TLBs so that writes to code in the virtual page 'addr'
1312 can be detected */
6a00d601 1313static void tlb_protect_code(ram_addr_t ram_addr)
9fa3e853 1314{
6a00d601
FB
1315 cpu_physical_memory_reset_dirty(ram_addr,
1316 ram_addr + TARGET_PAGE_SIZE,
1317 CODE_DIRTY_FLAG);
9fa3e853
FB
1318}
1319
9fa3e853 1320/* update the TLB so that writes in physical page 'phys_addr' are no longer
3a7d929e
FB
1321 tested for self modifying code */
1322static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1323 target_ulong vaddr)
9fa3e853 1324{
3a7d929e 1325 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1ccde1cb
FB
1326}
1327
1328static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1329 unsigned long start, unsigned long length)
1330{
1331 unsigned long addr;
84b7b8e7
FB
1332 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1333 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 1334 if ((addr - start) < length) {
84b7b8e7 1335 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1ccde1cb
FB
1336 }
1337 }
1338}
1339
3a7d929e 1340void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 1341 int dirty_flags)
1ccde1cb
FB
1342{
1343 CPUState *env;
4f2ac237 1344 unsigned long length, start1;
0a962c02
FB
1345 int i, mask, len;
1346 uint8_t *p;
1ccde1cb
FB
1347
1348 start &= TARGET_PAGE_MASK;
1349 end = TARGET_PAGE_ALIGN(end);
1350
1351 length = end - start;
1352 if (length == 0)
1353 return;
0a962c02 1354 len = length >> TARGET_PAGE_BITS;
3a7d929e 1355#ifdef USE_KQEMU
6a00d601
FB
1356 /* XXX: should not depend on cpu context */
1357 env = first_cpu;
3a7d929e 1358 if (env->kqemu_enabled) {
f23db169
FB
1359 ram_addr_t addr;
1360 addr = start;
1361 for(i = 0; i < len; i++) {
1362 kqemu_set_notdirty(env, addr);
1363 addr += TARGET_PAGE_SIZE;
1364 }
3a7d929e
FB
1365 }
1366#endif
f23db169
FB
1367 mask = ~dirty_flags;
1368 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1369 for(i = 0; i < len; i++)
1370 p[i] &= mask;
1371
1ccde1cb
FB
1372 /* we modify the TLB cache so that the dirty bit will be set again
1373 when accessing the range */
59817ccb 1374 start1 = start + (unsigned long)phys_ram_base;
6a00d601
FB
1375 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1376 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1377 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
6a00d601 1378 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1379 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
6a00d601 1380 }
59817ccb
FB
1381
1382#if !defined(CONFIG_SOFTMMU)
1383 /* XXX: this is expensive */
1384 {
1385 VirtPageDesc *p;
1386 int j;
1387 target_ulong addr;
1388
1389 for(i = 0; i < L1_SIZE; i++) {
1390 p = l1_virt_map[i];
1391 if (p) {
1392 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1393 for(j = 0; j < L2_SIZE; j++) {
1394 if (p->valid_tag == virt_valid_tag &&
1395 p->phys_addr >= start && p->phys_addr < end &&
1396 (p->prot & PROT_WRITE)) {
1397 if (addr < MMAP_AREA_END) {
1398 mprotect((void *)addr, TARGET_PAGE_SIZE,
1399 p->prot & ~PROT_WRITE);
1400 }
1401 }
1402 addr += TARGET_PAGE_SIZE;
1403 p++;
1404 }
1405 }
1406 }
1407 }
1408#endif
1ccde1cb
FB
1409}
1410
3a7d929e
FB
1411static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1412{
1413 ram_addr_t ram_addr;
1414
84b7b8e7
FB
1415 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1416 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
3a7d929e
FB
1417 tlb_entry->addend - (unsigned long)phys_ram_base;
1418 if (!cpu_physical_memory_is_dirty(ram_addr)) {
84b7b8e7 1419 tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
3a7d929e
FB
1420 }
1421 }
1422}
1423
1424/* update the TLB according to the current state of the dirty bits */
1425void cpu_tlb_update_dirty(CPUState *env)
1426{
1427 int i;
1428 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1429 tlb_update_dirty(&env->tlb_table[0][i]);
3a7d929e 1430 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1431 tlb_update_dirty(&env->tlb_table[1][i]);
3a7d929e
FB
1432}
1433
1ccde1cb 1434static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
108c49b8 1435 unsigned long start)
1ccde1cb
FB
1436{
1437 unsigned long addr;
84b7b8e7
FB
1438 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1439 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 1440 if (addr == start) {
84b7b8e7 1441 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1ccde1cb
FB
1442 }
1443 }
1444}
1445
1446/* update the TLB corresponding to virtual page vaddr and phys addr
1447 addr so that it is no longer dirty */
6a00d601
FB
1448static inline void tlb_set_dirty(CPUState *env,
1449 unsigned long addr, target_ulong vaddr)
1ccde1cb 1450{
1ccde1cb
FB
1451 int i;
1452
1ccde1cb
FB
1453 addr &= TARGET_PAGE_MASK;
1454 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
84b7b8e7
FB
1455 tlb_set_dirty1(&env->tlb_table[0][i], addr);
1456 tlb_set_dirty1(&env->tlb_table[1][i], addr);
9fa3e853
FB
1457}
1458
59817ccb
FB
1459/* add a new TLB entry. At most one entry for a given virtual address
1460 is permitted. Return 0 if OK or 2 if the page could not be mapped
1461 (can only happen in non SOFTMMU mode for I/O pages or pages
1462 conflicting with the host address space). */
84b7b8e7
FB
1463int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1464 target_phys_addr_t paddr, int prot,
1465 int is_user, int is_softmmu)
9fa3e853 1466{
92e873b9 1467 PhysPageDesc *p;
4f2ac237 1468 unsigned long pd;
9fa3e853 1469 unsigned int index;
4f2ac237 1470 target_ulong address;
108c49b8 1471 target_phys_addr_t addend;
9fa3e853 1472 int ret;
84b7b8e7 1473 CPUTLBEntry *te;
9fa3e853 1474
92e873b9 1475 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
9fa3e853
FB
1476 if (!p) {
1477 pd = IO_MEM_UNASSIGNED;
9fa3e853
FB
1478 } else {
1479 pd = p->phys_offset;
9fa3e853
FB
1480 }
1481#if defined(DEBUG_TLB)
3a7d929e 1482 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
84b7b8e7 1483 vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
9fa3e853
FB
1484#endif
1485
1486 ret = 0;
1487#if !defined(CONFIG_SOFTMMU)
1488 if (is_softmmu)
1489#endif
1490 {
2a4188a3 1491 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
9fa3e853
FB
1492 /* IO memory case */
1493 address = vaddr | pd;
1494 addend = paddr;
1495 } else {
1496 /* standard memory */
1497 address = vaddr;
1498 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1499 }
1500
90f18422 1501 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
9fa3e853 1502 addend -= vaddr;
84b7b8e7
FB
1503 te = &env->tlb_table[is_user][index];
1504 te->addend = addend;
67b915a5 1505 if (prot & PAGE_READ) {
84b7b8e7
FB
1506 te->addr_read = address;
1507 } else {
1508 te->addr_read = -1;
1509 }
1510 if (prot & PAGE_EXEC) {
1511 te->addr_code = address;
9fa3e853 1512 } else {
84b7b8e7 1513 te->addr_code = -1;
9fa3e853 1514 }
67b915a5 1515 if (prot & PAGE_WRITE) {
856074ec
FB
1516 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1517 (pd & IO_MEM_ROMD)) {
1518 /* write access calls the I/O callback */
1519 te->addr_write = vaddr |
1520 (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
3a7d929e 1521 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1ccde1cb 1522 !cpu_physical_memory_is_dirty(pd)) {
84b7b8e7 1523 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
9fa3e853 1524 } else {
84b7b8e7 1525 te->addr_write = address;
9fa3e853
FB
1526 }
1527 } else {
84b7b8e7 1528 te->addr_write = -1;
9fa3e853
FB
1529 }
1530 }
1531#if !defined(CONFIG_SOFTMMU)
1532 else {
1533 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1534 /* IO access: no mapping is done as it will be handled by the
1535 soft MMU */
1536 if (!(env->hflags & HF_SOFTMMU_MASK))
1537 ret = 2;
1538 } else {
1539 void *map_addr;
59817ccb
FB
1540
1541 if (vaddr >= MMAP_AREA_END) {
1542 ret = 2;
1543 } else {
1544 if (prot & PROT_WRITE) {
1545 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
d720b93d 1546#if defined(TARGET_HAS_SMC) || 1
59817ccb 1547 first_tb ||
d720b93d 1548#endif
59817ccb
FB
1549 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1550 !cpu_physical_memory_is_dirty(pd))) {
1551 /* ROM: we do as if code was inside */
1552 /* if code is present, we only map as read only and save the
1553 original mapping */
1554 VirtPageDesc *vp;
1555
90f18422 1556 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
59817ccb
FB
1557 vp->phys_addr = pd;
1558 vp->prot = prot;
1559 vp->valid_tag = virt_valid_tag;
1560 prot &= ~PAGE_WRITE;
1561 }
1562 }
1563 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1564 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1565 if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
9fa3e853 1568 }
9fa3e853
FB
1569 }
1570 }
1571 }
1572#endif
1573 return ret;
1574}
1575
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
53a5960a 1578int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
9fa3e853
FB
1579{
1580#if !defined(CONFIG_SOFTMMU)
1581 VirtPageDesc *vp;
1582
1583#if defined(DEBUG_TLB)
1584 printf("page_unprotect: addr=0x%08x\n", addr);
1585#endif
1586 addr &= TARGET_PAGE_MASK;
59817ccb
FB
1587
1588 /* if it is not mapped, no need to worry here */
1589 if (addr >= MMAP_AREA_END)
1590 return 0;
9fa3e853
FB
1591 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1592 if (!vp)
1593 return 0;
1594 /* NOTE: in this case, validate_tag is _not_ tested as it
1595 validates only the code TLB */
1596 if (vp->valid_tag != virt_valid_tag)
1597 return 0;
1598 if (!(vp->prot & PAGE_WRITE))
1599 return 0;
1600#if defined(DEBUG_TLB)
1601 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1602 addr, vp->phys_addr, vp->prot);
1603#endif
59817ccb
FB
1604 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1605 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1606 (unsigned long)addr, vp->prot);
d720b93d 1607 /* set the dirty bit */
0a962c02 1608 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
d720b93d
FB
1609 /* flush the code inside */
1610 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
9fa3e853
FB
1611 return 1;
1612#else
1613 return 0;
1614#endif
33417e70
FB
1615}
1616
0124311e
FB
1617#else
1618
ee8b7021 1619void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
1620{
1621}
1622
2e12669a 1623void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
1624{
1625}
1626
84b7b8e7
FB
1627int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1628 target_phys_addr_t paddr, int prot,
1629 int is_user, int is_softmmu)
9fa3e853
FB
1630{
1631 return 0;
1632}
0124311e 1633
9fa3e853
FB
1634/* dump memory mappings */
1635void page_dump(FILE *f)
33417e70 1636{
9fa3e853
FB
1637 unsigned long start, end;
1638 int i, j, prot, prot1;
1639 PageDesc *p;
33417e70 1640
9fa3e853
FB
1641 fprintf(f, "%-8s %-8s %-8s %s\n",
1642 "start", "end", "size", "prot");
1643 start = -1;
1644 end = -1;
1645 prot = 0;
1646 for(i = 0; i <= L1_SIZE; i++) {
1647 if (i < L1_SIZE)
1648 p = l1_map[i];
1649 else
1650 p = NULL;
1651 for(j = 0;j < L2_SIZE; j++) {
1652 if (!p)
1653 prot1 = 0;
1654 else
1655 prot1 = p[j].flags;
1656 if (prot1 != prot) {
1657 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1658 if (start != -1) {
1659 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1660 start, end, end - start,
1661 prot & PAGE_READ ? 'r' : '-',
1662 prot & PAGE_WRITE ? 'w' : '-',
1663 prot & PAGE_EXEC ? 'x' : '-');
1664 }
1665 if (prot1 != 0)
1666 start = end;
1667 else
1668 start = -1;
1669 prot = prot1;
1670 }
1671 if (!p)
1672 break;
1673 }
33417e70 1674 }
33417e70
FB
1675}
1676
53a5960a 1677int page_get_flags(target_ulong address)
33417e70 1678{
9fa3e853
FB
1679 PageDesc *p;
1680
1681 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 1682 if (!p)
9fa3e853
FB
1683 return 0;
1684 return p->flags;
1685}
1686
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is set automatically
   depending on PAGE_WRITE */
53a5960a 1690void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853
FB
1691{
1692 PageDesc *p;
53a5960a 1693 target_ulong addr;
9fa3e853
FB
1694
1695 start = start & TARGET_PAGE_MASK;
1696 end = TARGET_PAGE_ALIGN(end);
1697 if (flags & PAGE_WRITE)
1698 flags |= PAGE_WRITE_ORG;
1699 spin_lock(&tb_lock);
1700 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1701 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1702 /* if the write protection is set, then we invalidate the code
1703 inside */
1704 if (!(p->flags & PAGE_WRITE) &&
1705 (flags & PAGE_WRITE) &&
1706 p->first_tb) {
d720b93d 1707 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
1708 }
1709 p->flags = flags;
1710 }
1711 spin_unlock(&tb_lock);
33417e70
FB
1712}
1713
9fa3e853
FB
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
53a5960a 1716int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853
FB
1717{
1718 unsigned int page_index, prot, pindex;
1719 PageDesc *p, *p1;
53a5960a 1720 target_ulong host_start, host_end, addr;
9fa3e853 1721
83fb7adf 1722 host_start = address & qemu_host_page_mask;
9fa3e853
FB
1723 page_index = host_start >> TARGET_PAGE_BITS;
1724 p1 = page_find(page_index);
1725 if (!p1)
1726 return 0;
83fb7adf 1727 host_end = host_start + qemu_host_page_size;
9fa3e853
FB
1728 p = p1;
1729 prot = 0;
1730 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1731 prot |= p->flags;
1732 p++;
1733 }
1734 /* if the page was really writable, then we change its
1735 protection back to writable */
1736 if (prot & PAGE_WRITE_ORG) {
1737 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1738 if (!(p1[pindex].flags & PAGE_WRITE)) {
53a5960a 1739 mprotect((void *)g2h(host_start), qemu_host_page_size,
9fa3e853
FB
1740 (prot & PAGE_BITS) | PAGE_WRITE);
1741 p1[pindex].flags |= PAGE_WRITE;
1742 /* and since the content will be modified, we must invalidate
1743 the corresponding translated code. */
d720b93d 1744 tb_invalidate_phys_page(address, pc, puc);
9fa3e853
FB
1745#ifdef DEBUG_TB_CHECK
1746 tb_invalidate_check(address);
1747#endif
1748 return 1;
1749 }
1750 }
1751 return 0;
1752}
1753
1754/* call this function when system calls directly modify a memory area */
53a5960a
PB
1755/* ??? This should be redundant now we have lock_user. */
1756void page_unprotect_range(target_ulong data, target_ulong data_size)
9fa3e853 1757{
53a5960a 1758 target_ulong start, end, addr;
9fa3e853 1759
53a5960a 1760 start = data;
9fa3e853
FB
1761 end = start + data_size;
1762 start &= TARGET_PAGE_MASK;
1763 end = TARGET_PAGE_ALIGN(end);
1764 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
d720b93d 1765 page_unprotect(addr, 0, NULL);
9fa3e853
FB
1766 }
1767}
1768
6a00d601
FB
1769static inline void tlb_set_dirty(CPUState *env,
1770 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
1771{
1772}
9fa3e853
FB
1773#endif /* defined(CONFIG_USER_ONLY) */
1774
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
        p->phys_offset = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
            (phys_offset & IO_MEM_ROMD))
            phys_offset += TARGET_PAGE_SIZE;
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}

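/* Illustrative sketch (not part of the original file): roughly how a board
   model wires guest-physical ranges either to the RAM allocator or to ROM.
   The addresses, 'ram_size', 'bios_size' and 'bios_offset' values are
   hypothetical. */
#if 0
/* map normal RAM at guest-physical 0: phys_offset is an offset into
   phys_ram_base and the low bits select IO_MEM_RAM */
cpu_register_physical_memory(0x00000000, ram_size, 0 | IO_MEM_RAM);

/* map a BIOS image near the top of the address space: the same kind of
   RAM offset, but tagged IO_MEM_ROM so that guest writes are discarded */
cpu_register_physical_memory(0xfffe0000, bios_size, bios_offset | IO_MEM_ROM);
#endif
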
/* XXX: temporary until new memory mapping API */
uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}

static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read 0x%08x\n", (int)addr);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write 0x%08x = 0x%x\n", (int)addr, val);
#endif
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};

static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}

/* mem_read and mem_write are arrays of functions containing the
   functions to access the byte (index 0), word (index 1) and dword
   (index 2) sizes. All functions must be supplied. If io_index is
   non-zero, the corresponding I/O zone is modified. If it is zero, a
   new I/O zone is allocated. The return value can be used with
   cpu_register_physical_memory(). -1 is returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}
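
/* Illustrative sketch (not part of the original file): the usual pattern for
   a device model is to register its MMIO callbacks with
   cpu_register_io_memory() and then map the returned token at a guest
   physical address with cpu_register_physical_memory().  Every name used
   below (MyDevState, mydev_*, MYDEV_MMIO_BASE/SIZE) is hypothetical. */
#if 0
typedef struct MyDevState {
    uint32_t status;
} MyDevState;

static uint32_t mydev_readl(void *opaque, target_phys_addr_t addr)
{
    MyDevState *s = opaque;
    return s->status;          /* the same callback is reused for all sizes */
}

static void mydev_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    MyDevState *s = opaque;
    s->status = val;
}

static CPUReadMemoryFunc *mydev_read[3] = {
    mydev_readl, mydev_readl, mydev_readl,
};
static CPUWriteMemoryFunc *mydev_write[3] = {
    mydev_writel, mydev_writel, mydev_writel,
};

static void mydev_init(MyDevState *s)
{
    /* io_index == 0 allocates a new I/O zone; 's' is passed back as the
       opaque pointer of each callback */
    int iomemtype = cpu_register_io_memory(0, mydev_read, mydev_write, s);
    cpu_register_physical_memory(MYDEV_MMIO_BASE, MYDEV_MMIO_SIZE, iomemtype);
}
#endif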

CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            p = lock_user(addr, len, 0);
            memcpy(p, buf, len);
            unlock_user(p, addr, len);
        } else {
            if (!(flags & PAGE_READ))
                return;
            p = lock_user(addr, len, 1);
            memcpy(buf, p, len);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

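/* Illustrative sketch (not part of the original file): a DMA-capable device
   model copies guest data through cpu_physical_memory_rw(), which routes
   each chunk either to RAM (with dirty tracking and TB invalidation) or to
   registered I/O callbacks.  dma_to_guest() and its arguments are
   hypothetical. */
#if 0
static void dma_to_guest(target_phys_addr_t dest, const uint8_t *src, int len)
{
    /* is_write == 1: copy the buffer into guest memory */
    cpu_physical_memory_rw(dest, (uint8_t *)src, len, 1);
}
#endif
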
/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}


/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* warning: addr must be aligned. The RAM page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}

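/* Illustrative sketch (not part of the original file): the intended caller is
   a target MMU page-table walker that sets accessed/dirty bits in a guest
   PTE.  Using stl_phys_notdirty() keeps QEMU's own dirty bits untouched, so
   a target that relies on those bits to detect guest PTE modification does
   not mistake QEMU's bookkeeping write for a guest store.  The PG_* masks are
   borrowed from the i386 target purely for illustration; pte and pte_addr are
   hypothetical locals of such a walker. */
#if 0
if (!(pte & PG_ACCESSED_MASK)) {
    pte |= PG_ACCESSED_MASK;
    if (is_write)
        pte |= PG_DIRTY_MASK;
    stl_phys_notdirty(pte_addr, pte);
}
#endif
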
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

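/* Illustrative sketch (not part of the original file): device models commonly
   use these fixed-width helpers to walk descriptors kept in guest memory,
   e.g. loading a buffer address and storing back a status word.  desc_addr,
   the field offsets and STATUS_DONE are hypothetical. */
#if 0
uint32_t buf_addr = ldl_phys(desc_addr);       /* dword from guest RAM or MMIO */
uint32_t buf_len  = lduw_phys(desc_addr + 4);  /* 16-bit field, target byte order */
stl_phys(desc_addr + 8, STATUS_DONE);          /* the store marks the RAM page dirty */
#endif
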
#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page is mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

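/* Illustrative sketch (not part of the original file): this is the path a
   debugger stub takes, translating guest-virtual addresses page by page and
   then falling back on cpu_physical_memory_rw().  The surrounding gdb packet
   handling and mem_buf are hypothetical. */
#if 0
uint8_t mem_buf[1024];

/* read 'len' bytes at guest-virtual 'addr' for a gdb 'm' packet */
if (cpu_memory_rw_debug(env, addr, mem_buf, len, 0) != 0) {
    /* address not mapped by the guest MMU: report an error to gdb */
}
#endif
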
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "TB count %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif