/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END   0xa8000000

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;

int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    uint32_t phys_offset;
} PhysPageDesc;

/* Note: the VirtPage handling is obsolete and will be removed
   ASAP */
typedef struct VirtPageDesc {
    /* physical address of code page. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    target_ulong phys_addr;
    unsigned int valid_tag;
#if !defined(CONFIG_SOFTMMU)
    /* original page access rights. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    unsigned int prot;
#endif
} VirtPageDesc;

#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

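/* Page metadata lives in two-level tables: each entry of l1_map (and
   l1_phys_map) points to a lazily allocated array of L2_SIZE
   descriptors, so only pages that are actually touched cost memory.
   A page index is split into (index >> L2_BITS) for the level 1 slot
   and (index & (L2_SIZE - 1)) for the level 2 slot. */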
/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

#if !defined(CONFIG_USER_ONLY)
#if TARGET_LONG_BITS > 32
#define VIRT_L_BITS 9
#define VIRT_L_SIZE (1 << VIRT_L_BITS)
static void *l1_virt_map[VIRT_L_SIZE];
#else
static VirtPageDesc *l1_virt_map[L1_SIZE];
#endif
static unsigned int virt_valid_tag;
#endif

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
#if !defined(CONFIG_USER_ONLY)
    virt_valid_tag = 1;
#endif
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(PhysPageDesc *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(PhysPageDesc *));
}

static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find_alloc(unsigned int index)
{
    PhysPageDesc **lp, *p;

    lp = &l1_phys_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(unsigned int index)
{
    PhysPageDesc *p;

    p = l1_phys_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(CPUState *env, target_ulong addr);
static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr);

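/* For targets with more than 32 bits of virtual address space, the
   flat l1_virt_map is replaced by a radix tree: five levels of
   VIRT_L_SIZE pointer arrays followed by a leaf array of VirtPageDesc,
   each level indexed by VIRT_L_BITS bits of the page index.
   Intermediate levels are only allocated on demand. */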
static VirtPageDesc *virt_page_find_alloc(target_ulong index, int alloc)
{
#if TARGET_LONG_BITS > 32
    void **p, **lp;

    p = l1_virt_map;
    lp = p + ((index >> (5 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
    p = *lp;
    if (!p) {
        if (!alloc)
            return NULL;
        p = qemu_mallocz(sizeof(void *) * VIRT_L_SIZE);
        *lp = p;
    }
    lp = p + ((index >> (4 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
    p = *lp;
    if (!p) {
        if (!alloc)
            return NULL;
        p = qemu_mallocz(sizeof(void *) * VIRT_L_SIZE);
        *lp = p;
    }
    lp = p + ((index >> (3 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
    p = *lp;
    if (!p) {
        if (!alloc)
            return NULL;
        p = qemu_mallocz(sizeof(void *) * VIRT_L_SIZE);
        *lp = p;
    }
    lp = p + ((index >> (2 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
    p = *lp;
    if (!p) {
        if (!alloc)
            return NULL;
        p = qemu_mallocz(sizeof(void *) * VIRT_L_SIZE);
        *lp = p;
    }
    lp = p + ((index >> (1 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
    p = *lp;
    if (!p) {
        if (!alloc)
            return NULL;
        p = qemu_mallocz(sizeof(VirtPageDesc) * VIRT_L_SIZE);
        *lp = p;
    }
    return ((VirtPageDesc *)p) + (index & (VIRT_L_SIZE - 1));
#else
    VirtPageDesc *p, **lp;

    lp = &l1_virt_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_mallocz(sizeof(VirtPageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
#endif
}

static inline VirtPageDesc *virt_page_find(target_ulong index)
{
    return virt_page_find_alloc(index, 0);
}

#if TARGET_LONG_BITS > 32
static void virt_page_flush_internal(void **p, int level)
{
    int i;
    if (level == 0) {
        VirtPageDesc *q = (VirtPageDesc *)p;
        for(i = 0; i < VIRT_L_SIZE; i++)
            q[i].valid_tag = 0;
    } else {
        level--;
        for(i = 0; i < VIRT_L_SIZE; i++) {
            if (p[i])
                virt_page_flush_internal(p[i], level);
        }
    }
}
#endif

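/* NOTE: virt_page_flush() below is O(1) in the common case: bumping
   virt_valid_tag implicitly invalidates every VirtPageDesc, since
   entries whose valid_tag no longer matches are treated as stale.
   Only when the tag wraps around to zero must all descriptors really
   be cleared. */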
static void virt_page_flush(void)
{
    virt_valid_tag++;

    if (virt_valid_tag == 0) {
        virt_valid_tag = 1;
#if TARGET_LONG_BITS > 32
        virt_page_flush_internal(l1_virt_map, 5);
#else
        {
            int i, j;
            VirtPageDesc *p;
            for(i = 0; i < L1_SIZE; i++) {
                p = l1_virt_map[i];
                if (p) {
                    for(j = 0; j < L2_SIZE; j++)
                        p[j].valid_tag = 0;
                }
            }
        }
#endif
    }
}
#else
static void virt_page_flush(void)
{
}
#endif

void cpu_exec_init(void)
{
    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env)
{
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;
    memset (tb_hash, 0, CODE_GEN_HASH_SIZE * sizeof (void *));
    virt_page_flush();

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

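/* TBs are chained on per-page lists through tb->page_next[] and on
   jump lists through tb->jmp_next[]. The low two bits of each stored
   pointer are a tag: in page lists they select which of the TB's (at
   most two) pages the link belongs to, and in jump lists the value 2
   marks the end of the circular list. */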
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

static inline void tb_invalidate(TranslationBlock *tb)
{
    unsigned int h, n1;
    TranslationBlock *tb1, *tb2, **ptb;

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_hash_func(tb->pc);
    ptb = &tb_hash[h];
    for(;;) {
        tb1 = *ptb;
        /* NOTE: the TB is not necessarily linked in the hash. It
           indicates that it is not currently used */
        if (tb1 == NULL)
            return;
        if (tb1 == tb) {
            *ptb = tb1->hash_next;
            break;
        }
        ptb = &tb1->hash_next;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
}

static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    PageDesc *p;
    unsigned int h;
    target_ulong phys_pc;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidate(tb);
    tb_phys_invalidate_count++;
}

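/* set bits [start, start + len[ in the bitmap 'tab', handling the
   partial leading byte, whole middle bytes and partial trailing byte
   separately. E.g. set_bits(tab, 3, 7) sets bits 3..7 of tab[0] and
   bits 0..1 of tab[1]. */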
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

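/* Precise self-modifying code support: when a write hits the TB that
   is currently executing, the CPU state is rolled back to the store
   instruction with cpu_restore_state() and a single-instruction TB
   (CF_SINGLE_INSN) is regenerated with tb_gen_code() below, so the
   store can complete without invalidating its own translation. */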
#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            saved_tb = env->current_tb;
            env->current_tb = NULL;
            tb_phys_invalidate(tb, -1);
            env->current_tb = saved_tb;
            if (env->interrupt_request && env->current_tb)
                cpu_interrupt(env, env->interrupt_request);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

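/* Self-modifying code fast path: once a page has seen
   SMC_BITMAP_USE_THRESHOLD cpu write accesses, a per-page bitmap of
   the bytes covered by translated code is built, so that later small
   writes can test a few bits instead of walking the TB list. */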
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                    cpu_single_env->mem_write_vaddr, len,
                    cpu_single_env->eip,
                    cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, unsigned int page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        unsigned long host_start, host_end, addr;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        host_start = page_addr & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;
        prot = 0;
        for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
            prot |= page_get_flags(addr);
        mprotect((void *)host_start, qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               host_start);
#endif
        p->flags &= ~PAGE_WRITE;
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        target_ulong virt_addr;

        virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS);
        tlb_protect_code(cpu_single_env, virt_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;
#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

/* link the tb with the other TBs */
void tb_link(TranslationBlock *tb)
{
#if !defined(CONFIG_USER_ONLY)
    {
        VirtPageDesc *vp;
        target_ulong addr;

        /* save the code memory mappings (needed to invalidate the code) */
        addr = tb->pc & TARGET_PAGE_MASK;
        vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
#ifdef DEBUG_TLB_CHECK
        if (vp->valid_tag == virt_valid_tag &&
            vp->phys_addr != tb->page_addr[0]) {
            printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
                   addr, tb->page_addr[0], vp->phys_addr);
        }
#endif
        vp->phys_addr = tb->page_addr[0];
        if (vp->valid_tag != virt_valid_tag) {
            vp->valid_tag = virt_valid_tag;
#if !defined(CONFIG_SOFTMMU)
            vp->prot = 0;
#endif
        }

        if (tb->page_addr[1] != -1) {
            addr += TARGET_PAGE_SIZE;
            vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
#ifdef DEBUG_TLB_CHECK
            if (vp->valid_tag == virt_valid_tag &&
                vp->phys_addr != tb->page_addr[1]) {
                printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
                       addr, tb->page_addr[1], vp->phys_addr);
            }
#endif
            vp->phys_addr = tb->page_addr[1];
            if (vp->valid_tag != virt_valid_tag) {
                vp->valid_tag = virt_valid_tag;
#if !defined(CONFIG_SOFTMMU)
                vp->prot = 0;
#endif
            }
        }
    }
#endif

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
#ifdef USE_CODE_COPY
    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;
#endif

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);
}

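/* NOTE: the binary search in tb_find_pc() relies on tbs[] being
   sorted by tc_ptr, which holds because translated code is only ever
   appended at code_gen_ptr and the whole buffer is reset by
   tb_flush(). */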
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_ulong phys_addr;

    phys_addr = cpu_get_phys_page_debug(env, pc);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0);
}
#endif

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

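/* Example: cpu_str_to_log_mask("in_asm,cpu") returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_CPU, "all" selects every item, and
   an unknown name makes the whole call return 0. */
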
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    va_end(ap);
    abort();
}

#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_read[0][i].address = -1;
        env->tlb_write[0][i].address = -1;
        env->tlb_read[1][i].address = -1;
        env->tlb_write[1][i].address = -1;
    }

    virt_page_flush();
    memset (tb_hash, 0, CODE_GEN_HASH_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
        tlb_entry->address = -1;
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i, n;
    VirtPageDesc *vp;
    PageDesc *p;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: 0x%08x\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_read[0][i], addr);
    tlb_flush_entry(&env->tlb_write[0][i], addr);
    tlb_flush_entry(&env->tlb_read[1][i], addr);
    tlb_flush_entry(&env->tlb_write[1][i], addr);

    /* remove from the virtual pc hash table all the TB at this
       virtual address */

    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (vp && vp->valid_tag == virt_valid_tag) {
        p = page_find(vp->phys_addr >> TARGET_PAGE_BITS);
        if (p) {
            /* we remove all the links to the TBs in this virtual page */
            tb = p->first_tb;
            while (tb != NULL) {
                n = (long)tb & 3;
                tb = (TranslationBlock *)((long)tb & ~3);
                if ((tb->pc & TARGET_PAGE_MASK) == addr ||
                    ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr) {
                    tb_invalidate(tb);
                }
                tb = tb->page_next[n];
            }
        }
        vp->valid_tag = 0;
    }

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}

static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_CODE &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_ROM) {
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_CODE;
    }
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(CPUState *env, target_ulong addr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_protect_code1(&env->tlb_write[0][i], addr);
    tlb_protect_code1(&env->tlb_write[1][i], addr);
#if !defined(CONFIG_SOFTMMU)
    /* NOTE: as we generated the code for this page, it is already at
       least readable */
    if (addr < MMAP_AREA_END)
        mprotect((void *)addr, TARGET_PAGE_SIZE, PROT_READ);
#endif
}

static inline void tlb_unprotect_code2(CPUTLBEntry *tlb_entry,
                                       unsigned long phys_addr)
{
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE &&
        ((tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend) == phys_addr) {
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
    }
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr)
{
    int i;

    phys_addr &= TARGET_PAGE_MASK;
    phys_addr += (long)phys_ram_base;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_unprotect_code2(&env->tlb_write[0][i], phys_addr);
    tlb_unprotect_code2(&env->tlb_write[1][i], phys_addr);
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}

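/* Dirty memory tracking: phys_ram_dirty keeps one byte of dirty flags
   per target page. Clearing flags below also retargets the write TLB
   entries of the range to IO_MEM_NOTDIRTY, so the next store to such
   a page takes the slow path and can set the flags again. */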
void cpu_physical_memory_reset_dirty(target_ulong start, target_ulong end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    len = length >> TARGET_PAGE_BITS;
    for(i = 0; i < len; i++)
        p[i] &= mask;

    env = cpu_single_env;
    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
    CPUState *env = cpu_single_env;
    int i;

    phys_ram_dirty[(addr - (unsigned long)phys_ram_base) >> TARGET_PAGE_BITS] = 0xff;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_write[0][i], addr);
    tlb_set_dirty1(&env->tlb_write[1][i], addr);
}

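/* A software TLB entry stores the target virtual page address in the
   TARGET_PAGE_MASK bits of 'address' and an io_index or special tag
   (IO_MEM_RAM/ROM/CODE/NOTDIRTY) in the low bits; 'addend' is the
   value to add to a target virtual address to obtain the
   corresponding host address. */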
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    TranslationBlock *first_tb;
    unsigned int index;
    target_ulong address;
    unsigned long addend;
    int ret;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    first_tb = NULL;
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        PageDesc *p1;
        pd = p->phys_offset;
        if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
            /* NOTE: we also allocate the page at this stage */
            p1 = page_find_alloc(pd >> TARGET_PAGE_BITS);
            first_tb = p1->first_tb;
        }
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=0x%08x paddr=0x%08x prot=%x u=%d c=%d smmu=%d pd=0x%08x\n",
           vaddr, paddr, prot, is_user, (first_tb != NULL), is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        if (prot & PAGE_READ) {
            env->tlb_read[is_user][index].address = address;
            env->tlb_read[is_user][index].addend = addend;
        } else {
            env->tlb_read[is_user][index].address = -1;
            env->tlb_read[is_user][index].addend = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
                /* ROM: access is ignored (same as unassigned) */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
                env->tlb_write[is_user][index].addend = addend;
            } else
                /* XXX: the PowerPC code seems not ready to handle
                   self modifying code with DCBI */
#if defined(TARGET_HAS_SMC) || 1
            if (first_tb) {
                /* if code is present, we use a specific memory
                   handler. It works only for physical memory access */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_CODE;
                env->tlb_write[is_user][index].addend = addend;
            } else
#endif
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                !cpu_physical_memory_is_dirty(pd)) {
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
                env->tlb_write[is_user][index].addend = addend;
            } else {
                env->tlb_write[is_user][index].address = address;
                env->tlb_write[is_user][index].addend = addend;
            }
        } else {
            env->tlb_write[is_user][index].address = -1;
            env->tlb_write[is_user][index].addend = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0; j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

int page_get_flags(unsigned long address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

1839/* modify the flags of a page and invalidate the code if
1840 necessary. The flag PAGE_WRITE_ORG is positionned automatically
1841 depending on PAGE_WRITE */
1842void page_set_flags(unsigned long start, unsigned long end, int flags)
1843{
1844 PageDesc *p;
1845 unsigned long addr;
1846
1847 start = start & TARGET_PAGE_MASK;
1848 end = TARGET_PAGE_ALIGN(end);
1849 if (flags & PAGE_WRITE)
1850 flags |= PAGE_WRITE_ORG;
1851 spin_lock(&tb_lock);
1852 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1853 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1854 /* if the write protection is set, then we invalidate the code
1855 inside */
1856 if (!(p->flags & PAGE_WRITE) &&
1857 (flags & PAGE_WRITE) &&
1858 p->first_tb) {
d720b93d 1859 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
1860 }
1861 p->flags = flags;
1862 }
1863 spin_unlock(&tb_lock);
33417e70
FB
1864}
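
/* Illustration (user mode, hypothetical addresses): a guest binary
   loader that has just mmap()ed a region could publish it as

       page_set_flags(start, start + size,
                      PAGE_READ | PAGE_WRITE | PAGE_VALID);

   Because PAGE_WRITE is set, PAGE_WRITE_ORG is recorded as well; it
   remembers that the mapping was originally writable even after the
   page is write-protected to guard translated code, which is what
   page_unprotect() below relies on. */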

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    unsigned long host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)host_start, qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}
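
/* Note on the protection loop above: when the host page size is
   larger than the target page size, one host page covers several
   target pages, so their flags are OR'ed together and mprotect() has
   to restore write access for the whole host page at once; only the
   target page that actually faulted gets PAGE_WRITE set back in its
   descriptor. */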

/* call this function when system calls directly modify a memory area */
void page_unprotect_range(uint8_t *data, unsigned long data_size)
{
    unsigned long start, end, addr;

    start = (unsigned long)data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr, 0, NULL);
    }
}

static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

/* register physical memory. 'size' is rounded up to a multiple of the
   target page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then
   it is an io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    unsigned long addr, end_addr;
    PhysPageDesc *p;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS);
        p->phys_offset = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
            phys_offset += TARGET_PAGE_SIZE;
    }
}
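
/* Illustration (hypothetical board code): RAM is registered by
   pointing guest physical addresses at offsets of phys_ram_base, and
   ROM by OR'ing in IO_MEM_ROM. The addresses, 'ram_size', 'bios_size'
   and 'bios_offset' below are examples only:

       cpu_register_physical_memory(0x00000000, ram_size, 0);
       cpu_register_physical_memory(0xfffc0000, bios_size,
                                    bios_offset | IO_MEM_ROM);
*/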

static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

/* self-modifying code support in soft mmu mode: writing to a page
   containing code comes to these functions */

static void code_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 1);
#endif
    stb_p((uint8_t *)(long)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 0xff;
}

static void code_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 2);
#endif
    stw_p((uint8_t *)(long)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 0xff;
}

static void code_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 4);
#endif
    stl_p((uint8_t *)(long)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 0xff;
}

static CPUReadMemoryFunc *code_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *code_mem_write[3] = {
    code_mem_writeb,
    code_mem_writew,
    code_mem_writel,
};
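
/* For reference: RAM pages whose phys_offset is tagged IO_MEM_CODE
   contain translated code, so guest stores to them are routed through
   the handlers above; each store first invalidates the TBs overlapping
   the written bytes, then performs the store and sets the page's dirty
   byte. Reads never come here (code_mem_read is all NULLs) because
   read access to such pages is served directly from RAM. */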

static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stb_p((uint8_t *)(long)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stw_p((uint8_t *)(long)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stl_p((uint8_t *)(long)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};

static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, code_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_CODE >> IO_MEM_SHIFT, code_mem_read, code_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, code_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
}
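
/* phys_ram_dirty holds one byte per target page of guest RAM. The
   write handlers above store 0xff rather than 1 so that each bit can
   serve as an independent dirty flag for a different consumer of the
   dirty map (for example code invalidation and display refresh). */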

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is greater than
   zero, the corresponding io zone is modified. If it is zero, a new
   io zone is allocated. The return value can be used with
   cpu_register_physical_memory(). (-1) is returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        /* fail if the io table is already full */
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}
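
/* Illustration (hypothetical device): allocate a fresh slot by
   passing 0 as io_index, then map it at a guest physical address.
   'mydev_read', 'mydev_write', 's' and the address are examples only:

       int io = cpu_register_io_memory(0, mydev_read, mydev_write, s);
       cpu_register_physical_memory(0x84000000, 0x1000, io);

   The return value is the slot index shifted by IO_MEM_SHIFT, i.e.
   exactly the form expected in a PhysPageDesc phys_offset. */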

CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* copy only the chunk that fits in the current page */
            memcpy((uint8_t *)addr, buf, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            memcpy(buf, (uint8_t *)addr, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

/* never used */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    return 0;
}

void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
}

void stl_phys(target_phys_addr_t addr, uint32_t val)
{
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != 0) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] = 0xff;
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                (pd & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
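
/* Illustration: device emulation can DMA into guest RAM through this
   function; 'dma_addr' and the buffer are examples only:

       uint8_t buf[512];
       cpu_physical_memory_rw(dma_addr, buf, 512, 1);

   The write path above keeps translated code coherent by invalidating
   TBs in the touched range and marks the RAM pages dirty, so callers
   need no extra bookkeeping. */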

/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        (pd & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != 0) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}
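
/* A plausible use, per the comment above: a target MMU helper that
   sets the accessed/dirty bits inside a guest page table entry can go
   through stl_phys_notdirty() so that maintaining the PTE does not
   itself flip the dirty state being tracked. */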

/* warning: addr must be aligned */
/* XXX: optimize code invalidation test */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != 0) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        /* invalidate code */
        tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
        /* set dirty bit */
        phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] = 0xff;
    }
}

#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
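
/* Illustration: a debugger stub reading guest virtual memory; 'vaddr'
   and 'buf' are examples only:

       if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0)
           report the unmapped address to the debugger;

   cpu_get_phys_page_debug() is consulted page by page, so buffers
   crossing page boundaries are handled transparently. */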

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles? */
    cpu_fprintf(f, "TB count %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
}
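
/* For reference: each include of softmmu_template.h below expands the
   load helpers for one access size (1 << SHIFT bytes). MMUSUFFIX makes
   the generated names end in _cmmu and SOFTMMU_CODE_ACCESS selects the
   code-fetch variants used during translation; GETPC() is stubbed to
   NULL because these fetches do not originate from inside a translated
   block. */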

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif