/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - code_gen_max_block_size())

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END   0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;

ram_addr_t phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static ram_addr_t phys_ram_alloc_offset = 0;

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    ram_addr_t phys_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
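/* e.g. with the common 4 KB target page (TARGET_PAGE_BITS == 12),
   L1_BITS is 32 - 10 - 12 = 10, so a page index splits into a 10-bit
   l1_map slot and a 10-bit offset into the second-level PageDesc
   array: a classical two-level page table over the 32-bit space. */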

static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
#if defined(CONFIG_SOFTMMU)
static int io_mem_watch;
#endif

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
} subpage_t;

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    page_set_flags(TARGET_PAGE_ALIGN(startaddr),
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
    }
#endif
}

static inline PageDesc *page_find_alloc(target_ulong index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(target_ulong index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
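/* Note: phys_page_find() never allocates: a NULL result means the
   physical page has no registered backing yet, which callers treat
   as IO_MEM_UNASSIGNED. */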

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#endif

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    if (!code_gen_ptr) {
        cpu_gen_init();
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->nb_watchpoints = 0;
    *penv = env;
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > CODE_GEN_BUFFER_SIZE)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

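/* The low two bits of the TB pointers stored in page lists and jump
   lists are used as a tag: values 0 and 1 select which of the TB's
   (up to) two pages or jump slots the link belongs to, and the value
   2 marks the head of a circular list (e.g. jmp_first is set to
   (long)tb | 2).  Hence the recurring "(long)tb & 3" /
   "(long)tb & ~3" idiom in the helpers below. */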
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

static inline void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the per-CPU jump cache */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

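/* set 'len' consecutive bits starting at bit index 'start' in the
   byte array 'tab'; e.g. set_bits(tab, 6, 4) sets bits 6..9, touching
   two bytes.  build_page_bitmap() uses this to record which bytes of
   a page are covered by translated code. */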
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

FB
632#ifdef TARGET_HAS_PRECISE_SMC
633
5fafdf24 634static void tb_gen_code(CPUState *env,
d720b93d
FB
635 target_ulong pc, target_ulong cs_base, int flags,
636 int cflags)
637{
638 TranslationBlock *tb;
639 uint8_t *tc_ptr;
640 target_ulong phys_pc, phys_page2, virt_page2;
641 int code_gen_size;
642
c27004ec
FB
643 phys_pc = get_phys_addr_code(env, pc);
644 tb = tb_alloc(pc);
d720b93d
FB
645 if (!tb) {
646 /* flush must be done */
647 tb_flush(env);
648 /* cannot fail at this point */
c27004ec 649 tb = tb_alloc(pc);
d720b93d
FB
650 }
651 tc_ptr = code_gen_ptr;
652 tb->tc_ptr = tc_ptr;
653 tb->cs_base = cs_base;
654 tb->flags = flags;
655 tb->cflags = cflags;
d07bde88 656 cpu_gen_code(env, tb, &code_gen_size);
d720b93d 657 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
3b46e624 658
d720b93d 659 /* check next page if needed */
c27004ec 660 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
d720b93d 661 phys_page2 = -1;
c27004ec 662 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
d720b93d
FB
663 phys_page2 = get_phys_addr_code(env, virt_page2);
664 }
665 tb_link_phys(tb, phys_pc, phys_page2);
666}
667#endif
3b46e624 668
9fa3e853
FB
669/* invalidate all TBs which intersect with the target physical page
670 starting in range [start;end[. NOTE: start and end must refer to
d720b93d
FB
671 the same physical page. 'is_cpu_write_access' should be true if called
672 from a real cpu write access: the virtual CPU will exit the current
673 TB if code is modified inside this TB. */
00f82b8a 674void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
d720b93d
FB
675 int is_cpu_write_access)
676{
677 int n, current_tb_modified, current_tb_not_found, current_flags;
d720b93d 678 CPUState *env = cpu_single_env;
9fa3e853 679 PageDesc *p;
ea1c1802 680 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
9fa3e853 681 target_ulong tb_start, tb_end;
d720b93d 682 target_ulong current_pc, current_cs_base;
9fa3e853
FB
683
684 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 685 if (!p)
9fa3e853 686 return;
5fafdf24 687 if (!p->code_bitmap &&
d720b93d
FB
688 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
689 is_cpu_write_access) {
9fa3e853
FB
690 /* build code bitmap */
691 build_page_bitmap(p);
692 }
693
694 /* we remove all the TBs in the range [start, end[ */
695 /* XXX: see if in some cases it could be faster to invalidate all the code */
d720b93d
FB
696 current_tb_not_found = is_cpu_write_access;
697 current_tb_modified = 0;
698 current_tb = NULL; /* avoid warning */
699 current_pc = 0; /* avoid warning */
700 current_cs_base = 0; /* avoid warning */
701 current_flags = 0; /* avoid warning */
9fa3e853
FB
702 tb = p->first_tb;
703 while (tb != NULL) {
704 n = (long)tb & 3;
705 tb = (TranslationBlock *)((long)tb & ~3);
706 tb_next = tb->page_next[n];
707 /* NOTE: this is subtle as a TB may span two physical pages */
708 if (n == 0) {
709 /* NOTE: tb_end may be after the end of the page, but
710 it is not a problem */
711 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
712 tb_end = tb_start + tb->size;
713 } else {
714 tb_start = tb->page_addr[1];
715 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
716 }
717 if (!(tb_end <= start || tb_start >= end)) {
d720b93d
FB
718#ifdef TARGET_HAS_PRECISE_SMC
719 if (current_tb_not_found) {
720 current_tb_not_found = 0;
721 current_tb = NULL;
722 if (env->mem_write_pc) {
723 /* now we have a real cpu fault */
724 current_tb = tb_find_pc(env->mem_write_pc);
725 }
726 }
727 if (current_tb == tb &&
728 !(current_tb->cflags & CF_SINGLE_INSN)) {
729 /* If we are modifying the current TB, we must stop
730 its execution. We could be more precise by checking
731 that the modification is after the current PC, but it
732 would require a specialized function to partially
733 restore the CPU state */
3b46e624 734
d720b93d 735 current_tb_modified = 1;
5fafdf24 736 cpu_restore_state(current_tb, env,
d720b93d
FB
737 env->mem_write_pc, NULL);
738#if defined(TARGET_I386)
739 current_flags = env->hflags;
740 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
741 current_cs_base = (target_ulong)env->segs[R_CS].base;
742 current_pc = current_cs_base + env->eip;
743#else
744#error unsupported CPU
745#endif
746 }
747#endif /* TARGET_HAS_PRECISE_SMC */
6f5a9f7e
FB
748 /* we need to do that to handle the case where a signal
749 occurs while doing tb_phys_invalidate() */
750 saved_tb = NULL;
751 if (env) {
752 saved_tb = env->current_tb;
753 env->current_tb = NULL;
754 }
9fa3e853 755 tb_phys_invalidate(tb, -1);
6f5a9f7e
FB
756 if (env) {
757 env->current_tb = saved_tb;
758 if (env->interrupt_request && env->current_tb)
759 cpu_interrupt(env, env->interrupt_request);
760 }
9fa3e853
FB
761 }
762 tb = tb_next;
763 }
764#if !defined(CONFIG_USER_ONLY)
765 /* if no code remaining, no need to continue to use slow writes */
766 if (!p->first_tb) {
767 invalidate_page_bitmap(p);
d720b93d
FB
768 if (is_cpu_write_access) {
769 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
770 }
771 }
772#endif
773#ifdef TARGET_HAS_PRECISE_SMC
774 if (current_tb_modified) {
775 /* we generate a block containing just the instruction
776 modifying the memory. It will ensure that it cannot modify
777 itself */
ea1c1802 778 env->current_tb = NULL;
5fafdf24 779 tb_gen_code(env, current_pc, current_cs_base, current_flags,
d720b93d
FB
780 CF_SINGLE_INSN);
781 cpu_resume_from_signal(env, NULL);
9fa3e853 782 }
fd6ce8f6 783#endif
9fa3e853 784}
fd6ce8f6 785
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                    cpu_single_env->mem_write_vaddr, len,
                    cpu_single_env->eip,
                    cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}
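
/* NOTE: a NULL return means that either the static TB array or the
   code buffer is exhausted; callers such as tb_gen_code() then call
   tb_flush() and retry the allocation, which cannot fail. */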

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
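
/* Note: the binary search above relies on tbs[] being sorted by
   ascending tc_ptr, which holds because tb_alloc() hands out entries
   sequentially and code_gen_ptr only grows between two flushes of
   the translation buffer. */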

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif

/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr)
            return 0;
    }
    if (env->nb_watchpoints >= MAX_WATCHPOINTS)
        return -1;

    i = env->nb_watchpoints++;
    env->watchpoint[i].vaddr = addr;
    tlb_flush_page(env, addr);
    /* FIXME: This flush is needed because of the hack to make memory ops
       terminate the TB.  It can be removed once the proper IO trap and
       re-execute bits are in.  */
    tb_flush(env);
    return i;
}

/* Remove a watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr) {
            env->nb_watchpoints--;
            env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
            tlb_flush_page(env, addr);
            return 0;
        }
    }
    return -1;
}

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low level logs */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        resetlock(&interrupt_lock);
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
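
/* e.g. cpu_str_to_log_mask("in_asm,cpu") yields
   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_CPU, and "all" selects the mask of
   every entry in cpu_log_items[]. */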

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    if (env->intercept & INTERCEPT_SVM_MASK) {
        /* most probably the virtual machine should not
           be shut down but rather caught by the VMM */
        vmexit(SVM_EXIT_SHUTDOWN, 0);
    }
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (logfile) {
        fprintf(logfile, "qemu: fatal: ");
        vfprintf(logfile, fmt, ap2);
        fprintf(logfile, "\n");
#ifdef TARGET_I386
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        cpu_dump_state(env, logfile, fprintf, 0);
#endif
        fflush(logfile);
        fclose(logfile);
    }
    va_end(ap2);
    va_end(ap);
    abort();
}

CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    /* preserve chaining and index */
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
    memcpy(new_env, env, sizeof(CPUState));
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;
    return new_env;
}

#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#endif
#endif
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);
#endif
#endif

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
#endif
#endif
    }

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}

static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
#if (NB_MMU_MODES >= 3)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[2][i]);
#if (NB_MMU_MODES == 4)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[3][i]);
#endif
#endif
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], addr);
    tlb_set_dirty1(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_set_dirty1(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_set_dirty1(&env->tlb_table[3][i], addr);
#endif
#endif
}

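/* Note: in each CPUTLBEntry, 'addend' holds (host address - guest
   virtual address) for the page, so that a TLB hit resolves a guest
   access with a single addition: host_ptr = vaddr + te->addend (for
   RAM pages; for I/O pages the io_index is carried in the low bits
   of the entry's address fields instead). */
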
59817ccb
FB
1636/* add a new TLB entry. At most one entry for a given virtual address
1637 is permitted. Return 0 if OK or 2 if the page could not be mapped
1638 (can only happen in non SOFTMMU mode for I/O pages or pages
1639 conflicting with the host address space). */
5fafdf24
TS
1640int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1641 target_phys_addr_t paddr, int prot,
6ebbf390 1642 int mmu_idx, int is_softmmu)
9fa3e853 1643{
92e873b9 1644 PhysPageDesc *p;
4f2ac237 1645 unsigned long pd;
9fa3e853 1646 unsigned int index;
4f2ac237 1647 target_ulong address;
108c49b8 1648 target_phys_addr_t addend;
9fa3e853 1649 int ret;
84b7b8e7 1650 CPUTLBEntry *te;
6658ffb8 1651 int i;
9fa3e853 1652
92e873b9 1653 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
9fa3e853
FB
1654 if (!p) {
1655 pd = IO_MEM_UNASSIGNED;
9fa3e853
FB
1656 } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        /* Make accesses to pages with watchpoints go via the
           watchpoint trap routines. */
        for (i = 0; i < env->nb_watchpoints; i++) {
            if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
                if (address & ~TARGET_PAGE_MASK) {
                    env->watchpoint[i].addend = 0;
                    address = vaddr | io_mem_watch;
                } else {
                    env->watchpoint[i].addend = pd - paddr +
                        (unsigned long) phys_ram_base;
                    /* TODO: Figure out how to make read watchpoints coexist
                       with code. */
                    pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
                }
            }
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        te = &env->tlb_table[mmu_idx][index];
        te->addend = addend;
        if (prot & PAGE_READ) {
            te->addr_read = address;
        } else {
            te->addr_read = -1;
        }
        if (prot & PAGE_EXEC) {
            te->addr_code = address;
        } else {
            te->addr_code = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
                (pd & IO_MEM_ROMD)) {
                /* write access calls the I/O callback */
                te->addr_write = vaddr |
                    (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
            } else {
                te->addr_write = address;
            }
        } else {
            te->addr_write = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: behave as if code was inside */
                        /* if code is present, we only map as read-only and
                           save the original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}
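
/* Editorial sketch, not part of the original file: how a host SIGSEGV
   handler would typically drive page_unprotect().  The way the faulting
   address and pc are pulled out of the signal context is host-specific;
   siginfo_t comes from <signal.h> and the ucontext handling is elided. */
#if 0
static void example_host_segv_handler(int sig, siginfo_t *info, void *puc)
{
    unsigned long pc = 0; /* would be extracted from the host ucontext */

    if (page_unprotect((target_ulong)(long)info->si_addr, pc, puc))
        return; /* fault handled: code invalidated, page writable again */
    abort();    /* a genuine fault in the guest or in QEMU itself */
}
#endif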

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0; j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is set automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    PageDesc *p;
    target_ulong addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if a write-protected page is being made writable again, the
           translated code it contains must be invalidated */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}
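
/* Editorial sketch, not part of the original file: how a user-mode mmap
   emulation would publish a fresh guest mapping.  guest_start/guest_len
   are hypothetical parameters. */
#if 0
static void example_publish_mapping(target_ulong guest_start,
                                    target_ulong guest_len)
{
    /* PAGE_WRITE implies PAGE_WRITE_ORG, so a page later write-protected
       to guard translated code can be restored by page_unprotect(). */
    page_set_flags(guest_start, guest_start + guest_len,
                   PAGE_VALID | PAGE_READ | PAGE_WRITE);
}
#endif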

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    end = TARGET_PAGE_ALIGN(start + len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    if (end < start)
        /* we've wrapped around */
        return -1;
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p)
            return -1;
        if (!(p->flags & PAGE_VALID))
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
        }
    }
    return 0;
}
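
/* Editorial sketch, not part of the original file: a syscall emulation
   layer would validate a guest buffer with page_check_range() before
   touching it.  The function name and parameters are hypothetical. */
#if 0
static int example_copy_from_guest(void *dst, target_ulong guest_addr,
                                   target_ulong size)
{
    if (page_check_range(guest_addr, size, PAGE_READ) < 0)
        return -1; /* the caller would turn this into -EFAULT */
    memcpy(dst, g2h(guest_addr), size);
    return 0;
}
#endif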

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory);
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
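
/* Worked example (editorial comment, assuming 4 KiB target pages):
   registering start_addr = 0x10000800 with orig_size = 0x400 and looking
   at the page addr = 0x10000000 yields start_addr2 = 0x800 and
   end_addr2 = 0xbff, and need_subpage is set by both tests because the
   region covers only the middle of that page. */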

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    void *subpage;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset);
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD))
                phys_offset += TARGET_PAGE_SIZE;
            else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset);
                }
            }
        }
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
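
/* Editorial sketch, not part of the original file: how board init code
   typically wires RAM and a device page together.  EXAMPLE_DEV_BASE and
   dev_io_index are hypothetical; dev_io_index would be the value returned
   by cpu_register_io_memory() further below. */
#if 0
static void example_board_init(ram_addr_t ram_size, int dev_io_index)
{
    ram_addr_t ram_offset = qemu_ram_alloc(ram_size);

    /* RAM: no I/O bits set in phys_offset (IO_MEM_RAM is 0) */
    cpu_register_physical_memory(0x00000000, ram_size, ram_offset | IO_MEM_RAM);
    /* MMIO: the low bits of phys_offset select the I/O handler slot */
    cpu_register_physical_memory(EXAMPLE_DEV_BASE, TARGET_PAGE_SIZE,
                                 dev_io_index);
}
#endif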

/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}

/* XXX: better than nothing */
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    ram_addr_t addr;
    if ((phys_ram_alloc_offset + size) > phys_ram_size) {
        fprintf(stderr, "Not enough memory (requested_size = %lu, max memory = %ld)\n",
                size, phys_ram_size);
        abort();
    }
    addr = phys_ram_alloc_offset;
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
    return addr;
}

void qemu_ram_free(ram_addr_t addr)
{
}

static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 0, 0, 0);
#elif defined(TARGET_CRIS)
    do_unassigned_access(addr, 0, 0, 0);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 1, 0, 0);
#elif defined(TARGET_CRIS)
    do_unassigned_access(addr, 1, 0, 0);
#endif
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
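
/* Worked example (editorial comment) of the dirty-tracking state machine:
   a RAM page starts with dirty byte 0xff.  Translating code from it
   clears CODE_DIRTY_FLAG, so tlb_set_page_exec() maps writes to
   IO_MEM_NOTDIRTY and the next store lands in the handlers above.  They
   invalidate the page's TBs, raise the flags back towards 0xff and, once
   the byte is 0xff again, re-enable the direct RAM path via
   tlb_set_dirty(). */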

#if defined(CONFIG_SOFTMMU)
/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines. */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    return ldl_phys(addr);
}

/* Generate a debug exception if a watchpoint has been hit.
   Returns the real physical address of the access. addr will be a host
   address in case of a RAM location. */
static target_ulong check_watchpoint(target_phys_addr_t addr)
{
    CPUState *env = cpu_single_env;
    target_ulong watch;
    target_ulong retaddr;
    int i;

    retaddr = addr;
    for (i = 0; i < env->nb_watchpoints; i++) {
        watch = env->watchpoint[i].vaddr;
        if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
            retaddr = addr - env->watchpoint[i].addend;
            if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
                cpu_single_env->watchpoint_hit = i + 1;
                cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
                break;
            }
        }
    }
    return retaddr;
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
#endif
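
/* Editorial sketch, not part of the original file: a debugger front end
   (e.g. the gdb stub) arms a watchpoint and then reacts to the
   CPU_INTERRUPT_DEBUG exit.  cpu_watchpoint_insert() is assumed to be the
   insertion helper defined elsewhere in this file at this point in
   history. */
#if 0
static void example_arm_watchpoint(CPUState *env, target_ulong vaddr)
{
    if (cpu_watchpoint_insert(env, vaddr) < 0)
        return; /* watchpoint table full */
    /* On the next guest write to vaddr, check_watchpoint() sets
       env->watchpoint_hit and raises CPU_INTERRUPT_DEBUG; the main loop
       then reports the stop to the debugger. */
}
#endif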

static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                        unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);

    return ret;
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                            target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
            }
        }
    }

    return 0;
}

static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));
    if (mmio != NULL) {
        mmio->base = base;
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
        *phys = subpage_memory | IO_MEM_SUBPAGE;
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
    }

    return mmio;
}

static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

#if defined(CONFIG_SOFTMMU)
    io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
                                          watch_mem_write, NULL);
#endif
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer. The
   registered functions may be modified dynamically later.
   If io_index is non-zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0; i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}
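
/* Editorial sketch, not part of the original file: a minimal device model
   registering MMIO handlers.  MyDevState and the my_dev_* names are
   hypothetical. */
#if 0
typedef struct MyDevState {
    uint32_t reg;
} MyDevState;

static uint32_t my_dev_readl(void *opaque, target_phys_addr_t addr)
{
    MyDevState *s = opaque;
    return s->reg;
}

static void my_dev_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    MyDevState *s = opaque;
    s->reg = val;
}

/* leaving the byte/word slots NULL tags the region IO_MEM_SUBWIDTH, so
   narrow accesses are routed through the subpage machinery above */
static CPUReadMemoryFunc *my_dev_read[3] = { NULL, NULL, my_dev_readl };
static CPUWriteMemoryFunc *my_dev_write[3] = { NULL, NULL, my_dev_writel };

static void example_my_dev_init(target_phys_addr_t base, MyDevState *s)
{
    int io = cpu_register_io_memory(0, my_dev_read, my_dev_write, s);
    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, io);
}
#endif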

CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
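
/* Editorial sketch, not part of the original file: device emulation uses
   the cpu_physical_memory_read/write wrappers around this function for
   DMA-style transfers.  desc_paddr and the descriptor layout are
   hypothetical. */
#if 0
static void example_dma_fetch_descriptor(target_phys_addr_t desc_paddr)
{
    uint32_t desc[4];

    cpu_physical_memory_read(desc_paddr, (uint8_t *)desc, sizeof(desc));
    /* ... interpret the descriptor, then write back a status word ... */
    cpu_physical_memory_write(desc_paddr + 12, (uint8_t *)&desc[3], 4);
}
#endif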

/* used for ROM loading: can write to RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
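
/* Editorial sketch, not part of the original file: a firmware loader
   stages its image through this helper so that pages registered as ROM
   still receive their contents.  EXAMPLE_ROM_BASE, rom_buf and rom_size
   are hypothetical. */
#if 0
static void example_load_firmware(const uint8_t *rom_buf, int rom_size)
{
    cpu_physical_memory_write_rom(EXAMPLE_ROM_BASE, rom_buf, rom_size);
}
#endif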

/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
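
/* Editorial sketch, not part of the original file: the reason for the
   _notdirty variants.  A target MMU helper that sets accessed/dirty bits
   in a guest PTE during a page walk (as the i386 helpers do) must not
   mark the RAM page dirty or invalidate code, so that the dirty bitmap
   keeps tracking guest-visible modifications only.  pte_paddr and the
   0x20 accessed bit are hypothetical here. */
#if 0
static void example_set_pte_accessed(target_phys_addr_t pte_paddr,
                                     uint32_t pte)
{
    stl_phys_notdirty(pte_paddr, pte | 0x20 /* accessed */);
}
#endif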

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
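
/* Editorial sketch, not part of the original file: the gdb stub serves a
   memory-read request by going through the guest MMU with this helper;
   the real entry points live in gdbstub.c and are only paraphrased. */
#if 0
static int example_gdb_read_guest(CPUState *env, target_ulong vaddr,
                                  uint8_t *out, int len)
{
    return cpu_memory_rw_debug(env, vaddr, out, len, 0 /* is_write = 0 */);
}
#endif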

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "TB count %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
#ifdef CONFIG_PROFILER
    {
        int64_t tot;
        tot = dyngen_interm_time + dyngen_code_time;
        cpu_fprintf(f, "JIT cycles %" PRId64 " (%0.3f s at 2.4 GHz)\n",
                    tot, tot / 2.4e9);
        cpu_fprintf(f, "translated TBs %" PRId64 " (aborted=%" PRId64 " %0.1f%%)\n",
                    dyngen_tb_count,
                    dyngen_tb_count1 - dyngen_tb_count,
                    dyngen_tb_count1 ? (double)(dyngen_tb_count1 - dyngen_tb_count) / dyngen_tb_count1 * 100.0 : 0);
        cpu_fprintf(f, "avg ops/TB %0.1f max=%d\n",
                    dyngen_tb_count ? (double)dyngen_op_count / dyngen_tb_count : 0, dyngen_op_count_max);
        cpu_fprintf(f, "old ops/total ops %0.1f%%\n",
                    dyngen_op_count ? (double)dyngen_old_op_count / dyngen_op_count * 100.0 : 0);
        cpu_fprintf(f, "deleted ops/TB %0.2f\n",
                    dyngen_tb_count ?
                    (double)dyngen_tcg_del_op_count / dyngen_tb_count : 0);
        cpu_fprintf(f, "cycles/op %0.1f\n",
                    dyngen_op_count ? (double)tot / dyngen_op_count : 0);
        cpu_fprintf(f, "cycles/in byte %0.1f\n",
                    dyngen_code_in_len ? (double)tot / dyngen_code_in_len : 0);
        cpu_fprintf(f, "cycles/out byte %0.1f\n",
                    dyngen_code_out_len ? (double)tot / dyngen_code_out_len : 0);
        if (tot == 0)
            tot = 1;
        cpu_fprintf(f, "  gen_interm time %0.1f%%\n",
                    (double)dyngen_interm_time / tot * 100.0);
        cpu_fprintf(f, "  gen_code time %0.1f%%\n",
                    (double)dyngen_code_time / tot * 100.0);
        cpu_fprintf(f, "cpu_restore count %" PRId64 "\n",
                    dyngen_restore_count);
        cpu_fprintf(f, "  avg cycles %0.1f\n",
                    dyngen_restore_count ? (double)dyngen_restore_time / dyngen_restore_count : 0);
        {
            extern void dump_op_count(void);
            dump_op_count();
        }
    }
#endif
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif