2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
23 #include <sys/types.h>
36 #include "qemu-common.h"
41 #if defined(CONFIG_USER_ONLY)
46 //#define DEBUG_TB_INVALIDATE
49 //#define DEBUG_UNASSIGNED
51 /* make various TB consistency checks */
52 //#define DEBUG_TB_CHECK
53 //#define DEBUG_TLB_CHECK
55 //#define DEBUG_IOPORT
56 //#define DEBUG_SUBPAGE
58 #if !defined(CONFIG_USER_ONLY)
59 /* TB consistency checks only implemented for usermode emulation. */
63 #define SMC_BITMAP_USE_THRESHOLD 10
65 #if defined(TARGET_SPARC64)
66 #define TARGET_PHYS_ADDR_SPACE_BITS 41
67 #elif defined(TARGET_SPARC)
68 #define TARGET_PHYS_ADDR_SPACE_BITS 36
69 #elif defined(TARGET_ALPHA)
70 #define TARGET_PHYS_ADDR_SPACE_BITS 42
71 #define TARGET_VIRT_ADDR_SPACE_BITS 42
72 #elif defined(TARGET_PPC64)
73 #define TARGET_PHYS_ADDR_SPACE_BITS 42
74 #elif defined(TARGET_X86_64)
75 #define TARGET_PHYS_ADDR_SPACE_BITS 42
76 #elif defined(TARGET_I386)
77 #define TARGET_PHYS_ADDR_SPACE_BITS 36
79 #define TARGET_PHYS_ADDR_SPACE_BITS 32
82 static TranslationBlock
*tbs
;
83 int code_gen_max_blocks
;
84 TranslationBlock
*tb_phys_hash
[CODE_GEN_PHYS_HASH_SIZE
];
86 /* any access to the tbs or the page table must use this lock */
87 spinlock_t tb_lock
= SPIN_LOCK_UNLOCKED
;
89 #if defined(__arm__) || defined(__sparc_v9__)
90 /* The prologue must be reachable with a direct jump. ARM and Sparc64
91 have limited branch ranges (possibly also PPC) so place it in a
92 section close to code segment. */
93 #define code_gen_section \
94 __attribute__((__section__(".gen_code"))) \
95 __attribute__((aligned (32)))
97 /* Maximum alignment for Win32 is 16. */
98 #define code_gen_section \
99 __attribute__((aligned (16)))
101 #define code_gen_section \
102 __attribute__((aligned (32)))
105 uint8_t code_gen_prologue
[1024] code_gen_section
;
106 static uint8_t *code_gen_buffer
;
107 static unsigned long code_gen_buffer_size
;
108 /* threshold to flush the translated code buffer */
109 static unsigned long code_gen_buffer_max_size
;
110 uint8_t *code_gen_ptr
;
112 #if !defined(CONFIG_USER_ONLY)
114 uint8_t *phys_ram_dirty
;
115 static int in_migration
;
117 typedef struct RAMBlock
{
121 struct RAMBlock
*next
;
124 static RAMBlock
*ram_blocks
;
125 /* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
126 then we can no longer assume contiguous ram offsets, and external uses
127 of this variable will break. */
128 ram_addr_t last_ram_offset
;
132 /* current CPU in the current thread. It is only valid inside
134 CPUState
*cpu_single_env
;
135 /* 0 = Do not count executed instructions.
136 1 = Precise instruction counting.
137 2 = Adaptive rate instruction counting. */
139 /* Current instruction counter. While executing translated code this may
140 include some instructions that have not yet been executed. */
143 typedef struct PageDesc
{
144 /* list of TBs intersecting this ram page */
145 TranslationBlock
*first_tb
;
146 /* in order to optimize self modifying code, we count the number
147 of lookups we do to a given page to use a bitmap */
148 unsigned int code_write_count
;
149 uint8_t *code_bitmap
;
150 #if defined(CONFIG_USER_ONLY)
155 typedef struct PhysPageDesc
{
156 /* offset in host memory of the page + io_index in the low bits */
157 ram_addr_t phys_offset
;
158 ram_addr_t region_offset
;
162 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
163 /* XXX: this is a temporary hack for alpha target.
164 * In the future, this is to be replaced by a multi-level table
165 * to actually be able to handle the complete 64 bits address space.
167 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
169 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
172 #define L1_SIZE (1 << L1_BITS)
173 #define L2_SIZE (1 << L2_BITS)
175 unsigned long qemu_real_host_page_size
;
176 unsigned long qemu_host_page_bits
;
177 unsigned long qemu_host_page_size
;
178 unsigned long qemu_host_page_mask
;
180 /* XXX: for system emulation, it could just be an array */
181 static PageDesc
*l1_map
[L1_SIZE
];
183 #if !defined(CONFIG_USER_ONLY)
184 static PhysPageDesc
**l1_phys_map
;
186 static void io_mem_init(void);
188 /* io memory support */
189 CPUWriteMemoryFunc
*io_mem_write
[IO_MEM_NB_ENTRIES
][4];
190 CPUReadMemoryFunc
*io_mem_read
[IO_MEM_NB_ENTRIES
][4];
191 void *io_mem_opaque
[IO_MEM_NB_ENTRIES
];
192 static char io_mem_used
[IO_MEM_NB_ENTRIES
];
193 static int io_mem_watch
;
198 static const char *logfilename
= "qemu.log";
200 static const char *logfilename
= "/tmp/qemu.log";
204 static int log_append
= 0;
207 static int tlb_flush_count
;
208 static int tb_flush_count
;
209 static int tb_phys_invalidate_count
;
211 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
212 typedef struct subpage_t
{
213 target_phys_addr_t base
;
214 CPUReadMemoryFunc
* const *mem_read
[TARGET_PAGE_SIZE
][4];
215 CPUWriteMemoryFunc
* const *mem_write
[TARGET_PAGE_SIZE
][4];
216 void *opaque
[TARGET_PAGE_SIZE
][2][4];
217 ram_addr_t region_offset
[TARGET_PAGE_SIZE
][2][4];
221 static void map_exec(void *addr
, long size
)
224 VirtualProtect(addr
, size
,
225 PAGE_EXECUTE_READWRITE
, &old_protect
);
229 static void map_exec(void *addr
, long size
)
231 unsigned long start
, end
, page_size
;
233 page_size
= getpagesize();
234 start
= (unsigned long)addr
;
235 start
&= ~(page_size
- 1);
237 end
= (unsigned long)addr
+ size
;
238 end
+= page_size
- 1;
239 end
&= ~(page_size
- 1);
241 mprotect((void *)start
, end
- start
,
242 PROT_READ
| PROT_WRITE
| PROT_EXEC
);
246 static void page_init(void)
248 /* NOTE: we can always suppose that qemu_host_page_size >=
252 SYSTEM_INFO system_info
;
254 GetSystemInfo(&system_info
);
255 qemu_real_host_page_size
= system_info
.dwPageSize
;
258 qemu_real_host_page_size
= getpagesize();
260 if (qemu_host_page_size
== 0)
261 qemu_host_page_size
= qemu_real_host_page_size
;
262 if (qemu_host_page_size
< TARGET_PAGE_SIZE
)
263 qemu_host_page_size
= TARGET_PAGE_SIZE
;
264 qemu_host_page_bits
= 0;
265 while ((1 << qemu_host_page_bits
) < qemu_host_page_size
)
266 qemu_host_page_bits
++;
267 qemu_host_page_mask
= ~(qemu_host_page_size
- 1);
268 #if !defined(CONFIG_USER_ONLY)
269 l1_phys_map
= qemu_vmalloc(L1_SIZE
* sizeof(void *));
270 memset(l1_phys_map
, 0, L1_SIZE
* sizeof(void *));
273 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
275 long long startaddr
, endaddr
;
280 last_brk
= (unsigned long)sbrk(0);
281 f
= fopen("/proc/self/maps", "r");
284 n
= fscanf (f
, "%llx-%llx %*[^\n]\n", &startaddr
, &endaddr
);
286 startaddr
= MIN(startaddr
,
287 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS
) - 1);
288 endaddr
= MIN(endaddr
,
289 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS
) - 1);
290 page_set_flags(startaddr
& TARGET_PAGE_MASK
,
291 TARGET_PAGE_ALIGN(endaddr
),
302 static inline PageDesc
**page_l1_map(target_ulong index
)
304 #if TARGET_LONG_BITS > 32
305 /* Host memory outside guest VM. For 32-bit targets we have already
306 excluded high addresses. */
307 if (index
> ((target_ulong
)L2_SIZE
* L1_SIZE
))
310 return &l1_map
[index
>> L2_BITS
];
313 static inline PageDesc
*page_find_alloc(target_ulong index
)
316 lp
= page_l1_map(index
);
322 /* allocate if not found */
323 #if defined(CONFIG_USER_ONLY)
324 size_t len
= sizeof(PageDesc
) * L2_SIZE
;
325 /* Don't use qemu_malloc because it may recurse. */
326 p
= mmap(NULL
, len
, PROT_READ
| PROT_WRITE
,
327 MAP_PRIVATE
| MAP_ANONYMOUS
, -1, 0);
330 unsigned long addr
= h2g(p
);
331 page_set_flags(addr
& TARGET_PAGE_MASK
,
332 TARGET_PAGE_ALIGN(addr
+ len
),
336 p
= qemu_mallocz(sizeof(PageDesc
) * L2_SIZE
);
340 return p
+ (index
& (L2_SIZE
- 1));
343 static inline PageDesc
*page_find(target_ulong index
)
346 lp
= page_l1_map(index
);
354 return p
+ (index
& (L2_SIZE
- 1));
357 #if !defined(CONFIG_USER_ONLY)
358 static PhysPageDesc
*phys_page_find_alloc(target_phys_addr_t index
, int alloc
)
363 p
= (void **)l1_phys_map
;
364 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
366 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
367 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
369 lp
= p
+ ((index
>> (L1_BITS
+ L2_BITS
)) & (L1_SIZE
- 1));
372 /* allocate if not found */
375 p
= qemu_vmalloc(sizeof(void *) * L1_SIZE
);
376 memset(p
, 0, sizeof(void *) * L1_SIZE
);
380 lp
= p
+ ((index
>> L2_BITS
) & (L1_SIZE
- 1));
384 /* allocate if not found */
387 pd
= qemu_vmalloc(sizeof(PhysPageDesc
) * L2_SIZE
);
389 for (i
= 0; i
< L2_SIZE
; i
++) {
390 pd
[i
].phys_offset
= IO_MEM_UNASSIGNED
;
391 pd
[i
].region_offset
= (index
+ i
) << TARGET_PAGE_BITS
;
394 return ((PhysPageDesc
*)pd
) + (index
& (L2_SIZE
- 1));
397 static inline PhysPageDesc
*phys_page_find(target_phys_addr_t index
)
399 return phys_page_find_alloc(index
, 0);
402 static void tlb_protect_code(ram_addr_t ram_addr
);
403 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
405 #define mmap_lock() do { } while(0)
406 #define mmap_unlock() do { } while(0)
409 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
411 #if defined(CONFIG_USER_ONLY)
412 /* Currently it is not recommended to allocate big chunks of data in
413 user mode. It will change when a dedicated libc will be used */
414 #define USE_STATIC_CODE_GEN_BUFFER
417 #ifdef USE_STATIC_CODE_GEN_BUFFER
418 static uint8_t static_code_gen_buffer
[DEFAULT_CODE_GEN_BUFFER_SIZE
];
421 static void code_gen_alloc(unsigned long tb_size
)
423 #ifdef USE_STATIC_CODE_GEN_BUFFER
424 code_gen_buffer
= static_code_gen_buffer
;
425 code_gen_buffer_size
= DEFAULT_CODE_GEN_BUFFER_SIZE
;
426 map_exec(code_gen_buffer
, code_gen_buffer_size
);
428 code_gen_buffer_size
= tb_size
;
429 if (code_gen_buffer_size
== 0) {
430 #if defined(CONFIG_USER_ONLY)
431 /* in user mode, phys_ram_size is not meaningful */
432 code_gen_buffer_size
= DEFAULT_CODE_GEN_BUFFER_SIZE
;
434 /* XXX: needs adjustments */
435 code_gen_buffer_size
= (unsigned long)(ram_size
/ 4);
438 if (code_gen_buffer_size
< MIN_CODE_GEN_BUFFER_SIZE
)
439 code_gen_buffer_size
= MIN_CODE_GEN_BUFFER_SIZE
;
440 /* The code gen buffer location may have constraints depending on
441 the host cpu and OS */
442 #if defined(__linux__)
447 flags
= MAP_PRIVATE
| MAP_ANONYMOUS
;
448 #if defined(__x86_64__)
450 /* Cannot map more than that */
451 if (code_gen_buffer_size
> (800 * 1024 * 1024))
452 code_gen_buffer_size
= (800 * 1024 * 1024);
453 #elif defined(__sparc_v9__)
454 // Map the buffer below 2G, so we can use direct calls and branches
456 start
= (void *) 0x60000000UL
;
457 if (code_gen_buffer_size
> (512 * 1024 * 1024))
458 code_gen_buffer_size
= (512 * 1024 * 1024);
459 #elif defined(__arm__)
460 /* Map the buffer below 32M, so we can use direct calls and branches */
462 start
= (void *) 0x01000000UL
;
463 if (code_gen_buffer_size
> 16 * 1024 * 1024)
464 code_gen_buffer_size
= 16 * 1024 * 1024;
466 code_gen_buffer
= mmap(start
, code_gen_buffer_size
,
467 PROT_WRITE
| PROT_READ
| PROT_EXEC
,
469 if (code_gen_buffer
== MAP_FAILED
) {
470 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
474 #elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
478 flags
= MAP_PRIVATE
| MAP_ANONYMOUS
;
479 #if defined(__x86_64__)
480 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
481 * 0x40000000 is free */
483 addr
= (void *)0x40000000;
484 /* Cannot map more than that */
485 if (code_gen_buffer_size
> (800 * 1024 * 1024))
486 code_gen_buffer_size
= (800 * 1024 * 1024);
488 code_gen_buffer
= mmap(addr
, code_gen_buffer_size
,
489 PROT_WRITE
| PROT_READ
| PROT_EXEC
,
491 if (code_gen_buffer
== MAP_FAILED
) {
492 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
497 code_gen_buffer
= qemu_malloc(code_gen_buffer_size
);
498 map_exec(code_gen_buffer
, code_gen_buffer_size
);
500 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
501 map_exec(code_gen_prologue
, sizeof(code_gen_prologue
));
502 code_gen_buffer_max_size
= code_gen_buffer_size
-
503 code_gen_max_block_size();
504 code_gen_max_blocks
= code_gen_buffer_size
/ CODE_GEN_AVG_BLOCK_SIZE
;
505 tbs
= qemu_malloc(code_gen_max_blocks
* sizeof(TranslationBlock
));
508 /* Must be called before using the QEMU cpus. 'tb_size' is the size
509 (in bytes) allocated to the translation buffer. Zero means default
511 void cpu_exec_init_all(unsigned long tb_size
)
514 code_gen_alloc(tb_size
);
515 code_gen_ptr
= code_gen_buffer
;
517 #if !defined(CONFIG_USER_ONLY)
522 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
524 static void cpu_common_pre_save(void *opaque
)
526 CPUState
*env
= opaque
;
528 cpu_synchronize_state(env
);
531 static int cpu_common_pre_load(void *opaque
)
533 CPUState
*env
= opaque
;
535 cpu_synchronize_state(env
);
539 static int cpu_common_post_load(void *opaque
, int version_id
)
541 CPUState
*env
= opaque
;
543 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
544 version_id is increased. */
545 env
->interrupt_request
&= ~0x01;
551 static const VMStateDescription vmstate_cpu_common
= {
552 .name
= "cpu_common",
554 .minimum_version_id
= 1,
555 .minimum_version_id_old
= 1,
556 .pre_save
= cpu_common_pre_save
,
557 .pre_load
= cpu_common_pre_load
,
558 .post_load
= cpu_common_post_load
,
559 .fields
= (VMStateField
[]) {
560 VMSTATE_UINT32(halted
, CPUState
),
561 VMSTATE_UINT32(interrupt_request
, CPUState
),
562 VMSTATE_END_OF_LIST()
567 CPUState
*qemu_get_cpu(int cpu
)
569 CPUState
*env
= first_cpu
;
572 if (env
->cpu_index
== cpu
)
580 void cpu_exec_init(CPUState
*env
)
585 #if defined(CONFIG_USER_ONLY)
588 env
->next_cpu
= NULL
;
591 while (*penv
!= NULL
) {
592 penv
= &(*penv
)->next_cpu
;
595 env
->cpu_index
= cpu_index
;
597 QTAILQ_INIT(&env
->breakpoints
);
598 QTAILQ_INIT(&env
->watchpoints
);
600 #if defined(CONFIG_USER_ONLY)
603 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
604 vmstate_register(cpu_index
, &vmstate_cpu_common
, env
);
605 register_savevm("cpu", cpu_index
, CPU_SAVE_VERSION
,
606 cpu_save
, cpu_load
, env
);
610 static inline void invalidate_page_bitmap(PageDesc
*p
)
612 if (p
->code_bitmap
) {
613 qemu_free(p
->code_bitmap
);
614 p
->code_bitmap
= NULL
;
616 p
->code_write_count
= 0;
619 /* set to NULL all the 'first_tb' fields in all PageDescs */
620 static void page_flush_tb(void)
625 for(i
= 0; i
< L1_SIZE
; i
++) {
628 for(j
= 0; j
< L2_SIZE
; j
++) {
630 invalidate_page_bitmap(p
);
637 /* flush all the translation blocks */
638 /* XXX: tb_flush is currently not thread safe */
639 void tb_flush(CPUState
*env1
)
642 #if defined(DEBUG_FLUSH)
643 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
644 (unsigned long)(code_gen_ptr
- code_gen_buffer
),
646 ((unsigned long)(code_gen_ptr
- code_gen_buffer
)) / nb_tbs
: 0);
648 if ((unsigned long)(code_gen_ptr
- code_gen_buffer
) > code_gen_buffer_size
)
649 cpu_abort(env1
, "Internal error: code buffer overflow\n");
653 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
654 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
657 memset (tb_phys_hash
, 0, CODE_GEN_PHYS_HASH_SIZE
* sizeof (void *));
660 code_gen_ptr
= code_gen_buffer
;
661 /* XXX: flush processor icache at this point if cache flush is
666 #ifdef DEBUG_TB_CHECK
668 static void tb_invalidate_check(target_ulong address
)
670 TranslationBlock
*tb
;
672 address
&= TARGET_PAGE_MASK
;
673 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
674 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
675 if (!(address
+ TARGET_PAGE_SIZE
<= tb
->pc
||
676 address
>= tb
->pc
+ tb
->size
)) {
677 printf("ERROR invalidate: address=" TARGET_FMT_lx
678 " PC=%08lx size=%04x\n",
679 address
, (long)tb
->pc
, tb
->size
);
685 /* verify that all the pages have correct rights for code */
686 static void tb_page_check(void)
688 TranslationBlock
*tb
;
689 int i
, flags1
, flags2
;
691 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
692 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
693 flags1
= page_get_flags(tb
->pc
);
694 flags2
= page_get_flags(tb
->pc
+ tb
->size
- 1);
695 if ((flags1
& PAGE_WRITE
) || (flags2
& PAGE_WRITE
)) {
696 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
697 (long)tb
->pc
, tb
->size
, flags1
, flags2
);
705 /* invalidate one TB */
706 static inline void tb_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
,
709 TranslationBlock
*tb1
;
713 *ptb
= *(TranslationBlock
**)((char *)tb1
+ next_offset
);
716 ptb
= (TranslationBlock
**)((char *)tb1
+ next_offset
);
720 static inline void tb_page_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
)
722 TranslationBlock
*tb1
;
728 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
730 *ptb
= tb1
->page_next
[n1
];
733 ptb
= &tb1
->page_next
[n1
];
737 static inline void tb_jmp_remove(TranslationBlock
*tb
, int n
)
739 TranslationBlock
*tb1
, **ptb
;
742 ptb
= &tb
->jmp_next
[n
];
745 /* find tb(n) in circular list */
749 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
750 if (n1
== n
&& tb1
== tb
)
753 ptb
= &tb1
->jmp_first
;
755 ptb
= &tb1
->jmp_next
[n1
];
758 /* now we can suppress tb(n) from the list */
759 *ptb
= tb
->jmp_next
[n
];
761 tb
->jmp_next
[n
] = NULL
;
765 /* reset the jump entry 'n' of a TB so that it is not chained to
767 static inline void tb_reset_jump(TranslationBlock
*tb
, int n
)
769 tb_set_jmp_target(tb
, n
, (unsigned long)(tb
->tc_ptr
+ tb
->tb_next_offset
[n
]));
772 void tb_phys_invalidate(TranslationBlock
*tb
, target_ulong page_addr
)
777 target_phys_addr_t phys_pc
;
778 TranslationBlock
*tb1
, *tb2
;
780 /* remove the TB from the hash list */
781 phys_pc
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
782 h
= tb_phys_hash_func(phys_pc
);
783 tb_remove(&tb_phys_hash
[h
], tb
,
784 offsetof(TranslationBlock
, phys_hash_next
));
786 /* remove the TB from the page list */
787 if (tb
->page_addr
[0] != page_addr
) {
788 p
= page_find(tb
->page_addr
[0] >> TARGET_PAGE_BITS
);
789 tb_page_remove(&p
->first_tb
, tb
);
790 invalidate_page_bitmap(p
);
792 if (tb
->page_addr
[1] != -1 && tb
->page_addr
[1] != page_addr
) {
793 p
= page_find(tb
->page_addr
[1] >> TARGET_PAGE_BITS
);
794 tb_page_remove(&p
->first_tb
, tb
);
795 invalidate_page_bitmap(p
);
798 tb_invalidated_flag
= 1;
800 /* remove the TB from the hash list */
801 h
= tb_jmp_cache_hash_func(tb
->pc
);
802 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
803 if (env
->tb_jmp_cache
[h
] == tb
)
804 env
->tb_jmp_cache
[h
] = NULL
;
807 /* suppress this TB from the two jump lists */
808 tb_jmp_remove(tb
, 0);
809 tb_jmp_remove(tb
, 1);
811 /* suppress any remaining jumps to this TB */
817 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
818 tb2
= tb1
->jmp_next
[n1
];
819 tb_reset_jump(tb1
, n1
);
820 tb1
->jmp_next
[n1
] = NULL
;
823 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2); /* fail safe */
825 tb_phys_invalidate_count
++;
828 static inline void set_bits(uint8_t *tab
, int start
, int len
)
834 mask
= 0xff << (start
& 7);
835 if ((start
& ~7) == (end
& ~7)) {
837 mask
&= ~(0xff << (end
& 7));
842 start
= (start
+ 8) & ~7;
844 while (start
< end1
) {
849 mask
= ~(0xff << (end
& 7));
855 static void build_page_bitmap(PageDesc
*p
)
857 int n
, tb_start
, tb_end
;
858 TranslationBlock
*tb
;
860 p
->code_bitmap
= qemu_mallocz(TARGET_PAGE_SIZE
/ 8);
865 tb
= (TranslationBlock
*)((long)tb
& ~3);
866 /* NOTE: this is subtle as a TB may span two physical pages */
868 /* NOTE: tb_end may be after the end of the page, but
869 it is not a problem */
870 tb_start
= tb
->pc
& ~TARGET_PAGE_MASK
;
871 tb_end
= tb_start
+ tb
->size
;
872 if (tb_end
> TARGET_PAGE_SIZE
)
873 tb_end
= TARGET_PAGE_SIZE
;
876 tb_end
= ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
878 set_bits(p
->code_bitmap
, tb_start
, tb_end
- tb_start
);
879 tb
= tb
->page_next
[n
];
883 TranslationBlock
*tb_gen_code(CPUState
*env
,
884 target_ulong pc
, target_ulong cs_base
,
885 int flags
, int cflags
)
887 TranslationBlock
*tb
;
889 target_ulong phys_pc
, phys_page2
, virt_page2
;
892 phys_pc
= get_phys_addr_code(env
, pc
);
895 /* flush must be done */
897 /* cannot fail at this point */
899 /* Don't forget to invalidate previous TB info. */
900 tb_invalidated_flag
= 1;
902 tc_ptr
= code_gen_ptr
;
904 tb
->cs_base
= cs_base
;
907 cpu_gen_code(env
, tb
, &code_gen_size
);
908 code_gen_ptr
= (void *)(((unsigned long)code_gen_ptr
+ code_gen_size
+ CODE_GEN_ALIGN
- 1) & ~(CODE_GEN_ALIGN
- 1));
910 /* check next page if needed */
911 virt_page2
= (pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
;
913 if ((pc
& TARGET_PAGE_MASK
) != virt_page2
) {
914 phys_page2
= get_phys_addr_code(env
, virt_page2
);
916 tb_link_phys(tb
, phys_pc
, phys_page2
);
920 /* invalidate all TBs which intersect with the target physical page
921 starting in range [start;end[. NOTE: start and end must refer to
922 the same physical page. 'is_cpu_write_access' should be true if called
923 from a real cpu write access: the virtual CPU will exit the current
924 TB if code is modified inside this TB. */
925 void tb_invalidate_phys_page_range(target_phys_addr_t start
, target_phys_addr_t end
,
926 int is_cpu_write_access
)
928 TranslationBlock
*tb
, *tb_next
, *saved_tb
;
929 CPUState
*env
= cpu_single_env
;
930 target_ulong tb_start
, tb_end
;
933 #ifdef TARGET_HAS_PRECISE_SMC
934 int current_tb_not_found
= is_cpu_write_access
;
935 TranslationBlock
*current_tb
= NULL
;
936 int current_tb_modified
= 0;
937 target_ulong current_pc
= 0;
938 target_ulong current_cs_base
= 0;
939 int current_flags
= 0;
940 #endif /* TARGET_HAS_PRECISE_SMC */
942 p
= page_find(start
>> TARGET_PAGE_BITS
);
945 if (!p
->code_bitmap
&&
946 ++p
->code_write_count
>= SMC_BITMAP_USE_THRESHOLD
&&
947 is_cpu_write_access
) {
948 /* build code bitmap */
949 build_page_bitmap(p
);
952 /* we remove all the TBs in the range [start, end[ */
953 /* XXX: see if in some cases it could be faster to invalidate all the code */
957 tb
= (TranslationBlock
*)((long)tb
& ~3);
958 tb_next
= tb
->page_next
[n
];
959 /* NOTE: this is subtle as a TB may span two physical pages */
961 /* NOTE: tb_end may be after the end of the page, but
962 it is not a problem */
963 tb_start
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
964 tb_end
= tb_start
+ tb
->size
;
966 tb_start
= tb
->page_addr
[1];
967 tb_end
= tb_start
+ ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
969 if (!(tb_end
<= start
|| tb_start
>= end
)) {
970 #ifdef TARGET_HAS_PRECISE_SMC
971 if (current_tb_not_found
) {
972 current_tb_not_found
= 0;
974 if (env
->mem_io_pc
) {
975 /* now we have a real cpu fault */
976 current_tb
= tb_find_pc(env
->mem_io_pc
);
979 if (current_tb
== tb
&&
980 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
981 /* If we are modifying the current TB, we must stop
982 its execution. We could be more precise by checking
983 that the modification is after the current PC, but it
984 would require a specialized function to partially
985 restore the CPU state */
987 current_tb_modified
= 1;
988 cpu_restore_state(current_tb
, env
,
989 env
->mem_io_pc
, NULL
);
990 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
993 #endif /* TARGET_HAS_PRECISE_SMC */
994 /* we need to do that to handle the case where a signal
995 occurs while doing tb_phys_invalidate() */
998 saved_tb
= env
->current_tb
;
999 env
->current_tb
= NULL
;
1001 tb_phys_invalidate(tb
, -1);
1003 env
->current_tb
= saved_tb
;
1004 if (env
->interrupt_request
&& env
->current_tb
)
1005 cpu_interrupt(env
, env
->interrupt_request
);
1010 #if !defined(CONFIG_USER_ONLY)
1011 /* if no code remaining, no need to continue to use slow writes */
1013 invalidate_page_bitmap(p
);
1014 if (is_cpu_write_access
) {
1015 tlb_unprotect_code_phys(env
, start
, env
->mem_io_vaddr
);
1019 #ifdef TARGET_HAS_PRECISE_SMC
1020 if (current_tb_modified
) {
1021 /* we generate a block containing just the instruction
1022 modifying the memory. It will ensure that it cannot modify
1024 env
->current_tb
= NULL
;
1025 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
1026 cpu_resume_from_signal(env
, NULL
);
1031 /* len must be <= 8 and start must be a multiple of len */
1032 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start
, int len
)
1038 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1039 cpu_single_env
->mem_io_vaddr
, len
,
1040 cpu_single_env
->eip
,
1041 cpu_single_env
->eip
+ (long)cpu_single_env
->segs
[R_CS
].base
);
1044 p
= page_find(start
>> TARGET_PAGE_BITS
);
1047 if (p
->code_bitmap
) {
1048 offset
= start
& ~TARGET_PAGE_MASK
;
1049 b
= p
->code_bitmap
[offset
>> 3] >> (offset
& 7);
1050 if (b
& ((1 << len
) - 1))
1054 tb_invalidate_phys_page_range(start
, start
+ len
, 1);
1058 #if !defined(CONFIG_SOFTMMU)
1059 static void tb_invalidate_phys_page(target_phys_addr_t addr
,
1060 unsigned long pc
, void *puc
)
1062 TranslationBlock
*tb
;
1065 #ifdef TARGET_HAS_PRECISE_SMC
1066 TranslationBlock
*current_tb
= NULL
;
1067 CPUState
*env
= cpu_single_env
;
1068 int current_tb_modified
= 0;
1069 target_ulong current_pc
= 0;
1070 target_ulong current_cs_base
= 0;
1071 int current_flags
= 0;
1074 addr
&= TARGET_PAGE_MASK
;
1075 p
= page_find(addr
>> TARGET_PAGE_BITS
);
1079 #ifdef TARGET_HAS_PRECISE_SMC
1080 if (tb
&& pc
!= 0) {
1081 current_tb
= tb_find_pc(pc
);
1084 while (tb
!= NULL
) {
1086 tb
= (TranslationBlock
*)((long)tb
& ~3);
1087 #ifdef TARGET_HAS_PRECISE_SMC
1088 if (current_tb
== tb
&&
1089 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
1090 /* If we are modifying the current TB, we must stop
1091 its execution. We could be more precise by checking
1092 that the modification is after the current PC, but it
1093 would require a specialized function to partially
1094 restore the CPU state */
1096 current_tb_modified
= 1;
1097 cpu_restore_state(current_tb
, env
, pc
, puc
);
1098 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
1101 #endif /* TARGET_HAS_PRECISE_SMC */
1102 tb_phys_invalidate(tb
, addr
);
1103 tb
= tb
->page_next
[n
];
1106 #ifdef TARGET_HAS_PRECISE_SMC
1107 if (current_tb_modified
) {
1108 /* we generate a block containing just the instruction
1109 modifying the memory. It will ensure that it cannot modify
1111 env
->current_tb
= NULL
;
1112 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
1113 cpu_resume_from_signal(env
, puc
);
1119 /* add the tb in the target page and protect it if necessary */
1120 static inline void tb_alloc_page(TranslationBlock
*tb
,
1121 unsigned int n
, target_ulong page_addr
)
1124 TranslationBlock
*last_first_tb
;
1126 tb
->page_addr
[n
] = page_addr
;
1127 p
= page_find_alloc(page_addr
>> TARGET_PAGE_BITS
);
1128 tb
->page_next
[n
] = p
->first_tb
;
1129 last_first_tb
= p
->first_tb
;
1130 p
->first_tb
= (TranslationBlock
*)((long)tb
| n
);
1131 invalidate_page_bitmap(p
);
1133 #if defined(TARGET_HAS_SMC) || 1
1135 #if defined(CONFIG_USER_ONLY)
1136 if (p
->flags
& PAGE_WRITE
) {
1141 /* force the host page as non writable (writes will have a
1142 page fault + mprotect overhead) */
1143 page_addr
&= qemu_host_page_mask
;
1145 for(addr
= page_addr
; addr
< page_addr
+ qemu_host_page_size
;
1146 addr
+= TARGET_PAGE_SIZE
) {
1148 p2
= page_find (addr
>> TARGET_PAGE_BITS
);
1152 p2
->flags
&= ~PAGE_WRITE
;
1153 page_get_flags(addr
);
1155 mprotect(g2h(page_addr
), qemu_host_page_size
,
1156 (prot
& PAGE_BITS
) & ~PAGE_WRITE
);
1157 #ifdef DEBUG_TB_INVALIDATE
1158 printf("protecting code page: 0x" TARGET_FMT_lx
"\n",
1163 /* if some code is already present, then the pages are already
1164 protected. So we handle the case where only the first TB is
1165 allocated in a physical page */
1166 if (!last_first_tb
) {
1167 tlb_protect_code(page_addr
);
1171 #endif /* TARGET_HAS_SMC */
1174 /* Allocate a new translation block. Flush the translation buffer if
1175 too many translation blocks or too much generated code. */
1176 TranslationBlock
*tb_alloc(target_ulong pc
)
1178 TranslationBlock
*tb
;
1180 if (nb_tbs
>= code_gen_max_blocks
||
1181 (code_gen_ptr
- code_gen_buffer
) >= code_gen_buffer_max_size
)
1183 tb
= &tbs
[nb_tbs
++];
1189 void tb_free(TranslationBlock
*tb
)
1191 /* In practice this is mostly used for single use temporary TB
1192 Ignore the hard cases and just back up if this TB happens to
1193 be the last one generated. */
1194 if (nb_tbs
> 0 && tb
== &tbs
[nb_tbs
- 1]) {
1195 code_gen_ptr
= tb
->tc_ptr
;
1200 /* add a new TB and link it to the physical page tables. phys_page2 is
1201 (-1) to indicate that only one page contains the TB. */
1202 void tb_link_phys(TranslationBlock
*tb
,
1203 target_ulong phys_pc
, target_ulong phys_page2
)
1206 TranslationBlock
**ptb
;
1208 /* Grab the mmap lock to stop another thread invalidating this TB
1209 before we are done. */
1211 /* add in the physical hash table */
1212 h
= tb_phys_hash_func(phys_pc
);
1213 ptb
= &tb_phys_hash
[h
];
1214 tb
->phys_hash_next
= *ptb
;
1217 /* add in the page list */
1218 tb_alloc_page(tb
, 0, phys_pc
& TARGET_PAGE_MASK
);
1219 if (phys_page2
!= -1)
1220 tb_alloc_page(tb
, 1, phys_page2
);
1222 tb
->page_addr
[1] = -1;
1224 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2);
1225 tb
->jmp_next
[0] = NULL
;
1226 tb
->jmp_next
[1] = NULL
;
1228 /* init original jump addresses */
1229 if (tb
->tb_next_offset
[0] != 0xffff)
1230 tb_reset_jump(tb
, 0);
1231 if (tb
->tb_next_offset
[1] != 0xffff)
1232 tb_reset_jump(tb
, 1);
1234 #ifdef DEBUG_TB_CHECK
1240 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1241 tb[1].tc_ptr. Return NULL if not found */
1242 TranslationBlock
*tb_find_pc(unsigned long tc_ptr
)
1244 int m_min
, m_max
, m
;
1246 TranslationBlock
*tb
;
1250 if (tc_ptr
< (unsigned long)code_gen_buffer
||
1251 tc_ptr
>= (unsigned long)code_gen_ptr
)
1253 /* binary search (cf Knuth) */
1256 while (m_min
<= m_max
) {
1257 m
= (m_min
+ m_max
) >> 1;
1259 v
= (unsigned long)tb
->tc_ptr
;
1262 else if (tc_ptr
< v
) {
1271 static void tb_reset_jump_recursive(TranslationBlock
*tb
);
1273 static inline void tb_reset_jump_recursive2(TranslationBlock
*tb
, int n
)
1275 TranslationBlock
*tb1
, *tb_next
, **ptb
;
1278 tb1
= tb
->jmp_next
[n
];
1280 /* find head of list */
1283 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1286 tb1
= tb1
->jmp_next
[n1
];
1288 /* we are now sure now that tb jumps to tb1 */
1291 /* remove tb from the jmp_first list */
1292 ptb
= &tb_next
->jmp_first
;
1296 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1297 if (n1
== n
&& tb1
== tb
)
1299 ptb
= &tb1
->jmp_next
[n1
];
1301 *ptb
= tb
->jmp_next
[n
];
1302 tb
->jmp_next
[n
] = NULL
;
1304 /* suppress the jump to next tb in generated code */
1305 tb_reset_jump(tb
, n
);
1307 /* suppress jumps in the tb on which we could have jumped */
1308 tb_reset_jump_recursive(tb_next
);
1312 static void tb_reset_jump_recursive(TranslationBlock
*tb
)
1314 tb_reset_jump_recursive2(tb
, 0);
1315 tb_reset_jump_recursive2(tb
, 1);
1318 #if defined(TARGET_HAS_ICE)
1319 #if defined(CONFIG_USER_ONLY)
1320 static void breakpoint_invalidate(CPUState
*env
, target_ulong pc
)
1322 tb_invalidate_phys_page_range(pc
, pc
+ 1, 0);
1325 static void breakpoint_invalidate(CPUState
*env
, target_ulong pc
)
1327 target_phys_addr_t addr
;
1329 ram_addr_t ram_addr
;
1332 addr
= cpu_get_phys_page_debug(env
, pc
);
1333 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
1335 pd
= IO_MEM_UNASSIGNED
;
1337 pd
= p
->phys_offset
;
1339 ram_addr
= (pd
& TARGET_PAGE_MASK
) | (pc
& ~TARGET_PAGE_MASK
);
1340 tb_invalidate_phys_page_range(ram_addr
, ram_addr
+ 1, 0);
1343 #endif /* TARGET_HAS_ICE */
1345 /* Add a watchpoint. */
1346 int cpu_watchpoint_insert(CPUState
*env
, target_ulong addr
, target_ulong len
,
1347 int flags
, CPUWatchpoint
**watchpoint
)
1349 target_ulong len_mask
= ~(len
- 1);
1352 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1353 if ((len
!= 1 && len
!= 2 && len
!= 4 && len
!= 8) || (addr
& ~len_mask
)) {
1354 fprintf(stderr
, "qemu: tried to set invalid watchpoint at "
1355 TARGET_FMT_lx
", len=" TARGET_FMT_lu
"\n", addr
, len
);
1358 wp
= qemu_malloc(sizeof(*wp
));
1361 wp
->len_mask
= len_mask
;
1364 /* keep all GDB-injected watchpoints in front */
1366 QTAILQ_INSERT_HEAD(&env
->watchpoints
, wp
, entry
);
1368 QTAILQ_INSERT_TAIL(&env
->watchpoints
, wp
, entry
);
1370 tlb_flush_page(env
, addr
);
1377 /* Remove a specific watchpoint. */
1378 int cpu_watchpoint_remove(CPUState
*env
, target_ulong addr
, target_ulong len
,
1381 target_ulong len_mask
= ~(len
- 1);
1384 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
1385 if (addr
== wp
->vaddr
&& len_mask
== wp
->len_mask
1386 && flags
== (wp
->flags
& ~BP_WATCHPOINT_HIT
)) {
1387 cpu_watchpoint_remove_by_ref(env
, wp
);
1394 /* Remove a specific watchpoint by reference. */
1395 void cpu_watchpoint_remove_by_ref(CPUState
*env
, CPUWatchpoint
*watchpoint
)
1397 QTAILQ_REMOVE(&env
->watchpoints
, watchpoint
, entry
);
1399 tlb_flush_page(env
, watchpoint
->vaddr
);
1401 qemu_free(watchpoint
);
1404 /* Remove all matching watchpoints. */
1405 void cpu_watchpoint_remove_all(CPUState
*env
, int mask
)
1407 CPUWatchpoint
*wp
, *next
;
1409 QTAILQ_FOREACH_SAFE(wp
, &env
->watchpoints
, entry
, next
) {
1410 if (wp
->flags
& mask
)
1411 cpu_watchpoint_remove_by_ref(env
, wp
);
1415 /* Add a breakpoint. */
1416 int cpu_breakpoint_insert(CPUState
*env
, target_ulong pc
, int flags
,
1417 CPUBreakpoint
**breakpoint
)
1419 #if defined(TARGET_HAS_ICE)
1422 bp
= qemu_malloc(sizeof(*bp
));
1427 /* keep all GDB-injected breakpoints in front */
1429 QTAILQ_INSERT_HEAD(&env
->breakpoints
, bp
, entry
);
1431 QTAILQ_INSERT_TAIL(&env
->breakpoints
, bp
, entry
);
1433 breakpoint_invalidate(env
, pc
);
1443 /* Remove a specific breakpoint. */
1444 int cpu_breakpoint_remove(CPUState
*env
, target_ulong pc
, int flags
)
1446 #if defined(TARGET_HAS_ICE)
1449 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
1450 if (bp
->pc
== pc
&& bp
->flags
== flags
) {
1451 cpu_breakpoint_remove_by_ref(env
, bp
);
1461 /* Remove a specific breakpoint by reference. */
1462 void cpu_breakpoint_remove_by_ref(CPUState
*env
, CPUBreakpoint
*breakpoint
)
1464 #if defined(TARGET_HAS_ICE)
1465 QTAILQ_REMOVE(&env
->breakpoints
, breakpoint
, entry
);
1467 breakpoint_invalidate(env
, breakpoint
->pc
);
1469 qemu_free(breakpoint
);
1473 /* Remove all matching breakpoints. */
1474 void cpu_breakpoint_remove_all(CPUState
*env
, int mask
)
1476 #if defined(TARGET_HAS_ICE)
1477 CPUBreakpoint
*bp
, *next
;
1479 QTAILQ_FOREACH_SAFE(bp
, &env
->breakpoints
, entry
, next
) {
1480 if (bp
->flags
& mask
)
1481 cpu_breakpoint_remove_by_ref(env
, bp
);
1486 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1487 CPU loop after each instruction */
1488 void cpu_single_step(CPUState
*env
, int enabled
)
1490 #if defined(TARGET_HAS_ICE)
1491 if (env
->singlestep_enabled
!= enabled
) {
1492 env
->singlestep_enabled
= enabled
;
1494 kvm_update_guest_debug(env
, 0);
1496 /* must flush all the translated code to avoid inconsistencies */
1497 /* XXX: only flush what is necessary */
1504 /* enable or disable low levels log */
1505 void cpu_set_log(int log_flags
)
1507 loglevel
= log_flags
;
1508 if (loglevel
&& !logfile
) {
1509 logfile
= fopen(logfilename
, log_append
? "a" : "w");
1511 perror(logfilename
);
1514 #if !defined(CONFIG_SOFTMMU)
1515 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1517 static char logfile_buf
[4096];
1518 setvbuf(logfile
, logfile_buf
, _IOLBF
, sizeof(logfile_buf
));
1520 #elif !defined(_WIN32)
1521 /* Win32 doesn't support line-buffering and requires size >= 2 */
1522 setvbuf(logfile
, NULL
, _IOLBF
, 0);
1526 if (!loglevel
&& logfile
) {
1532 void cpu_set_log_filename(const char *filename
)
1534 logfilename
= strdup(filename
);
1539 cpu_set_log(loglevel
);
1542 static void cpu_unlink_tb(CPUState
*env
)
1544 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1545 problem and hope the cpu will stop of its own accord. For userspace
1546 emulation this often isn't actually as bad as it sounds. Often
1547 signals are used primarily to interrupt blocking syscalls. */
1548 TranslationBlock
*tb
;
1549 static spinlock_t interrupt_lock
= SPIN_LOCK_UNLOCKED
;
1551 spin_lock(&interrupt_lock
);
1552 tb
= env
->current_tb
;
1553 /* if the cpu is currently executing code, we must unlink it and
1554 all the potentially executing TB */
1556 env
->current_tb
= NULL
;
1557 tb_reset_jump_recursive(tb
);
1559 spin_unlock(&interrupt_lock
);
1562 /* mask must never be zero, except for A20 change call */
1563 void cpu_interrupt(CPUState
*env
, int mask
)
1567 old_mask
= env
->interrupt_request
;
1568 env
->interrupt_request
|= mask
;
1570 #ifndef CONFIG_USER_ONLY
1572 * If called from iothread context, wake the target cpu in
1575 if (!qemu_cpu_self(env
)) {
1582 env
->icount_decr
.u16
.high
= 0xffff;
1583 #ifndef CONFIG_USER_ONLY
1585 && (mask
& ~old_mask
) != 0) {
1586 cpu_abort(env
, "Raised interrupt while not in I/O function");
1594 void cpu_reset_interrupt(CPUState
*env
, int mask
)
1596 env
->interrupt_request
&= ~mask
;
1599 void cpu_exit(CPUState
*env
)
1601 env
->exit_request
= 1;
1605 const CPULogItem cpu_log_items
[] = {
1606 { CPU_LOG_TB_OUT_ASM
, "out_asm",
1607 "show generated host assembly code for each compiled TB" },
1608 { CPU_LOG_TB_IN_ASM
, "in_asm",
1609 "show target assembly code for each compiled TB" },
1610 { CPU_LOG_TB_OP
, "op",
1611 "show micro ops for each compiled TB" },
1612 { CPU_LOG_TB_OP_OPT
, "op_opt",
1615 "before eflags optimization and "
1617 "after liveness analysis" },
1618 { CPU_LOG_INT
, "int",
1619 "show interrupts/exceptions in short format" },
1620 { CPU_LOG_EXEC
, "exec",
1621 "show trace before each executed TB (lots of logs)" },
1622 { CPU_LOG_TB_CPU
, "cpu",
1623 "show CPU state before block translation" },
1625 { CPU_LOG_PCALL
, "pcall",
1626 "show protected mode far calls/returns/exceptions" },
1627 { CPU_LOG_RESET
, "cpu_reset",
1628 "show CPU state before CPU resets" },
1631 { CPU_LOG_IOPORT
, "ioport",
1632 "show all i/o ports accesses" },
1637 #ifndef CONFIG_USER_ONLY
1638 static QLIST_HEAD(memory_client_list
, CPUPhysMemoryClient
) memory_client_list
1639 = QLIST_HEAD_INITIALIZER(memory_client_list
);
1641 static void cpu_notify_set_memory(target_phys_addr_t start_addr
,
1643 ram_addr_t phys_offset
)
1645 CPUPhysMemoryClient
*client
;
1646 QLIST_FOREACH(client
, &memory_client_list
, list
) {
1647 client
->set_memory(client
, start_addr
, size
, phys_offset
);
1651 static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start
,
1652 target_phys_addr_t end
)
1654 CPUPhysMemoryClient
*client
;
1655 QLIST_FOREACH(client
, &memory_client_list
, list
) {
1656 int r
= client
->sync_dirty_bitmap(client
, start
, end
);
1663 static int cpu_notify_migration_log(int enable
)
1665 CPUPhysMemoryClient
*client
;
1666 QLIST_FOREACH(client
, &memory_client_list
, list
) {
1667 int r
= client
->migration_log(client
, enable
);
1674 static void phys_page_for_each_in_l1_map(PhysPageDesc
**phys_map
,
1675 CPUPhysMemoryClient
*client
)
1680 for (l1
= 0; l1
< L1_SIZE
; ++l1
) {
1685 for (l2
= 0; l2
< L2_SIZE
; ++l2
) {
1686 if (pd
[l2
].phys_offset
== IO_MEM_UNASSIGNED
) {
1689 client
->set_memory(client
, pd
[l2
].region_offset
,
1690 TARGET_PAGE_SIZE
, pd
[l2
].phys_offset
);
1695 static void phys_page_for_each(CPUPhysMemoryClient
*client
)
1697 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
1699 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
1700 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
1702 void **phys_map
= (void **)l1_phys_map
;
1707 for (l1
= 0; l1
< L1_SIZE
; ++l1
) {
1709 phys_page_for_each_in_l1_map(phys_map
[l1
], client
);
1716 phys_page_for_each_in_l1_map(l1_phys_map
, client
);
1720 void cpu_register_phys_memory_client(CPUPhysMemoryClient
*client
)
1722 QLIST_INSERT_HEAD(&memory_client_list
, client
, list
);
1723 phys_page_for_each(client
);
1726 void cpu_unregister_phys_memory_client(CPUPhysMemoryClient
*client
)
1728 QLIST_REMOVE(client
, list
);
1732 static int cmp1(const char *s1
, int n
, const char *s2
)
1734 if (strlen(s2
) != n
)
1736 return memcmp(s1
, s2
, n
) == 0;
1739 /* takes a comma separated list of log masks. Return 0 if error. */
1740 int cpu_str_to_log_mask(const char *str
)
1742 const CPULogItem
*item
;
1749 p1
= strchr(p
, ',');
1752 if(cmp1(p
,p1
-p
,"all")) {
1753 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1757 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1758 if (cmp1(p
, p1
- p
, item
->name
))
1772 void cpu_abort(CPUState
*env
, const char *fmt
, ...)
1779 fprintf(stderr
, "qemu: fatal: ");
1780 vfprintf(stderr
, fmt
, ap
);
1781 fprintf(stderr
, "\n");
1783 cpu_dump_state(env
, stderr
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1785 cpu_dump_state(env
, stderr
, fprintf
, 0);
1787 if (qemu_log_enabled()) {
1788 qemu_log("qemu: fatal: ");
1789 qemu_log_vprintf(fmt
, ap2
);
1792 log_cpu_state(env
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1794 log_cpu_state(env
, 0);
1801 #if defined(CONFIG_USER_ONLY)
1803 struct sigaction act
;
1804 sigfillset(&act
.sa_mask
);
1805 act
.sa_handler
= SIG_DFL
;
1806 sigaction(SIGABRT
, &act
, NULL
);
1812 CPUState
*cpu_copy(CPUState
*env
)
1814 CPUState
*new_env
= cpu_init(env
->cpu_model_str
);
1815 CPUState
*next_cpu
= new_env
->next_cpu
;
1816 int cpu_index
= new_env
->cpu_index
;
1817 #if defined(TARGET_HAS_ICE)
1822 memcpy(new_env
, env
, sizeof(CPUState
));
1824 /* Preserve chaining and index. */
1825 new_env
->next_cpu
= next_cpu
;
1826 new_env
->cpu_index
= cpu_index
;
1828 /* Clone all break/watchpoints.
1829 Note: Once we support ptrace with hw-debug register access, make sure
1830 BP_CPU break/watchpoints are handled correctly on clone. */
1831 QTAILQ_INIT(&env
->breakpoints
);
1832 QTAILQ_INIT(&env
->watchpoints
);
1833 #if defined(TARGET_HAS_ICE)
1834 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
1835 cpu_breakpoint_insert(new_env
, bp
->pc
, bp
->flags
, NULL
);
1837 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
1838 cpu_watchpoint_insert(new_env
, wp
->vaddr
, (~wp
->len_mask
) + 1,
1846 #if !defined(CONFIG_USER_ONLY)
1848 static inline void tlb_flush_jmp_cache(CPUState
*env
, target_ulong addr
)
1852 /* Discard jump cache entries for any tb which might potentially
1853 overlap the flushed page. */
1854 i
= tb_jmp_cache_hash_page(addr
- TARGET_PAGE_SIZE
);
1855 memset (&env
->tb_jmp_cache
[i
], 0,
1856 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1858 i
= tb_jmp_cache_hash_page(addr
);
1859 memset (&env
->tb_jmp_cache
[i
], 0,
1860 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1863 static CPUTLBEntry s_cputlb_empty_entry
= {
1870 /* NOTE: if flush_global is true, also flush global entries (not
1872 void tlb_flush(CPUState
*env
, int flush_global
)
1876 #if defined(DEBUG_TLB)
1877 printf("tlb_flush:\n");
1879 /* must reset current TB so that interrupts cannot modify the
1880 links while we are modifying them */
1881 env
->current_tb
= NULL
;
1883 for(i
= 0; i
< CPU_TLB_SIZE
; i
++) {
1885 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++) {
1886 env
->tlb_table
[mmu_idx
][i
] = s_cputlb_empty_entry
;
1890 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
1895 static inline void tlb_flush_entry(CPUTLBEntry
*tlb_entry
, target_ulong addr
)
1897 if (addr
== (tlb_entry
->addr_read
&
1898 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1899 addr
== (tlb_entry
->addr_write
&
1900 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1901 addr
== (tlb_entry
->addr_code
&
1902 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
))) {
1903 *tlb_entry
= s_cputlb_empty_entry
;
1907 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1912 #if defined(DEBUG_TLB)
1913 printf("tlb_flush_page: " TARGET_FMT_lx
"\n", addr
);
1915 /* must reset current TB so that interrupts cannot modify the
1916 links while we are modifying them */
1917 env
->current_tb
= NULL
;
1919 addr
&= TARGET_PAGE_MASK
;
1920 i
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1921 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++)
1922 tlb_flush_entry(&env
->tlb_table
[mmu_idx
][i
], addr
);
1924 tlb_flush_jmp_cache(env
, addr
);
1927 /* update the TLBs so that writes to code in the virtual page 'addr'
1929 static void tlb_protect_code(ram_addr_t ram_addr
)
1931 cpu_physical_memory_reset_dirty(ram_addr
,
1932 ram_addr
+ TARGET_PAGE_SIZE
,
1936 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1937 tested for self modifying code */
1938 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
1941 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] |= CODE_DIRTY_FLAG
;
1944 static inline void tlb_reset_dirty_range(CPUTLBEntry
*tlb_entry
,
1945 unsigned long start
, unsigned long length
)
1948 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1949 addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) + tlb_entry
->addend
;
1950 if ((addr
- start
) < length
) {
1951 tlb_entry
->addr_write
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) | TLB_NOTDIRTY
;
1956 /* Note: start and end must be within the same ram block. */
1957 void cpu_physical_memory_reset_dirty(ram_addr_t start
, ram_addr_t end
,
1961 unsigned long length
, start1
;
1965 start
&= TARGET_PAGE_MASK
;
1966 end
= TARGET_PAGE_ALIGN(end
);
1968 length
= end
- start
;
1971 len
= length
>> TARGET_PAGE_BITS
;
1972 mask
= ~dirty_flags
;
1973 p
= phys_ram_dirty
+ (start
>> TARGET_PAGE_BITS
);
1974 for(i
= 0; i
< len
; i
++)
1977 /* we modify the TLB cache so that the dirty bit will be set again
1978 when accessing the range */
1979 start1
= (unsigned long)qemu_get_ram_ptr(start
);
1980 /* Chek that we don't span multiple blocks - this breaks the
1981 address comparisons below. */
1982 if ((unsigned long)qemu_get_ram_ptr(end
- 1) - start1
1983 != (end
- 1) - start
) {
1987 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
1989 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++) {
1990 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1991 tlb_reset_dirty_range(&env
->tlb_table
[mmu_idx
][i
],
1997 int cpu_physical_memory_set_dirty_tracking(int enable
)
2000 in_migration
= enable
;
2001 ret
= cpu_notify_migration_log(!!enable
);
2005 int cpu_physical_memory_get_dirty_tracking(void)
2007 return in_migration
;
2010 int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr
,
2011 target_phys_addr_t end_addr
)
2015 ret
= cpu_notify_sync_dirty_bitmap(start_addr
, end_addr
);
2019 static inline void tlb_update_dirty(CPUTLBEntry
*tlb_entry
)
2021 ram_addr_t ram_addr
;
2024 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
2025 p
= (void *)(unsigned long)((tlb_entry
->addr_write
& TARGET_PAGE_MASK
)
2026 + tlb_entry
->addend
);
2027 ram_addr
= qemu_ram_addr_from_host(p
);
2028 if (!cpu_physical_memory_is_dirty(ram_addr
)) {
2029 tlb_entry
->addr_write
|= TLB_NOTDIRTY
;
2034 /* update the TLB according to the current state of the dirty bits */
2035 void cpu_tlb_update_dirty(CPUState
*env
)
2039 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++) {
2040 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
2041 tlb_update_dirty(&env
->tlb_table
[mmu_idx
][i
]);
2045 static inline void tlb_set_dirty1(CPUTLBEntry
*tlb_entry
, target_ulong vaddr
)
2047 if (tlb_entry
->addr_write
== (vaddr
| TLB_NOTDIRTY
))
2048 tlb_entry
->addr_write
= vaddr
;
2051 /* update the TLB corresponding to virtual page vaddr
2052 so that it is no longer dirty */
2053 static inline void tlb_set_dirty(CPUState
*env
, target_ulong vaddr
)
2058 vaddr
&= TARGET_PAGE_MASK
;
2059 i
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
2060 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++)
2061 tlb_set_dirty1(&env
->tlb_table
[mmu_idx
][i
], vaddr
);
2064 /* add a new TLB entry. At most one entry for a given virtual address
2065 is permitted. Return 0 if OK or 2 if the page could not be mapped
2066 (can only happen in non SOFTMMU mode for I/O pages or pages
2067 conflicting with the host address space). */
2068 int tlb_set_page_exec(CPUState
*env
, target_ulong vaddr
,
2069 target_phys_addr_t paddr
, int prot
,
2070 int mmu_idx
, int is_softmmu
)
2075 target_ulong address
;
2076 target_ulong code_address
;
2077 target_phys_addr_t addend
;
2081 target_phys_addr_t iotlb
;
2083 p
= phys_page_find(paddr
>> TARGET_PAGE_BITS
);
2085 pd
= IO_MEM_UNASSIGNED
;
2087 pd
= p
->phys_offset
;
2089 #if defined(DEBUG_TLB)
2090 printf("tlb_set_page: vaddr=" TARGET_FMT_lx
" paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2091 vaddr
, (int)paddr
, prot
, mmu_idx
, is_softmmu
, pd
);
2096 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&& !(pd
& IO_MEM_ROMD
)) {
2097 /* IO memory case (romd handled later) */
2098 address
|= TLB_MMIO
;
2100 addend
= (unsigned long)qemu_get_ram_ptr(pd
& TARGET_PAGE_MASK
);
2101 if ((pd
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
) {
2103 iotlb
= pd
& TARGET_PAGE_MASK
;
2104 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
)
2105 iotlb
|= IO_MEM_NOTDIRTY
;
2107 iotlb
|= IO_MEM_ROM
;
2109 /* IO handlers are currently passed a physical address.
2110 It would be nice to pass an offset from the base address
2111 of that region. This would avoid having to special case RAM,
2112 and avoid full address decoding in every device.
2113 We can't use the high bits of pd for this because
2114 IO_MEM_ROMD uses these as a ram address. */
2115 iotlb
= (pd
& ~TARGET_PAGE_MASK
);
2117 iotlb
+= p
->region_offset
;
2123 code_address
= address
;
2124 /* Make accesses to pages with watchpoints go via the
2125 watchpoint trap routines. */
2126 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
2127 if (vaddr
== (wp
->vaddr
& TARGET_PAGE_MASK
)) {
2128 iotlb
= io_mem_watch
+ paddr
;
2129 /* TODO: The memory case can be optimized by not trapping
2130 reads of pages with a write breakpoint. */
2131 address
|= TLB_MMIO
;
2135 index
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
2136 env
->iotlb
[mmu_idx
][index
] = iotlb
- vaddr
;
2137 te
= &env
->tlb_table
[mmu_idx
][index
];
2138 te
->addend
= addend
- vaddr
;
2139 if (prot
& PAGE_READ
) {
2140 te
->addr_read
= address
;
2145 if (prot
& PAGE_EXEC
) {
2146 te
->addr_code
= code_address
;
2150 if (prot
& PAGE_WRITE
) {
2151 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_ROM
||
2152 (pd
& IO_MEM_ROMD
)) {
2153 /* Write access calls the I/O callback. */
2154 te
->addr_write
= address
| TLB_MMIO
;
2155 } else if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
&&
2156 !cpu_physical_memory_is_dirty(pd
)) {
2157 te
->addr_write
= address
| TLB_NOTDIRTY
;
2159 te
->addr_write
= address
;
2162 te
->addr_write
= -1;
2169 void tlb_flush(CPUState
*env
, int flush_global
)
2173 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
2177 int tlb_set_page_exec(CPUState
*env
, target_ulong vaddr
,
2178 target_phys_addr_t paddr
, int prot
,
2179 int mmu_idx
, int is_softmmu
)
2185 * Walks guest process memory "regions" one by one
2186 * and calls callback function 'fn' for each region.
2188 int walk_memory_regions(void *priv
,
2189 int (*fn
)(void *, unsigned long, unsigned long, unsigned long))
2191 unsigned long start
, end
;
2193 int i
, j
, prot
, prot1
;
2199 for (i
= 0; i
<= L1_SIZE
; i
++) {
2200 p
= (i
< L1_SIZE
) ? l1_map
[i
] : NULL
;
2201 for (j
= 0; j
< L2_SIZE
; j
++) {
2202 prot1
= (p
== NULL
) ? 0 : p
[j
].flags
;
2204 * "region" is one continuous chunk of memory
2205 * that has same protection flags set.
2207 if (prot1
!= prot
) {
2208 end
= (i
<< (32 - L1_BITS
)) | (j
<< TARGET_PAGE_BITS
);
2210 rc
= (*fn
)(priv
, start
, end
, prot
);
2211 /* callback can stop iteration by returning != 0 */
2228 static int dump_region(void *priv
, unsigned long start
,
2229 unsigned long end
, unsigned long prot
)
2231 FILE *f
= (FILE *)priv
;
2233 (void) fprintf(f
, "%08lx-%08lx %08lx %c%c%c\n",
2234 start
, end
, end
- start
,
2235 ((prot
& PAGE_READ
) ? 'r' : '-'),
2236 ((prot
& PAGE_WRITE
) ? 'w' : '-'),
2237 ((prot
& PAGE_EXEC
) ? 'x' : '-'));
2242 /* dump memory mappings */
2243 void page_dump(FILE *f
)
2245 (void) fprintf(f
, "%-8s %-8s %-8s %s\n",
2246 "start", "end", "size", "prot");
2247 walk_memory_regions(f
, dump_region
);
2250 int page_get_flags(target_ulong address
)
2254 p
= page_find(address
>> TARGET_PAGE_BITS
);
2260 /* modify the flags of a page and invalidate the code if
2261 necessary. The flag PAGE_WRITE_ORG is positioned automatically
2262 depending on PAGE_WRITE */
2263 void page_set_flags(target_ulong start
, target_ulong end
, int flags
)
2268 /* mmap_lock should already be held. */
2269 start
= start
& TARGET_PAGE_MASK
;
2270 end
= TARGET_PAGE_ALIGN(end
);
2271 if (flags
& PAGE_WRITE
)
2272 flags
|= PAGE_WRITE_ORG
;
2273 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
2274 p
= page_find_alloc(addr
>> TARGET_PAGE_BITS
);
2275 /* We may be called for host regions that are outside guest
2279 /* if the write protection is set, then we invalidate the code
2281 if (!(p
->flags
& PAGE_WRITE
) &&
2282 (flags
& PAGE_WRITE
) &&
2284 tb_invalidate_phys_page(addr
, 0, NULL
);
2290 int page_check_range(target_ulong start
, target_ulong len
, int flags
)
2296 if (start
+ len
< start
)
2297 /* we've wrapped around */
2300 end
= TARGET_PAGE_ALIGN(start
+len
); /* must do before we loose bits in the next step */
2301 start
= start
& TARGET_PAGE_MASK
;
2303 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
2304 p
= page_find(addr
>> TARGET_PAGE_BITS
);
2307 if( !(p
->flags
& PAGE_VALID
) )
2310 if ((flags
& PAGE_READ
) && !(p
->flags
& PAGE_READ
))
2312 if (flags
& PAGE_WRITE
) {
2313 if (!(p
->flags
& PAGE_WRITE_ORG
))
2315 /* unprotect the page if it was put read-only because it
2316 contains translated code */
2317 if (!(p
->flags
& PAGE_WRITE
)) {
2318 if (!page_unprotect(addr
, 0, NULL
))
2327 /* called from signal handler: invalidate the code and unprotect the
2328 page. Return TRUE if the fault was successfully handled. */
2329 int page_unprotect(target_ulong address
, unsigned long pc
, void *puc
)
2331 unsigned int page_index
, prot
, pindex
;
2333 target_ulong host_start
, host_end
, addr
;
2335 /* Technically this isn't safe inside a signal handler. However we
2336 know this only ever happens in a synchronous SEGV handler, so in
2337 practice it seems to be ok. */
2340 host_start
= address
& qemu_host_page_mask
;
2341 page_index
= host_start
>> TARGET_PAGE_BITS
;
2342 p1
= page_find(page_index
);
2347 host_end
= host_start
+ qemu_host_page_size
;
2350 for(addr
= host_start
;addr
< host_end
; addr
+= TARGET_PAGE_SIZE
) {
2354 /* if the page was really writable, then we change its
2355 protection back to writable */
2356 if (prot
& PAGE_WRITE_ORG
) {
2357 pindex
= (address
- host_start
) >> TARGET_PAGE_BITS
;
2358 if (!(p1
[pindex
].flags
& PAGE_WRITE
)) {
2359 mprotect((void *)g2h(host_start
), qemu_host_page_size
,
2360 (prot
& PAGE_BITS
) | PAGE_WRITE
);
2361 p1
[pindex
].flags
|= PAGE_WRITE
;
2362 /* and since the content will be modified, we must invalidate
2363 the corresponding translated code. */
2364 tb_invalidate_phys_page(address
, pc
, puc
);
2365 #ifdef DEBUG_TB_CHECK
2366 tb_invalidate_check(address
);
2376 static inline void tlb_set_dirty(CPUState
*env
,
2377 unsigned long addr
, target_ulong vaddr
)
2380 #endif /* defined(CONFIG_USER_ONLY) */
2382 #if !defined(CONFIG_USER_ONLY)
2384 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
2385 ram_addr_t memory
, ram_addr_t region_offset
);
2386 static void *subpage_init (target_phys_addr_t base
, ram_addr_t
*phys
,
2387 ram_addr_t orig_memory
, ram_addr_t region_offset
);
2388 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2391 if (addr > start_addr) \
2394 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2395 if (start_addr2 > 0) \
2399 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2400 end_addr2 = TARGET_PAGE_SIZE - 1; \
2402 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2403 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
/* register physical memory.
   For RAM, 'size' must be a multiple of the target page size.
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ.  */
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
                                         ram_addr_t size,
                                         ram_addr_t phys_offset,
                                         ram_addr_t region_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    void *subpage;

    cpu_notify_set_memory(start_addr, size, phys_offset);

    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
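
/* Illustrative sketch, not part of the original file: board code
   typically drives the function above through the region_offset == 0
   wrapper cpu_register_physical_memory() (declared in cpu-common.h),
   backing guest-physical ranges either with RAM from qemu_ram_alloc()
   or with an io_index from cpu_register_io_memory().  Addresses, sizes
   and names here are invented for the example. */
#if 0
static void example_map_board_memory(int example_mmio_io_index)
{
    /* 32 MB of guest RAM at guest-physical address 0 */
    ram_addr_t ram_offset = qemu_ram_alloc(32 * 1024 * 1024);
    cpu_register_physical_memory(0x00000000, 32 * 1024 * 1024,
                                 ram_offset | IO_MEM_RAM);

    /* one page of MMIO at an arbitrary address */
    cpu_register_physical_memory(0x10000000, TARGET_PAGE_SIZE,
                                 example_mmio_io_index);
}
#endif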
/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_coalesce_mmio_region(addr, size);
}

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_uncoalesce_mmio_region(addr, size);
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    RAMBlock *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = qemu_malloc(sizeof(*new_block));

#if defined(TARGET_S390X) && defined(CONFIG_KVM)
    /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
    new_block->host = mmap((void*)0x1000000, size, PROT_EXEC|PROT_READ|PROT_WRITE,
                           MAP_SHARED | MAP_ANONYMOUS, -1, 0);
#else
    new_block->host = qemu_vmalloc(size);
#endif
#ifdef MADV_MERGEABLE
    madvise(new_block->host, size, MADV_MERGEABLE);
#endif
    new_block->offset = last_ram_offset;
    new_block->length = size;

    new_block->next = ram_blocks;
    ram_blocks = new_block;

    phys_ram_dirty = qemu_realloc(phys_ram_dirty,
                                  (last_ram_offset + size) >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    last_ram_offset += size;

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

void qemu_ram_free(ram_addr_t addr)
{
    /* TODO: implement this.  */
}
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *prev;
    RAMBlock **prevp;
    RAMBlock *block;

    block = ram_blocks;
    prev = NULL;
    prevp = &ram_blocks;

    while (block && (block->offset > addr
                     || block->offset + block->length <= addr)) {
        if (prev)
            prevp = &prev->next;
        prev = block;
        block = block->next;
    }
    if (!block) {
        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
    /* Move this entry to the start of the list (a simple most-recently-used
       heuristic, so repeated lookups of the same block stay cheap). */
    if (prev) {
        prev->next = block->next;
        block->next = *prevp;
        *prevp = block;
    }
    return block->host + (addr - block->offset);
}
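
/* Illustrative sketch, not part of the original file: a device that
   owns a block of RAM (e.g. video memory) keeps the ram_addr_t from
   qemu_ram_alloc() and uses qemu_get_ram_ptr() for its own accesses;
   per the comment above, this is not a substitute for
   cpu_physical_memory_map() when doing DMA.  Names are invented. */
#if 0
static uint8_t *example_vram_ptr;

static void example_init_vram(void)
{
    ram_addr_t vram_offset = qemu_ram_alloc(1024 * 1024); /* 1 MB VRAM */

    example_vram_ptr = qemu_get_ram_ptr(vram_offset);
    memset(example_vram_ptr, 0, 1024 * 1024);
}
#endif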
/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host(void *ptr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    block = ram_blocks;
    while (block && (block->host > host
                     || block->host + block->length <= host)) {
        block = block->next;
    }
    if (!block) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return block->offset + (host - block->host);
}
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 1);
#endif
    return 0;
}

static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 2);
#endif
    return 0;
}

static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 4);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 1);
#endif
}

static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 2);
#endif
}

static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}

static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};

static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static CPUReadMemoryFunc * const error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc * const watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc * const watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
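
/* Illustrative sketch, not part of the original file: the watch_mem_*
   slots above are wired in by the TLB code, so external users never
   call them directly.  A debugger front end only inserts a watchpoint;
   guest accesses covering the range then funnel through
   check_watchpoint().  This assumes the cpu_watchpoint_insert() helper
   declared in cpu-all.h; the wrapper name is invented. */
#if 0
static void example_watch_guest_word(CPUState *env, target_ulong vaddr)
{
    CPUWatchpoint *wp;

    /* trap any guest write to the 4-byte range starting at vaddr */
    cpu_watchpoint_insert(env, vaddr, 4, BP_MEM_WRITE | BP_GDB, &wp);
}
#endif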
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                        unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
                                       addr + mmio->region_offset[idx][0][len]);

    return ret;
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx;

    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
           __func__, mmio, len, addr, idx, value);
#endif
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
                                  addr + mmio->region_offset[idx][1][len],
                                  value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                            target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc * const subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc * const subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][0][i] = region_offset;
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][1][i] = region_offset;
            }
        }
    }

    return 0;
}
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory, ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));

    mmio->base = base;
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
                     region_offset);

    return mmio;
}
static int get_free_io_mem_idx(void)
{
    int i;

    for (i = 0; i < IO_MEM_NB_ENTRIES; i++)
        if (!io_mem_used[i]) {
            io_mem_used[i] = 1;
            return i;
        }
    fprintf(stderr, "Ran out of io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
    return -1;
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer.
   If io_index is non zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned if error. */
static int cpu_register_io_memory_fixed(int io_index,
                                        CPUReadMemoryFunc * const *mem_read,
                                        CPUWriteMemoryFunc * const *mem_write,
                                        void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        io_index >>= IO_MEM_SHIFT;
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0; i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}

int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
                           CPUWriteMemoryFunc * const *mem_write,
                           void *opaque)
{
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
}
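
/* Illustrative sketch, not part of the original file: a minimal MMIO
   device built on cpu_register_io_memory().  Leaving a width NULL makes
   the region IO_MEM_SUBWIDTH, so accesses of the missing widths are
   split up by the subpage machinery.  All names are invented. */
#if 0
static uint32_t example_dev_readl(void *opaque, target_phys_addr_t addr)
{
    uint32_t *regs = opaque;

    /* addr is the offset within the registered region */
    return regs[addr >> 2];
}

static void example_dev_writel(void *opaque, target_phys_addr_t addr,
                               uint32_t val)
{
    uint32_t *regs = opaque;

    regs[addr >> 2] = val;
}

static CPUReadMemoryFunc * const example_dev_read[3] = {
    NULL, NULL, example_dev_readl,   /* 32-bit access only */
};

static CPUWriteMemoryFunc * const example_dev_write[3] = {
    NULL, NULL, example_dev_writel,
};

static void example_dev_init(target_phys_addr_t base, uint32_t *regs)
{
    int io = cpu_register_io_memory(example_dev_read, example_dev_write,
                                    regs);
    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, io);
}
#endif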
void cpu_unregister_io_memory(int io_table_address)
{
    int i;
    int io_index = io_table_address >> IO_MEM_SHIFT;

    for (i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = unassigned_mem_read[i];
        io_mem_write[io_index][i] = unassigned_mem_write[i];
    }
    io_mem_opaque[io_index] = NULL;
    io_mem_used[io_index] = 0;
}
static void io_mem_init(void)
{
    int i;

    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
    for (i = 0; i < 5; i++)
        io_mem_used[i] = 1;

    io_mem_watch = cpu_register_io_memory(watch_mem_read,
                                          watch_mem_write, NULL);
}

#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = qemu_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    qemu_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;
    int l;
    uint8_t *ret = NULL;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
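
/* Illustrative sketch, not part of the original file: the usual DMA
   pattern around cpu_physical_memory_map()/unmap().  The map may come
   back shorter than requested, or NULL when the single bounce buffer is
   busy, so callers loop (and, in real code, fall back to
   cpu_register_map_client() to retry later).  Names are invented. */
#if 0
static void example_dma_write(target_phys_addr_t addr, const uint8_t *data,
                              target_phys_addr_t len)
{
    while (len > 0) {
        target_phys_addr_t plen = len;
        void *host = cpu_physical_memory_map(addr, &plen, 1 /* is_write */);

        if (!host) {
            break;  /* real code would register a map client and retry */
        }
        memcpy(host, data, plen);
        cpu_physical_memory_unmap(host, plen, 1, plen);
        addr += plen;
        data += plen;
        len -= plen;
    }
}
#endif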
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}
/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
            }
        }
    }
}
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
#if !defined(CONFIG_USER_ONLY)
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
#endif
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif