/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
//#define DEBUG_TB_INVALIDATE

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END   0xa8000000
TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
uint8_t *code_gen_ptr;

int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
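/* Note: translated code is emitted sequentially into code_gen_buffer;
   code_gen_ptr acts as a bump-allocation cursor. tb_flush() resets it
   to the start of the buffer, and tb_alloc() refuses new blocks once
   CODE_GEN_BUFFER_MAX_SIZE would be exceeded. */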
typedef struct PageDesc {
    /* offset in memory of the page + io_index in the low 12 bits */
    unsigned long phys_offset;
    /* list of TBs intersecting this physical page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;
typedef struct VirtPageDesc {
    /* physical address of code page. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    target_ulong phys_addr;
    unsigned int valid_tag;
#if !defined(CONFIG_SOFTMMU)
    /* original page access rights. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    unsigned int prot;
#endif
} VirtPageDesc;
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
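/* The page descriptors form a two-level table: for a page index
   (addr >> TARGET_PAGE_BITS), the top L1_BITS select an l1_map[]
   entry and the low L2_BITS select a PageDesc inside the pointed-to
   array, i.e. l1_map[index >> L2_BITS][index & (L2_SIZE - 1)], as in
   page_find() below. */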
static void io_mem_init(void);

unsigned long real_host_page_size;
unsigned long host_page_bits;
unsigned long host_page_size;
unsigned long host_page_mask;
static PageDesc *l1_map[L1_SIZE];
#if !defined(CONFIG_USER_ONLY)
static VirtPageDesc *l1_virt_map[L1_SIZE];
static unsigned int virt_valid_tag;
#endif
/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
static int io_mem_nb;
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static void page_init(void)
{
    /* NOTE: we can always suppose that host_page_size >=
       TARGET_PAGE_SIZE */
    real_host_page_size = getpagesize();
    if (host_page_size == 0)
        host_page_size = real_host_page_size;
    if (host_page_size < TARGET_PAGE_SIZE)
        host_page_size = TARGET_PAGE_SIZE;
    host_page_bits = 0;
    while ((1 << host_page_bits) < host_page_size)
        host_page_bits++;
    host_page_mask = ~(host_page_size - 1);
#if !defined(CONFIG_USER_ONLY)
    virt_valid_tag = 1;
#endif
}
static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}
static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}
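/* page_find_alloc() materializes the second-level array on first
   touch, so only the used parts of the address space cost memory;
   page_find() is the lookup-only variant and returns 0 when no
   descriptor exists yet. */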
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(CPUState *env, uint32_t addr);
static void tlb_unprotect_code(CPUState *env, uint32_t addr);
static void tlb_unprotect_code_phys(CPUState *env, uint32_t phys_addr);
static inline VirtPageDesc *virt_page_find_alloc(unsigned int index)
{
    VirtPageDesc **lp, *p;

    lp = &l1_virt_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = malloc(sizeof(VirtPageDesc) * L2_SIZE);
        memset(p, 0, sizeof(VirtPageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}
static inline VirtPageDesc *virt_page_find(unsigned int index)
{
    VirtPageDesc *p;

    p = l1_virt_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}
static void virt_page_flush(void)
{
    int i, j;
    VirtPageDesc *p;

    virt_valid_tag++;
    if (virt_valid_tag == 0) {
        virt_valid_tag = 1;
        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                for(j = 0; j < L2_SIZE; j++)
                    p[j].valid_tag = 0;
            }
        }
    }
}
#else
static void virt_page_flush(void)
{
}
#endif
void cpu_exec_init(void)
{
    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}
/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env)
{
    int i;

#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    nb_tbs = 0;
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++)
        tb_hash[i] = NULL;
    virt_page_flush();

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++)
        tb_phys_hash[i] = NULL;
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
}
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, tb->pc, tb->size);
            }
        }
    }
}
/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}
void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif /* DEBUG_TB_CHECK */
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;

    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
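/* Re-targeting the patched jump at tc_ptr + tb_next_offset[n] points
   it back into the TB's own generated code, which undoes any direct
   block-to-block chaining and makes the TB exit to the CPU loop
   again. */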
static inline void tb_invalidate(TranslationBlock *tb)
{
    unsigned int h, n1;
    TranslationBlock *tb1, *tb2, **ptb;

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_hash_func(tb->pc);
    ptb = &tb_hash[h];
    for(;;) {
        tb1 = *ptb;
        /* NOTE: the TB is not necessarily linked in the hash. It
           indicates that it is not currently used */
        if (tb1 == NULL)
            return;
        if (tb1 == tb) {
            *ptb = tb1->hash_next;
            break;
        }
        ptb = &tb1->hash_next;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
}
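/* The TB list pointers are tagged: the low 2 bits of jmp_first,
   jmp_next[] and page_next[] encode which slot (0 or 1) the link
   belongs to, and the value 2 marks the head of a circular list, so
   masking with ~3 recovers the actual TranslationBlock pointer. */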
static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    PageDesc *p;
    unsigned int h;
    target_ulong phys_pc;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
}
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
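/* The bitmap holds one bit per byte of the page (TARGET_PAGE_SIZE / 8
   bytes) marking the ranges occupied by translated code;
   tb_invalidate_phys_page_fast() consults it so small writes that hit
   no code can skip the expensive invalidation path. */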
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page */
static void tb_invalidate_phys_page_range(target_ulong start, target_ulong end)
{
    int n;
    PageDesc *p;
    TranslationBlock *tb, *tb_next;
    target_ulong tb_start, tb_end;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
            tb_phys_invalidate(tb, -1);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        tlb_unprotect_code_phys(cpu_single_env, start);
    }
#endif
}
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len);
    }
}
/* invalidate all TBs which intersect with the target virtual page
   starting in range [start;end[. This function is usually used when
   the target processor flushes its I-cache. NOTE: start and end must
   refer to the same physical page */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    int n;
    PageDesc *p;
    TranslationBlock *tb, *tb_next;
    target_ulong pc;
    target_ulong phys_start;

#if !defined(CONFIG_USER_ONLY)
    {
        VirtPageDesc *vp;

        vp = virt_page_find(start >> TARGET_PAGE_BITS);
        if (!vp)
            return;
        if (vp->valid_tag != virt_valid_tag)
            return;
        phys_start = vp->phys_addr + (start & ~TARGET_PAGE_MASK);
    }
#else
    phys_start = start;
#endif
    p = page_find(phys_start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        pc = tb->pc;
        if (!((pc + tb->size) <= start || pc >= end)) {
            tb_phys_invalidate(tb, -1);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb)
        tlb_unprotect_code(cpu_single_env, start);
#endif
}
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr)
{
    int n;
    PageDesc *p;
    TranslationBlock *tb;

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
}
#endif
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, unsigned int page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        unsigned long host_start, host_end, addr;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        host_start = page_addr & host_page_mask;
        host_end = host_start + host_page_size;
        prot = 0;
        for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
            prot |= page_get_flags(addr);
        mprotect((void *)host_start, host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               host_start);
#endif
        p->flags &= ~PAGE_WRITE;
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        target_ulong virt_addr;

        virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS);
        tlb_protect_code(cpu_single_env, virt_addr);
    }
#endif
}
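/* Two write-protection schemes coexist above: in user mode the whole
   host page is mprotect()ed read-only so that self-modifying writes
   fault into page_unprotect(), while in softmmu mode
   tlb_protect_code() redirects the page's write TLB entries to the
   IO_MEM_CODE slow path. */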
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(unsigned long pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    return tb;
}
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;
#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}
/* link the tb with the other TBs */
void tb_link(TranslationBlock *tb)
{
#if !defined(CONFIG_USER_ONLY)
    {
        VirtPageDesc *vp;
        target_ulong addr;

        /* save the code memory mappings (needed to invalidate the code) */
        addr = tb->pc & TARGET_PAGE_MASK;
        vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
        vp->phys_addr = tb->page_addr[0];
        vp->valid_tag = virt_valid_tag;

        if (tb->page_addr[1] != -1) {
            addr += TARGET_PAGE_SIZE;
            vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
            vp->phys_addr = tb->page_addr[1];
            vp->valid_tag = virt_valid_tag;
        }
    }
#endif
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);
}
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
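/* The binary search is valid because TBs are handed out sequentially
   from tbs[] while their code is emitted at the monotonically growing
   code_gen_ptr, so tc_ptr is ascending in allocation order between
   two flushes. */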
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, uint32_t pc)
{
#if defined(TARGET_I386)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;
    tb_invalidate_page_range(pc, pc + 1);
    return 0;
#else
    return -1;
#endif
}
/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, uint32_t pc)
{
#if defined(TARGET_I386)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    memmove(&env->breakpoints[i], &env->breakpoints[i + 1],
            (env->nb_breakpoints - (i + 1)) * sizeof(env->breakpoints[0]));
    env->nb_breakpoints--;
    tb_invalidate_page_range(pc, pc + 1);
    return 0;
#else
    return -1;
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_I386)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}
/* enable or disable low-level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}
void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb) {
        tb_reset_jump_recursive(tb);
    }
}
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_x86_dump_state(env, stderr, X86_DUMP_FPU | X86_DUMP_CCOP);
#endif
    va_end(ap);
    abort();
}
#if !defined(CONFIG_USER_ONLY)

void tlb_flush(CPUState *env)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_read[0][i].address = -1;
        env->tlb_write[0][i].address = -1;
        env->tlb_read[1][i].address = -1;
        env->tlb_write[1][i].address = -1;
    }

    virt_page_flush();
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++)
        tb_hash[i] = NULL;

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
}
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, uint32_t addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
        tlb_entry->address = -1;
}
void tlb_flush_page(CPUState *env, uint32_t addr)
{
    int i, n;
    VirtPageDesc *vp;
    PageDesc *p;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: 0x%08x\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_read[0][i], addr);
    tlb_flush_entry(&env->tlb_write[0][i], addr);
    tlb_flush_entry(&env->tlb_read[1][i], addr);
    tlb_flush_entry(&env->tlb_write[1][i], addr);

    /* remove from the virtual pc hash table all the TB at this
       virtual address */
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (vp && vp->valid_tag == virt_valid_tag) {
        p = page_find(vp->phys_addr >> TARGET_PAGE_BITS);
        if (p) {
            /* we remove all the links to the TBs in this virtual page */
            tb = p->first_tb;
            while (tb != NULL) {
                n = (long)tb & 3;
                tb = (TranslationBlock *)((long)tb & ~3);
                if ((tb->pc & TARGET_PAGE_MASK) == addr ||
                    ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr) {
                    tb_invalidate(tb);
                }
                tb = tb->page_next[n];
            }
        }
        vp->valid_tag = 0;
    }

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
}
static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, uint32_t addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {
        tlb_entry->address |= IO_MEM_CODE;
        tlb_entry->addend -= (unsigned long)phys_ram_base;
    }
}
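/* Tagging the entry with IO_MEM_CODE forces subsequent stores through
   the code_mem_write* handlers defined near the end of this file,
   which invalidate the TBs on the page before writing; the addend is
   rebased so those handlers receive an offset into phys_ram_base. */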
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(CPUState *env, uint32_t addr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_protect_code1(&env->tlb_write[0][i], addr);
    tlb_protect_code1(&env->tlb_write[1][i], addr);
#if !defined(CONFIG_SOFTMMU)
    /* NOTE: as we generated the code for this page, it is already at
       least readable */
    if (addr < MMAP_AREA_END)
        mprotect((void *)addr, TARGET_PAGE_SIZE, PROT_READ);
#endif
}
static inline void tlb_unprotect_code1(CPUTLBEntry *tlb_entry, uint32_t addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE) {
        tlb_entry->address &= TARGET_PAGE_MASK;
        tlb_entry->addend += (unsigned long)phys_ram_base;
    }
}
/* update the TLB so that writes in virtual page 'addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code(CPUState *env, uint32_t addr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_unprotect_code1(&env->tlb_write[0][i], addr);
    tlb_unprotect_code1(&env->tlb_write[1][i], addr);
}
static inline void tlb_unprotect_code2(CPUTLBEntry *tlb_entry,
                                       uint32_t phys_addr)
{
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE &&
        ((tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend) == phys_addr) {
        tlb_entry->address &= TARGET_PAGE_MASK;
        tlb_entry->addend += (unsigned long)phys_ram_base;
    }
}
/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
/* XXX: find a way to improve it */
static void tlb_unprotect_code_phys(CPUState *env, uint32_t phys_addr)
{
    int i;

    phys_addr &= TARGET_PAGE_MASK;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_unprotect_code2(&env->tlb_write[0][i], phys_addr);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_unprotect_code2(&env->tlb_write[1][i], phys_addr);
}
/* add a new TLB entry. At most a single entry for a given virtual
   address is permitted. */
int tlb_set_page(CPUState *env, uint32_t vaddr, uint32_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    PageDesc *p;
    target_ulong pd;
    TranslationBlock *first_tb;
    unsigned int index;
    target_ulong address, addend;
    int ret;

    p = page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
        first_tb = NULL;
    } else {
        pd = p->phys_offset;
        first_tb = p->first_tb;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=0x%08x paddr=0x%08x prot=%x u=%d c=%d smmu=%d pd=0x%08x\n",
           vaddr, paddr, prot, is_user, (first_tb != NULL), is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        index = (vaddr >> 12) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        if (prot & PROT_READ) {
            env->tlb_read[is_user][index].address = address;
            env->tlb_read[is_user][index].addend = addend;
        } else {
            env->tlb_read[is_user][index].address = -1;
            env->tlb_read[is_user][index].addend = -1;
        }
        if (prot & PROT_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
                /* ROM: access is ignored (same as unassigned) */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
                env->tlb_write[is_user][index].addend = addend - (unsigned long)phys_ram_base;
            } else if (first_tb) {
                /* if code is present, we use a specific memory
                   handler. It works only for physical memory access */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_CODE;
                env->tlb_write[is_user][index].addend = addend - (unsigned long)phys_ram_base;
            } else {
                env->tlb_write[is_user][index].address = address;
                env->tlb_write[is_user][index].addend = addend;
            }
        } else {
            env->tlb_write[is_user][index].address = -1;
            env->tlb_write[is_user][index].addend = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (prot & PROT_WRITE) {
                if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM || first_tb) {
                    /* ROM: we do as if code was inside */
                    /* if code is present, we only map as read only and save the
                       original mapping */
                    VirtPageDesc *vp;

                    vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS);
                    vp->phys_addr = pd;
                    vp->prot = prot;
                    vp->valid_tag = virt_valid_tag;
                    prot &= ~PAGE_WRITE;
                }
            }
            map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                            MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
            if (map_addr == MAP_FAILED) {
                cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
                          paddr, vaddr);
            }
        }
    }
#endif
    return ret;
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long addr)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    tb_invalidate_phys_page(vp->phys_addr);
    mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot);
    return 1;
#else
    return 0;
#endif
}
#else

void tlb_flush(CPUState *env)
{
}

void tlb_flush_page(CPUState *env, uint32_t addr)
{
}

void tlb_flush_page_write(CPUState *env, uint32_t addr)
{
}

int tlb_set_page(CPUState *env, uint32_t vaddr, uint32_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    return 0;
}
/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0; j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}
int page_get_flags(unsigned long address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is set automatically
   depending on PAGE_WRITE */
void page_set_flags(unsigned long start, unsigned long end, int flags)
{
    PageDesc *p;
    unsigned long addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long address)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    unsigned long host_start, host_end, addr;

    host_start = address & host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)host_start, host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}
/* call this function when system calls directly modify a memory area */
void page_unprotect_range(uint8_t *data, unsigned long data_size)
{
    unsigned long start, end, addr;

    start = (unsigned long)data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr);
    }
}

#endif /* defined(CONFIG_USER_ONLY) */
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(unsigned long start_addr, unsigned long size,
                                  long phys_offset)
{
    unsigned long addr, end_addr;
    PageDesc *p;

    end_addr = start_addr + size;
    for(addr = start_addr; addr < end_addr; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        p->phys_offset = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
            phys_offset += TARGET_PAGE_SIZE;
    }
}
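/* Usage sketch (values are illustrative): RAM pages pass their offset
   into phys_ram_base directly, e.g.
       cpu_register_physical_memory(0x00000000, ram_size, 0);
   while an I/O or ROM range passes a phys_offset whose non-zero low
   bits select the handler table entry, typically the value returned
   by cpu_register_io_memory() below. */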
static uint32_t unassigned_mem_readb(uint32_t addr)
{
    return 0;
}

static void unassigned_mem_writeb(uint32_t addr, uint32_t val)
{
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};
/* self modifying code support in soft mmu mode: writing to a page
   containing code comes to these functions */

static void code_mem_writeb(uint32_t addr, uint32_t val)
{
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(addr, 1);
#endif
    stb_raw(phys_ram_base + addr, val);
}

static void code_mem_writew(uint32_t addr, uint32_t val)
{
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(addr, 2);
#endif
    stw_raw(phys_ram_base + addr, val);
}

static void code_mem_writel(uint32_t addr, uint32_t val)
{
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(addr, 4);
#endif
    stl_raw(phys_ram_base + addr, val);
}
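/* These handlers are the softmmu counterpart of the user-mode
   mprotect() trick: a write TLB entry tagged IO_MEM_CODE (see
   tlb_protect_code() above) lands here, the TBs overlapping the
   written bytes are discarded, and only then is the store performed
   on phys_ram_base. */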
static CPUReadMemoryFunc *code_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *code_mem_write[3] = {
    code_mem_writeb,
    code_mem_writew,
    code_mem_writel,
};
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, code_mem_read, unassigned_mem_write);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write);
    cpu_register_io_memory(IO_MEM_CODE >> IO_MEM_SHIFT, code_mem_read, code_mem_write);
    io_mem_nb = 5;
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). (-1) is returned if error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write)
{
    int i;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    return io_index << IO_MEM_SHIFT;
}
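/* Example registration (my_read/my_write are hypothetical handler
   tables belonging to a device model):

       static CPUReadMemoryFunc *my_read[3] = { rd_b, rd_w, rd_l };
       static CPUWriteMemoryFunc *my_write[3] = { wr_b, wr_w, wr_l };

       io = cpu_register_io_memory(0, my_read, my_write);
       cpu_register_physical_memory(addr, size, io);

   Passing 0 allocates a fresh entry; the shifted index returned is
   exactly what cpu_register_physical_memory() expects as phys_offset. */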
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif