/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>
#include <sys/mman.h>

#include "config.h"
#include "cpu.h"
#include "exec-all.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END   0xa8000000

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
uint8_t *code_gen_ptr;

int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;

typedef struct PageDesc {
    /* offset in memory of the page + io_index in the low 12 bits */
    unsigned long phys_offset;
    /* list of TBs intersecting this physical page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code handling, we count the
       number of code writes to a given page; above a threshold, a code
       bitmap is built so that only writes which really touch translated
       code trigger an invalidation */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct VirtPageDesc {
    /* physical address of code page. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    target_ulong phys_addr;
    unsigned int valid_tag;
#if !defined(CONFIG_SOFTMMU)
    /* original page access rights. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    unsigned int prot;
#endif
} VirtPageDesc;

#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

static void io_mem_init(void);

unsigned long real_host_page_size;
unsigned long host_page_bits;
unsigned long host_page_size;
unsigned long host_page_mask;

static PageDesc *l1_map[L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
static VirtPageDesc *l1_virt_map[L1_SIZE];
static unsigned int virt_valid_tag;
#endif

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
static int io_mem_nb;

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;

static void page_init(void)
{
    /* NOTE: we can always suppose that host_page_size >=
       TARGET_PAGE_SIZE */
    real_host_page_size = getpagesize();
    if (host_page_size == 0)
        host_page_size = real_host_page_size;
    if (host_page_size < TARGET_PAGE_SIZE)
        host_page_size = TARGET_PAGE_SIZE;
    host_page_bits = 0;
    while ((1 << host_page_bits) < host_page_size)
        host_page_bits++;
    host_page_mask = ~(host_page_size - 1);
#if !defined(CONFIG_USER_ONLY)
    virt_valid_tag = 1;
#endif
}

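/* return the PageDesc for a target page index, allocating the
   second-level table on first use */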
static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(CPUState *env, uint32_t addr);
static void tlb_unprotect_code(CPUState *env, uint32_t addr);
static void tlb_unprotect_code_phys(CPUState *env, uint32_t phys_addr);

static inline VirtPageDesc *virt_page_find_alloc(unsigned int index)
{
    VirtPageDesc **lp, *p;

    lp = &l1_virt_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = malloc(sizeof(VirtPageDesc) * L2_SIZE);
        memset(p, 0, sizeof(VirtPageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline VirtPageDesc *virt_page_find(unsigned int index)
{
    VirtPageDesc *p;

    p = l1_virt_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

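/* invalidate all entries of the virtual page table by incrementing the
   global tag; the per-entry tags only need to be cleared when the tag
   counter wraps around */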
static void virt_page_flush(void)
{
    int i, j;
    VirtPageDesc *p;

    virt_valid_tag++;

    if (virt_valid_tag == 0) {
        virt_valid_tag = 1;
        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                for(j = 0; j < L2_SIZE; j++)
                    p[j].valid_tag = 0;
            }
        }
    }
}
#else
static void virt_page_flush(void)
{
}
#endif

void cpu_exec_init(void)
{
    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env)
{
    int i;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    nb_tbs = 0;
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++)
        tb_hash[i] = NULL;
    virt_page_flush();

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++)
        tb_phys_hash[i] = NULL;
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

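/* unlink a TB from the list of TBs attached to a page; the low two
   bits of each list pointer encode the page slot (0 or 1) inside the TB */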
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

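/* remove the jump entry 'n' of TB 'tb' from the circular list of
   incoming jumps maintained by its destination TB */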
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

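/* remove a TB from the virtual pc hash table, unlink it from the two
   jump lists, and reset any jumps other TBs made to it */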
static inline void tb_invalidate(TranslationBlock *tb)
{
    unsigned int h, n1;
    TranslationBlock *tb1, *tb2, **ptb;

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_hash_func(tb->pc);
    ptb = &tb_hash[h];
    for(;;) {
        tb1 = *ptb;
        /* NOTE: the TB is not necessarily linked in the hash. It
           indicates that it is not currently used */
        if (tb1 == NULL)
            return;
        if (tb1 == tb) {
            *ptb = tb1->hash_next;
            break;
        }
        ptb = &tb1->hash_next;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
}

static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    PageDesc *p;
    unsigned int h;
    target_ulong phys_pc;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidate(tb);
}

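/* set 'len' bits starting at bit 'start' in the byte array 'tab' */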
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

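/* build a bitmap with one bit per byte of the page that is covered by
   translated code, from the list of TBs attached to the page */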
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

/* invalidate all TBs which intersect with the given physical address
   range [start;end[. NOTE: start and end must refer to the same
   physical page */
static void tb_invalidate_phys_page_range(target_ulong start, target_ulong end)
{
    int n;
    PageDesc *p;
    TranslationBlock *tb, *tb_next;
    target_ulong tb_start, tb_end;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
            tb_phys_invalidate(tb, -1);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        tlb_unprotect_code_phys(cpu_single_env, start);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len);
    }
}

/* invalidate all TBs which intersect with the given virtual address
   range [start;end[. This function is usually used when the target
   processor flushes its I-cache. NOTE: start and end must refer to
   the same physical page */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    int n;
    PageDesc *p;
    TranslationBlock *tb, *tb_next;
    target_ulong pc;
    target_ulong phys_start;

#if !defined(CONFIG_USER_ONLY)
    {
        VirtPageDesc *vp;
        vp = virt_page_find(start >> TARGET_PAGE_BITS);
        if (!vp)
            return;
        if (vp->valid_tag != virt_valid_tag)
            return;
        phys_start = vp->phys_addr + (start & ~TARGET_PAGE_MASK);
    }
#else
    phys_start = start;
#endif
    p = page_find(phys_start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        pc = tb->pc;
        if (!((pc + tb->size) <= start || pc >= end)) {
            tb_phys_invalidate(tb, -1);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb)
        tlb_unprotect_code(cpu_single_env, start);
#endif
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr)
{
    int n;
    PageDesc *p;
    TranslationBlock *tb;

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, unsigned int page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        unsigned long host_start, host_end, addr;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        host_start = page_addr & host_page_mask;
        host_end = host_start + host_page_size;
        prot = 0;
        for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
            prot |= page_get_flags(addr);
        mprotect((void *)host_start, host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               host_start);
#endif
        p->flags &= ~PAGE_WRITE;
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        target_ulong virt_addr;

        virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS);
        tlb_protect_code(cpu_single_env, virt_addr);
    }
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(unsigned long pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    return tb;
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;
#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

/* link the tb with the other TBs */
void tb_link(TranslationBlock *tb)
{
#if !defined(CONFIG_USER_ONLY)
    {
        VirtPageDesc *vp;
        target_ulong addr;

        /* save the code memory mappings (needed to invalidate the code) */
        addr = tb->pc & TARGET_PAGE_MASK;
        vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
        vp->phys_addr = tb->page_addr[0];
        vp->valid_tag = virt_valid_tag;

        if (tb->page_addr[1] != -1) {
            addr += TARGET_PAGE_SIZE;
            vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
            vp->phys_addr = tb->page_addr[1];
            vp->valid_tag = virt_valid_tag;
        }
    }
#endif

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

static void tb_reset_jump_recursive(TranslationBlock *tb);

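/* unchain the jump 'n' of 'tb': find the destination TB, remove 'tb'
   from its incoming-jump list, reset the generated jump, and recurse
   on the destination so the whole chain is broken */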
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, uint32_t pc)
{
#if defined(TARGET_I386)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;
    tb_invalidate_page_range(pc, pc + 1);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, uint32_t pc)
{
#if defined(TARGET_I386)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    memmove(&env->breakpoints[i], &env->breakpoints[i + 1],
            (env->nb_breakpoints - (i + 1)) * sizeof(env->breakpoints[0]));
    env->nb_breakpoints--;
    tb_invalidate_page_range(pc, pc + 1);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_I386)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb) {
        tb_reset_jump_recursive(tb);
    }
}


void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_x86_dump_state(env, stderr, X86_DUMP_FPU | X86_DUMP_CCOP);
#endif
    va_end(ap);
    abort();
}

#if !defined(CONFIG_USER_ONLY)

void tlb_flush(CPUState *env)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_read[0][i].address = -1;
        env->tlb_write[0][i].address = -1;
        env->tlb_read[1][i].address = -1;
        env->tlb_write[1][i].address = -1;
    }

    virt_page_flush();
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++)
        tb_hash[i] = NULL;

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
}

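/* invalidate a single TLB entry if it maps the given virtual address */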
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, uint32_t addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
        tlb_entry->address = -1;
}

void tlb_flush_page(CPUState *env, uint32_t addr)
{
    int i, n;
    VirtPageDesc *vp;
    PageDesc *p;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: 0x%08x\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_read[0][i], addr);
    tlb_flush_entry(&env->tlb_write[0][i], addr);
    tlb_flush_entry(&env->tlb_read[1][i], addr);
    tlb_flush_entry(&env->tlb_write[1][i], addr);

    /* remove from the virtual pc hash table all the TB at this
       virtual address */

    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (vp && vp->valid_tag == virt_valid_tag) {
        p = page_find(vp->phys_addr >> TARGET_PAGE_BITS);
        if (p) {
            /* we remove all the links to the TBs in this virtual page */
            tb = p->first_tb;
            while (tb != NULL) {
                n = (long)tb & 3;
                tb = (TranslationBlock *)((long)tb & ~3);
                if ((tb->pc & TARGET_PAGE_MASK) == addr ||
                    ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr) {
                    tb_invalidate(tb);
                }
                tb = tb->page_next[n];
            }
        }
    }

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
}

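/* redirect the write TLB entry for 'addr' to the IO_MEM_CODE handler
   so that writes to this code page are trapped */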
static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, uint32_t addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {
        tlb_entry->address |= IO_MEM_CODE;
        tlb_entry->addend -= (unsigned long)phys_ram_base;
    }
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(CPUState *env, uint32_t addr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_protect_code1(&env->tlb_write[0][i], addr);
    tlb_protect_code1(&env->tlb_write[1][i], addr);
#if !defined(CONFIG_SOFTMMU)
    /* NOTE: as we generated the code for this page, it is already at
       least readable */
    if (addr < MMAP_AREA_END)
        mprotect((void *)addr, TARGET_PAGE_SIZE, PROT_READ);
#endif
}

static inline void tlb_unprotect_code1(CPUTLBEntry *tlb_entry, uint32_t addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE) {
        tlb_entry->address &= TARGET_PAGE_MASK;
        tlb_entry->addend += (unsigned long)phys_ram_base;
    }
}

/* update the TLB so that writes in virtual page 'addr' are no longer
   checked for self modifying code */
static void tlb_unprotect_code(CPUState *env, uint32_t addr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_unprotect_code1(&env->tlb_write[0][i], addr);
    tlb_unprotect_code1(&env->tlb_write[1][i], addr);
}

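/* restore a normal write TLB entry if it currently points to the
   IO_MEM_CODE handler for the given physical page */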
static inline void tlb_unprotect_code2(CPUTLBEntry *tlb_entry,
                                       uint32_t phys_addr)
{
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE &&
        ((tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend) == phys_addr) {
        tlb_entry->address &= TARGET_PAGE_MASK;
        tlb_entry->addend += (unsigned long)phys_ram_base;
    }
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   checked for self modifying code */
/* XXX: find a way to improve it */
static void tlb_unprotect_code_phys(CPUState *env, uint32_t phys_addr)
{
    int i;

    phys_addr &= TARGET_PAGE_MASK;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_unprotect_code2(&env->tlb_write[0][i], phys_addr);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_unprotect_code2(&env->tlb_write[1][i], phys_addr);
}

/* add a new TLB entry. At most a single entry for a given virtual
   address is permitted. */
int tlb_set_page(CPUState *env, uint32_t vaddr, uint32_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    PageDesc *p;
    target_ulong pd;
    TranslationBlock *first_tb;
    unsigned int index;
    target_ulong address, addend;
    int ret;

    p = page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
        first_tb = NULL;
    } else {
        pd = p->phys_offset;
        first_tb = p->first_tb;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=0x%08x paddr=0x%08x prot=%x u=%d c=%d smmu=%d pd=0x%08x\n",
           vaddr, paddr, prot, is_user, (first_tb != NULL), is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        index = (vaddr >> 12) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        if (prot & PROT_READ) {
            env->tlb_read[is_user][index].address = address;
            env->tlb_read[is_user][index].addend = addend;
        } else {
            env->tlb_read[is_user][index].address = -1;
            env->tlb_read[is_user][index].addend = -1;
        }
        if (prot & PROT_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
                /* ROM: access is ignored (same as unassigned) */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
                env->tlb_write[is_user][index].addend = addend - (unsigned long)phys_ram_base;
            } else if (first_tb) {
                /* if code is present, we use a specific memory
                   handler. It works only for physical memory access */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_CODE;
                env->tlb_write[is_user][index].addend = addend - (unsigned long)phys_ram_base;
            } else {
                env->tlb_write[is_user][index].address = address;
                env->tlb_write[is_user][index].addend = addend;
            }
        } else {
            env->tlb_write[is_user][index].address = -1;
            env->tlb_write[is_user][index].addend = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;
            if (prot & PROT_WRITE) {
                if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM || first_tb) {
                    /* ROM: we do as if code was inside */
                    /* if code is present, we only map as read only and save the
                       original mapping */
                    VirtPageDesc *vp;

                    vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS);
                    vp->phys_addr = pd;
                    vp->prot = prot;
                    vp->valid_tag = virt_valid_tag;
                    prot &= ~PAGE_WRITE;
                }
            }
            map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                            MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
            if (map_addr == MAP_FAILED) {
                cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
                          paddr, vaddr);
            }
        }
    }
#endif
    return ret;
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long addr)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    tb_invalidate_phys_page(vp->phys_addr);
    mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot);
    return 1;
#else
    return 0;
#endif
}

#else

void tlb_flush(CPUState *env)
{
}

void tlb_flush_page(CPUState *env, uint32_t addr)
{
}

void tlb_flush_page_write(CPUState *env, uint32_t addr)
{
}

int tlb_set_page(CPUState *env, uint32_t vaddr, uint32_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0; j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

int page_get_flags(unsigned long address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is set automatically
   depending on PAGE_WRITE */
void page_set_flags(unsigned long start, unsigned long end, int flags)
{
    PageDesc *p;
    unsigned long addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long address)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    unsigned long host_start, host_end, addr;

    host_start = address & host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)host_start, host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}

/* call this function when system calls directly modify a memory area */
void page_unprotect_range(uint8_t *data, unsigned long data_size)
{
    unsigned long start, end, addr;

    start = (unsigned long)data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr);
    }
}

#endif /* defined(CONFIG_USER_ONLY) */

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(unsigned long start_addr, unsigned long size,
                                  long phys_offset)
{
    unsigned long addr, end_addr;
    PageDesc *p;

    end_addr = start_addr + size;
    for(addr = start_addr; addr < end_addr; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        p->phys_offset = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
            phys_offset += TARGET_PAGE_SIZE;
    }
}

static uint32_t unassigned_mem_readb(uint32_t addr)
{
    return 0;
}

static void unassigned_mem_writeb(uint32_t addr, uint32_t val)
{
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

/* self modifying code support in soft mmu mode: writes to a page
   containing code are dispatched to these functions */

static void code_mem_writeb(uint32_t addr, uint32_t val)
{
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(addr, 1);
#endif
    stb_raw(phys_ram_base + addr, val);
}

static void code_mem_writew(uint32_t addr, uint32_t val)
{
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(addr, 2);
#endif
    stw_raw(phys_ram_base + addr, val);
}

static void code_mem_writel(uint32_t addr, uint32_t val)
{
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(addr, 4);
#endif
    stl_raw(phys_ram_base + addr, val);
}

static CPUReadMemoryFunc *code_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *code_mem_write[3] = {
    code_mem_writeb,
    code_mem_writew,
    code_mem_writel,
};

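/* register the built-in memory handlers (ROM, unassigned and code) */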
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, code_mem_read, unassigned_mem_write);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write);
    cpu_register_io_memory(IO_MEM_CODE >> IO_MEM_SHIFT, code_mem_read, code_mem_write);
    io_mem_nb = 4;
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). (-1) is returned if error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write)
{
    int i;

    if (io_index <= 0) {
        /* allocate a new io zone */
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    return io_index << IO_MEM_SHIFT;
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif