/*
 * Translation Block Maintenance
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "exec/cputlb.h"
#include "exec/exec-all.h"
#include "exec/translate-all.h"
#include "sysemu/tcg.h"
#include "tcg/tcg.h"
/* local headers: tb_hash_func(), tb-context, PageDesc/page_find() etc. */
#include "tb-hash.h"
#include "tb-context.h"
#include "internal.h"
static bool tb_cmp(const void *ap, const void *bp)
{
    const TranslationBlock *a = ap;
    const TranslationBlock *b = bp;

    return (tb_pc(a) == tb_pc(b) &&
            a->cs_base == b->cs_base &&
            a->flags == b->flags &&
            (tb_cflags(a) & ~CF_INVALID) == (tb_cflags(b) & ~CF_INVALID) &&
            a->trace_vcpu_dstate == b->trace_vcpu_dstate &&
            tb_page_addr0(a) == tb_page_addr0(b) &&
            tb_page_addr1(a) == tb_page_addr1(b));
}
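/*
 * Aside on tb_cmp() (a reading of the code, not a guarantee stated here):
 * CF_INVALID is masked out of both sides, so a TB that has already been
 * marked invalid still compares equal on its remaining keys. The apparent
 * intent is that lookups racing with invalidation still find the entry and
 * reject it after re-checking CF_INVALID themselves.
 */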
void tb_htable_init(void)
{
    unsigned int mode = QHT_MODE_AUTO_RESIZE;

    qht_init(&tb_ctx.htable, tb_cmp, CODE_GEN_HTABLE_SIZE, mode);
}
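/*
 * Illustrative sketch (assumed usage, mirroring the hash computed in
 * do_tb_phys_invalidate() below): a lookup in tb_ctx.htable pairs
 * tb_hash_func() with the qht API, roughly:
 *
 *     h = tb_hash_func(phys_pc, pc, flags, cflags, trace_vcpu_dstate);
 *     tb = qht_lookup_custom(&tb_ctx.htable, &desc, h, lookup_cmp);
 *
 * where @desc and @lookup_cmp are caller-supplied.
 */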
/* Set to NULL all the 'first_tb' fields in all PageDescs. */
static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            page_lock(&pd[i]);
            pd[i].first_tb = (uintptr_t)NULL;
            page_unlock(&pd[i]);
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}
static void page_flush_tb(void)
{
    int i, l1_sz = v_l1_size;

    for (i = 0; i < l1_sz; i++) {
        page_flush_tb_1(v_l2_levels, l1_map + i);
    }
}
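/*
 * Layout note: l1_map is the root of a radix tree with v_l1_size entries;
 * each of the v_l2_levels intermediate levels fans out V_L2_SIZE ways, and
 * the leaves are arrays of V_L2_SIZE PageDescs. page_flush_tb() therefore
 * visits every allocated PageDesc exactly once.
 */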
/* flush all the translation blocks */
static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
{
    bool did_flush = false;

    mmap_lock();
    /* If it has already been done on request of another CPU, just retry. */
    if (tb_ctx.tb_flush_count != tb_flush_count.host_int) {
        goto done;
    }
    did_flush = true;

    CPU_FOREACH(cpu) {
        tcg_flush_jmp_cache(cpu);
    }

    qht_reset_size(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
    page_flush_tb();

    tcg_region_reset_all();
    /* XXX: flush processor icache at this point if cache flush is expensive */
    qatomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1);

done:
    mmap_unlock();
    if (did_flush) {
        qemu_plugin_flush_cb();
    }
}
void tb_flush(CPUState *cpu)
{
    if (tcg_enabled()) {
        unsigned tb_flush_count = qatomic_mb_read(&tb_ctx.tb_flush_count);

        if (cpu_in_exclusive_context(cpu)) {
            do_tb_flush(cpu, RUN_ON_CPU_HOST_INT(tb_flush_count));
        } else {
            async_safe_run_on_cpu(cpu, do_tb_flush,
                                  RUN_ON_CPU_HOST_INT(tb_flush_count));
        }
    }
}
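/*
 * Usage sketch (an assumption, not taken from this file): a caller that
 * wants to drop all translations, e.g. after loading new guest code, can do
 *
 *     tb_flush(first_cpu);
 *
 * and the flush is either performed inline (exclusive context) or deferred
 * to a safe work item via async_safe_run_on_cpu(), as above.
 */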
/*
 * user-mode: call with mmap_lock held
 * !user-mode: call with @pd->lock held
 */
static inline void tb_page_remove(PageDesc *pd, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    uintptr_t *pprev;
    unsigned int n1;

    assert_page_locked(pd);
    pprev = &pd->first_tb;
    PAGE_FOR_EACH_TB(pd, tb1, n1) {
        if (tb1 == tb) {
            *pprev = tb1->page_next[n1];
            return;
        }
        pprev = &tb1->page_next[n1];
    }
    g_assert_not_reached();
}
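/*
 * Encoding note: first_tb and page_next[] are tagged pointers, as seen in
 * tb_page_add() below: the low bit stores @n, i.e. which of the TB's (up to
 * two) pages this list runs through. PAGE_FOR_EACH_TB is assumed to decode
 * that tag into (tb1, n1) pairs while walking the list.
 */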
/* remove @orig from its @n_orig-th jump list */
static inline void tb_remove_from_jmp_list(TranslationBlock *orig, int n_orig)
{
    uintptr_t ptr, ptr_locked;
    TranslationBlock *dest;
    TranslationBlock *tb;
    uintptr_t *pprev;
    int n;

    /* mark the LSB of jmp_dest[] so that no further jumps can be inserted */
    ptr = qatomic_or_fetch(&orig->jmp_dest[n_orig], 1);
    dest = (TranslationBlock *)(ptr & ~1);
    if (dest == NULL) {
        return;
    }

    qemu_spin_lock(&dest->jmp_lock);
    /*
     * While acquiring the lock, the jump might have been removed if the
     * destination TB was invalidated; check again.
     */
    ptr_locked = qatomic_read(&orig->jmp_dest[n_orig]);
    if (ptr_locked != ptr) {
        qemu_spin_unlock(&dest->jmp_lock);
        /*
         * The only possibility is that the jump was unlinked via
         * tb_jump_unlink(dest). Seeing here another destination would be a bug,
         * because we set the LSB above.
         */
        g_assert(ptr_locked == 1 && dest->cflags & CF_INVALID);
        return;
    }
    /*
     * We first acquired the lock, and since the destination pointer matches,
     * we know for sure that @orig is in the jmp list.
     */
    pprev = &dest->jmp_list_head;
    TB_FOR_EACH_JMP(dest, tb, n) {
        if (tb == orig && n == n_orig) {
            *pprev = tb->jmp_list_next[n];
            /* no need to set orig->jmp_dest[n]; setting the LSB was enough */
            qemu_spin_unlock(&dest->jmp_lock);
            return;
        }
        pprev = &tb->jmp_list_next[n];
    }
    g_assert_not_reached();
}
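/*
 * Protocol note: the LSB of jmp_dest[] doubles as a "no more incoming links"
 * flag. Setting it first (the qatomic_or_fetch above) lets a concurrent
 * tb_add_jump()-style insertion detect the race, and tb_jmp_unlink() below
 * can signal removal by leaving only that LSB set.
 */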
/*
 * Reset the jump entry 'n' of a TB so that it is not chained to another TB.
 */
void tb_reset_jump(TranslationBlock *tb, int n)
{
    uintptr_t addr = (uintptr_t)(tb->tc.ptr + tb->jmp_reset_offset[n]);
    tb_set_jmp_target(tb, n, addr);
}
/* remove any jumps to the TB */
static inline void tb_jmp_unlink(TranslationBlock *dest)
{
    TranslationBlock *tb;
    int n;

    qemu_spin_lock(&dest->jmp_lock);

    TB_FOR_EACH_JMP(dest, tb, n) {
        tb_reset_jump(tb, n);
        qatomic_and(&tb->jmp_dest[n], (uintptr_t)NULL | 1);
        /* No need to clear the list entry; setting the dest ptr is enough */
    }
    dest->jmp_list_head = (uintptr_t)NULL;

    qemu_spin_unlock(&dest->jmp_lock);
}
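/*
 * The two helpers above are complementary: tb_remove_from_jmp_list() detaches
 * one *outgoing* edge of @orig, while tb_jmp_unlink() severs every *incoming*
 * edge of @dest, resetting each jumper to fall back to the lookup path.
 */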
static void tb_jmp_cache_inval_tb(TranslationBlock *tb)
{
    CPUState *cpu;

    if (TARGET_TB_PCREL) {
        /* A TB may be at any virtual address */
        CPU_FOREACH(cpu) {
            tcg_flush_jmp_cache(cpu);
        }
    } else {
        uint32_t h = tb_jmp_cache_hash_func(tb_pc(tb));

        CPU_FOREACH(cpu) {
            CPUJumpCache *jc = cpu->tb_jmp_cache;

            if (qatomic_read(&jc->array[h].tb) == tb) {
                qatomic_set(&jc->array[h].tb, NULL);
            }
        }
    }
}
/*
 * In user-mode, call with mmap_lock held.
 * In !user-mode, if @rm_from_page_list is set, call with the TB's pages'
 * locks held.
 */
static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
{
    PageDesc *p;
    uint32_t h;
    tb_page_addr_t phys_pc;
    uint32_t orig_cflags = tb_cflags(tb);

    assert_memory_lock();

    /* make sure no further incoming jumps will be chained to this TB */
    qemu_spin_lock(&tb->jmp_lock);
    qatomic_set(&tb->cflags, tb->cflags | CF_INVALID);
    qemu_spin_unlock(&tb->jmp_lock);

    /* remove the TB from the hash list */
    phys_pc = tb_page_addr0(tb);
    h = tb_hash_func(phys_pc, tb_pc(tb),
                     tb->flags, orig_cflags, tb->trace_vcpu_dstate);
    if (!qht_remove(&tb_ctx.htable, tb, h)) {
        return;
    }

    /* remove the TB from the page list */
    if (rm_from_page_list) {
        p = page_find(phys_pc >> TARGET_PAGE_BITS);
        tb_page_remove(p, tb);
        phys_pc = tb_page_addr1(tb);
        if (phys_pc != -1) {
            p = page_find(phys_pc >> TARGET_PAGE_BITS);
            tb_page_remove(p, tb);
        }
    }

    /* remove the TB from the per-CPU jump caches */
    tb_jmp_cache_inval_tb(tb);

    /* suppress this TB from the two jump lists */
    tb_remove_from_jmp_list(tb, 0);
    tb_remove_from_jmp_list(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb_jmp_unlink(tb);

    qatomic_set(&tb_ctx.tb_phys_invalidate_count,
                tb_ctx.tb_phys_invalidate_count + 1);
}
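/*
 * Summary of do_tb_phys_invalidate(): mark CF_INVALID, unhash, unlink from
 * the page lists (optionally), drop per-CPU jump-cache entries, then cut
 * both outgoing and all incoming direct jumps. After this, no CPU should be
 * able to reach the TB other than by finishing an execution already in
 * progress.
 */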
static void tb_phys_invalidate__locked(TranslationBlock *tb)
{
    qemu_thread_jit_write();
    do_tb_phys_invalidate(tb, true);
    qemu_thread_jit_execute();
}
static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
                           PageDesc **ret_p2, tb_page_addr_t phys2, bool alloc)
{
    PageDesc *p1, *p2;
    tb_page_addr_t page1;
    tb_page_addr_t page2;

    assert_memory_lock();
    g_assert(phys1 != -1);

    page1 = phys1 >> TARGET_PAGE_BITS;
    page2 = phys2 >> TARGET_PAGE_BITS;

    p1 = page_find_alloc(page1, alloc);
    if (ret_p1) {
        *ret_p1 = p1;
    }
    if (likely(phys2 == -1)) {
        page_lock(p1);
        return;
    } else if (page1 == page2) {
        page_lock(p1);
    } else {
        if (page1 < page2) {
            page_lock(p1);
        }
        p2 = page_find_alloc(page2, alloc);
        if (ret_p2) {
            *ret_p2 = p2;
        }
        page_lock(p2);
        if (page1 > page2) {
            page_lock(p1);
        }
    }
}
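/*
 * Ordering note: when two distinct pages are locked, page_lock_pair() takes
 * them in ascending page-index order, which establishes a consistent lock
 * hierarchy and prevents deadlock between concurrent lockers of the same
 * pair.
 */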
#ifdef CONFIG_USER_ONLY
static inline void page_lock_tb(const TranslationBlock *tb) { }
static inline void page_unlock_tb(const TranslationBlock *tb) { }
#else
/* lock the page(s) of a TB in the correct acquisition order */
static void page_lock_tb(const TranslationBlock *tb)
{
    page_lock_pair(NULL, tb_page_addr0(tb), NULL, tb_page_addr1(tb), false);
}

static void page_unlock_tb(const TranslationBlock *tb)
{
    PageDesc *p1 = page_find(tb_page_addr0(tb) >> TARGET_PAGE_BITS);

    page_unlock(p1);
    if (unlikely(tb_page_addr1(tb) != -1)) {
        PageDesc *p2 = page_find(tb_page_addr1(tb) >> TARGET_PAGE_BITS);

        if (p2 != p1) {
            page_unlock(p2);
        }
    }
}
#endif
/*
 * Invalidate one TB.
 * Called with mmap_lock held in user-mode.
 */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    if (page_addr == -1 && tb_page_addr0(tb) != -1) {
        page_lock_tb(tb);
        do_tb_phys_invalidate(tb, true);
        page_unlock_tb(tb);
    } else {
        do_tb_phys_invalidate(tb, false);
    }
}
/*
 * Add the tb in the target page and protect it if necessary.
 * Called with mmap_lock held for user-mode emulation.
 * Called with @p->lock held in !user-mode.
 */
static inline void tb_page_add(PageDesc *p, TranslationBlock *tb,
                               unsigned int n, tb_page_addr_t page_addr)
{
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    assert_page_locked(p);

    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != (uintptr_t)NULL;
#endif
    p->first_tb = (uintptr_t)tb | n;

#if defined(CONFIG_USER_ONLY)
    /* translator_loop() must have made all TB pages non-writable */
    assert(!(p->flags & PAGE_WRITE));
#else
    /*
     * If some code is already present, then the pages are already
     * protected. So we handle the case where only the first TB is
     * allocated in a physical page.
     */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif
}
/*
 * Add a new TB and link it to the physical page tables. phys_page2 is
 * (-1) to indicate that only one page contains the TB.
 *
 * Called with mmap_lock held for user-mode emulation.
 *
 * Returns a pointer @tb, or a pointer to an existing TB that matches @tb.
 * Note that in !user-mode, another thread might have already added a TB
 * for the same block of guest code that @tb corresponds to. In that case,
 * the caller should discard the original @tb, and use instead the returned TB.
 */
TranslationBlock *tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                               tb_page_addr_t phys_page2)
{
    PageDesc *p;
    PageDesc *p2;
    void *existing_tb = NULL;
    uint32_t h;

    assert_memory_lock();
    tcg_debug_assert(!(tb->cflags & CF_INVALID));

    /*
     * Add the TB to the page list, acquiring first the pages' locks.
     * We keep the locks held until after inserting the TB in the hash table,
     * so that if the insertion fails we know for sure that the TBs are still
     * in the page descriptors.
     * Note that inserting into the hash table first isn't an option, since
     * we can only insert TBs that are fully initialized.
     */
    page_lock_pair(&p, phys_pc, &p2, phys_page2, true);
    tb_page_add(p, tb, 0, phys_pc);
    if (p2) {
        tb_page_add(p2, tb, 1, phys_page2);
    }

    /* add in the hash table */
    h = tb_hash_func(phys_pc, tb_pc(tb),
                     tb->flags, tb->cflags, tb->trace_vcpu_dstate);
    qht_insert(&tb_ctx.htable, tb, h, &existing_tb);

    /* remove TB from the page(s) if we couldn't insert it */
    if (unlikely(existing_tb)) {
        tb_page_remove(p, tb);
        if (p2) {
            tb_page_remove(p2, tb);
        }
        tb = existing_tb;
    }

    if (p2 && p2 != p) {
        page_unlock(p2);
    }
    page_unlock(p);
    return tb;
}
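/*
 * Caller sketch (an assumption, in the style of a tb_gen_code()-like
 * caller): because another thread may win the qht_insert() race, callers
 * must adopt the return value:
 *
 *     existing = tb_link_page(tb, phys_pc, phys_page2);
 *     if (existing != tb) {
 *         // discard @tb and continue with @existing
 *     }
 */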
/*
 * @p must be non-NULL.
 * user-mode: call with mmap_lock held.
 * !user-mode: call with all @pages locked.
 */
static void
tb_invalidate_phys_page_range__locked(struct page_collection *pages,
                                      PageDesc *p, tb_page_addr_t start,
                                      tb_page_addr_t end,
                                      uintptr_t retaddr)
{
    TranslationBlock *tb;
    tb_page_addr_t tb_start, tb_end;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *cpu = current_cpu;
    bool current_tb_not_found = retaddr != 0;
    bool current_tb_modified = false;
    TranslationBlock *current_tb = NULL;
#endif /* TARGET_HAS_PRECISE_SMC */

    assert_page_locked(p);

    /*
     * We remove all the TBs in the range [start, end[.
     * XXX: see if in some cases it could be faster to invalidate all the code
     */
    PAGE_FOR_EACH_TB(p, tb, n) {
        assert_page_locked(p);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb_page_addr0(tb);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb_page_addr1(tb);
            tb_end = tb_start + ((tb_page_addr0(tb) + tb->size)
                                 & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = false;
                /* now we have a real cpu fault */
                current_tb = tcg_tb_lookup(retaddr);
            }
            if (current_tb == tb &&
                (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
                /*
                 * If we are modifying the current TB, we must stop
                 * its execution. We could be more precise by checking
                 * that the modification is after the current PC, but it
                 * would require a specialized function to partially
                 * restore the CPU state.
                 */
                current_tb_modified = true;
                cpu_restore_state_from_tb(cpu, current_tb, retaddr);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            tb_phys_invalidate__locked(tb);
        }
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        tlb_unprotect_code(start);
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        page_collection_unlock(pages);
        /* Force execution of one insn next time. */
        cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu);
        mmap_unlock();
        cpu_loop_exit_noexc(cpu);
    }
#endif
}
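/*
 * Control-flow note: cpu_loop_exit_noexc() does not return; it longjmps back
 * to the CPU loop. Setting cflags_next_tb = 1 | CF_NOIRQ beforehand forces
 * the next TB generated for this CPU to hold a single guest instruction, so
 * the store that modified the current TB is retired precisely before normal
 * translation resumes.
 */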
/*
 * Invalidate all TBs which intersect with the target physical
 * address page @addr.
 *
 * Called with mmap_lock held for user-mode emulation
 */
void tb_invalidate_phys_page(tb_page_addr_t addr)
{
    struct page_collection *pages;
    tb_page_addr_t start, end;
    PageDesc *p;

    assert_memory_lock();

    p = page_find(addr >> TARGET_PAGE_BITS);
    if (p == NULL) {
        return;
    }

    start = addr & TARGET_PAGE_MASK;
    end = start + TARGET_PAGE_SIZE;
    pages = page_collection_lock(start, end);
    tb_invalidate_phys_page_range__locked(pages, p, start, end, 0);
    page_collection_unlock(pages);
}
/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end may refer to *different* physical pages.
 * The virtual CPU will exit the current TB if code is modified inside
 * this TB.
 *
 * Called with mmap_lock held for user-mode emulation.
 */
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
{
    struct page_collection *pages;
    tb_page_addr_t next;

    assert_memory_lock();

    pages = page_collection_lock(start, end);
    for (next = (start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
         start < end;
         start = next, next += TARGET_PAGE_SIZE) {
        PageDesc *pd = page_find(start >> TARGET_PAGE_BITS);
        tb_page_addr_t bound = MIN(next, end);

        if (pd == NULL) {
            continue;
        }
        tb_invalidate_phys_page_range__locked(pages, pd, start, bound, 0);
    }
    page_collection_unlock(pages);
}
#ifdef CONFIG_SOFTMMU
/*
 * len must be <= 8 and start must be a multiple of len.
 * Called via softmmu_template.h when code areas are written to with
 * iothread mutex not held.
 *
 * Call with all @pages in the range [@start, @start + len[ locked.
 */
void tb_invalidate_phys_page_fast(struct page_collection *pages,
                                  tb_page_addr_t start, int len,
                                  uintptr_t retaddr)
{
    PageDesc *p;

    assert_memory_lock();

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }

    assert_page_locked(p);
    tb_invalidate_phys_page_range__locked(pages, p, start, start + len,
                                          retaddr);
}
#else
/*
 * Called with mmap_lock held. If pc is not 0 then it indicates the
 * host PC of the faulting store instruction that caused this invalidate.
 * Returns true if the caller needs to abort execution of the current
 * TB (because it was modified by this store and the guest CPU has
 * precise-SMC semantics).
 */
bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *cpu = current_cpu;
    bool current_tb_modified = false;
#endif

    assert_memory_lock();

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        return false;
    }

#ifdef TARGET_HAS_PRECISE_SMC
    if (p->first_tb && pc != 0) {
        current_tb = tcg_tb_lookup(pc);
    }
#endif
    assert_page_locked(p);
    PAGE_FOR_EACH_TB(p, tb, n) {
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
            /*
             * If we are modifying the current TB, we must stop its execution.
             * We could be more precise by checking that the modification is
             * after the current PC, but it would require a specialized
             * function to partially restore the CPU state.
             */
            current_tb_modified = true;
            cpu_restore_state_from_tb(cpu, current_tb, pc);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
    }
    p->first_tb = (uintptr_t)NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* Force execution of one insn next time. */
        cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu);
        return true;
    }
#endif
    return false;
}
#endif