/*
 *  Common CPU TLB handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
20 #include "qemu/osdep.h"
21 #include "qemu/main-loop.h"
23 #include "exec/exec-all.h"
24 #include "exec/memory.h"
25 #include "exec/address-spaces.h"
26 #include "exec/cpu_ldst.h"
27 #include "exec/cputlb.h"
28 #include "exec/memory-internal.h"
29 #include "exec/ram_addr.h"
31 #include "qemu/error-report.h"
33 #include "exec/helper-proto.h"
34 #include "qemu/atomic.h"
35 #include "qemu/atomic128.h"
/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif
#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)
#define assert_cpu_is_self(cpu) do {                              \
        if (DEBUG_TLB_GATE) {                                     \
            g_assert(!(cpu)->created || qemu_cpu_is_self(cpu));   \
        }                                                         \
    } while (0)
/* run_on_cpu_data.target_ptr should always be big enough for a
 * target_ulong even on 32 bit builds */
QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));
/* We currently can't handle more than 16 bits in the MMUIDX bitmask.
 */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)
static inline size_t sizeof_tlb(CPUArchState *env, uintptr_t mmu_idx)
{
    return env_tlb(env)->f[mmu_idx].mask + (1 << CPU_TLB_ENTRY_BITS);
}
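/*
 * Illustrative sizing example (values assumed, not taken from any particular
 * target): with CPU_TLB_ENTRY_BITS == 5 (32-byte entries) and a 256-entry
 * table, .mask == (256 - 1) << 5 == 0x1fe0, so sizeof_tlb() returns
 * 0x1fe0 + 32 == 8192 bytes, i.e. 256 * 32.  The same mask, shifted down by
 * CPU_TLB_ENTRY_BITS, is roughly what tlb_index() uses to hash a vaddr into
 * the table.
 */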
static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
                             size_t max_entries)
{
    desc->window_begin_ns = ns;
    desc->window_max_entries = max_entries;
}
static void tlb_dyn_init(CPUArchState *env)
{
    int i;

    for (i = 0; i < NB_MMU_MODES; i++) {
        CPUTLBDesc *desc = &env_tlb(env)->d[i];
        size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;

        tlb_window_reset(desc, get_clock_realtime(), 0);
        desc->n_used_entries = 0;
        env_tlb(env)->f[i].mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
        env_tlb(env)->f[i].table = g_new(CPUTLBEntry, n_entries);
        env_tlb(env)->d[i].iotlb = g_new(CPUIOTLBEntry, n_entries);
    }
}
/**
 * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
 * @env: CPU that owns the TLB
 * @mmu_idx: MMU index of the TLB
 *
 * Called with tlb_lock held.
 *
 * We have two main constraints when resizing a TLB: (1) we only resize it
 * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing
 * the array or unnecessarily flushing it), which means we do not control how
 * frequently the resizing can occur; (2) we don't have access to the guest's
 * future scheduling decisions, and therefore have to decide the magnitude of
 * the resize based on past observations.
 *
 * In general, a memory-hungry process can benefit greatly from an appropriately
 * sized TLB, since a guest TLB miss is very expensive. This doesn't mean that
 * we just have to make the TLB as large as possible; while an oversized TLB
 * results in minimal TLB miss rates, it also takes longer to be flushed
 * (flushes can be _very_ frequent), and the reduced locality can also hurt
 * performance.
 *
 * To achieve near-optimal performance for all kinds of workloads, we:
 *
 * 1. Aggressively increase the size of the TLB when the use rate of the
 * TLB being flushed is high, since it is likely that in the near future this
 * memory-hungry process will execute again, and its memory hungriness will
 * probably be similar.
 *
 * 2. Slowly reduce the size of the TLB as the use rate declines over a
 * reasonably large time window. The rationale is that if in such a time window
 * we have not observed a high TLB use rate, it is likely that we won't observe
 * it in the near future. In that case, once a time window expires we downsize
 * the TLB to match the maximum use rate observed in the window.
 *
 * 3. Try to keep the maximum use rate in a time window in the 30-70% range,
 * since in that range performance is likely near-optimal. Recall that the TLB
 * is direct mapped, so we want the use rate to be low (or at least not too
 * high), since otherwise we are likely to have a significant amount of
 * conflict misses.
 */
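/*
 * Worked example of the policy above (numbers purely illustrative): with a
 * 1024-entry table, a flush that finds 800 used entries gives a use rate of
 * 800 * 100 / 1024 == 78% > 70%, so the table doubles.  Conversely, if over
 * a full 100 ms window the maximum use seen is 200 entries (rate 19% < 30%),
 * the table shrinks towards pow2ceil(200) == 256 entries; the expected rate
 * there would be 200 * 100 / 256 == 78%, which exceeds 70%, so the ceiling
 * is doubled again to 512 for an expected rate of about 39%.
 */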
static void tlb_mmu_resize_locked(CPUArchState *env, int mmu_idx)
{
    CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
    size_t old_size = tlb_n_entries(env, mmu_idx);
    size_t rate;
    size_t new_size = old_size;
    int64_t now = get_clock_realtime();
    int64_t window_len_ms = 100;
    int64_t window_len_ns = window_len_ms * 1000 * 1000;
    bool window_expired = now > desc->window_begin_ns + window_len_ns;

    if (desc->n_used_entries > desc->window_max_entries) {
        desc->window_max_entries = desc->n_used_entries;
    }
    rate = desc->window_max_entries * 100 / old_size;

    if (rate > 70) {
        new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS);
    } else if (rate < 30 && window_expired) {
        size_t ceil = pow2ceil(desc->window_max_entries);
        size_t expected_rate = desc->window_max_entries * 100 / ceil;

        /*
         * Avoid undersizing when the max number of entries seen is just below
         * a pow2. For instance, if max_entries == 1025, the expected use rate
         * would be 1025/2048==50%. However, if max_entries == 1023, we'd get
         * 1023/1024==99.9% use rate, so we'd likely end up doubling the size
         * later. Thus, make sure that the expected use rate remains below 70%.
         * (and since we double the size, that means the lowest rate we'd
         *  expect to get is 35%, which is still in the 30-70% range where
         *  we consider that the size is appropriate.)
         */
        if (expected_rate > 70) {
            ceil *= 2;
        }
        new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS);
    }

    if (new_size == old_size) {
        if (window_expired) {
            tlb_window_reset(desc, now, desc->n_used_entries);
        }
        return;
    }
    g_free(env_tlb(env)->f[mmu_idx].table);
    g_free(env_tlb(env)->d[mmu_idx].iotlb);

    tlb_window_reset(desc, now, 0);
    /* desc->n_used_entries is cleared by the caller */
    env_tlb(env)->f[mmu_idx].mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
    env_tlb(env)->f[mmu_idx].table = g_try_new(CPUTLBEntry, new_size);
    env_tlb(env)->d[mmu_idx].iotlb = g_try_new(CPUIOTLBEntry, new_size);
    /*
     * If the allocations fail, try smaller sizes. We just freed some
     * memory, so going back to half of new_size has a good chance of working.
     * Increased memory pressure elsewhere in the system might cause the
     * allocations to fail though, so we progressively reduce the allocation
     * size, aborting if we cannot even allocate the smallest TLB we support.
     */
    while (env_tlb(env)->f[mmu_idx].table == NULL ||
           env_tlb(env)->d[mmu_idx].iotlb == NULL) {
        if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
            error_report("%s: %s", __func__, strerror(errno));
            abort();
        }
        new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
        env_tlb(env)->f[mmu_idx].mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;

        g_free(env_tlb(env)->f[mmu_idx].table);
        g_free(env_tlb(env)->d[mmu_idx].iotlb);
        env_tlb(env)->f[mmu_idx].table = g_try_new(CPUTLBEntry, new_size);
        env_tlb(env)->d[mmu_idx].iotlb = g_try_new(CPUIOTLBEntry, new_size);
    }
}
static inline void tlb_table_flush_by_mmuidx(CPUArchState *env, int mmu_idx)
{
    tlb_mmu_resize_locked(env, mmu_idx);
    memset(env_tlb(env)->f[mmu_idx].table, -1, sizeof_tlb(env, mmu_idx));
    env_tlb(env)->d[mmu_idx].n_used_entries = 0;
}

static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
{
    env_tlb(env)->d[mmu_idx].n_used_entries++;
}

static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx)
{
    env_tlb(env)->d[mmu_idx].n_used_entries--;
}
void tlb_init(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;

    qemu_spin_init(&env_tlb(env)->c.lock);

    /* Ensure that cpu_reset performs a full flush.  */
    env_tlb(env)->c.dirty = ALL_MMUIDX_BITS;

    tlb_dyn_init(env);
}
/* flush_all_helper: run fn across all cpus
 *
 * If the wait flag is set then the src cpu's helper will be queued as
 * "safe" work and the loop exited creating a synchronisation point
 * where all queued work will be finished before execution starts
 * again.
 */
static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
                             run_on_cpu_data d)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, fn, d);
        }
    }
}
void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
{
    CPUState *cpu;
    size_t full = 0, part = 0, elide = 0;

    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;

        full += atomic_read(&env_tlb(env)->c.full_flush_count);
        part += atomic_read(&env_tlb(env)->c.part_flush_count);
        elide += atomic_read(&env_tlb(env)->c.elide_flush_count);
    }
    *pfull = full;
    *ppart = part;
    *pelide = elide;
}
static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
{
    tlb_table_flush_by_mmuidx(env, mmu_idx);
    env_tlb(env)->d[mmu_idx].large_page_addr = -1;
    env_tlb(env)->d[mmu_idx].large_page_mask = -1;
    env_tlb(env)->d[mmu_idx].vindex = 0;
    memset(env_tlb(env)->d[mmu_idx].vtable, -1,
           sizeof(env_tlb(env)->d[0].vtable));
}
static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    uint16_t asked = data.host_int;
    uint16_t all_dirty, work, to_clean;

    assert_cpu_is_self(cpu);

    tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);

    qemu_spin_lock(&env_tlb(env)->c.lock);

    all_dirty = env_tlb(env)->c.dirty;
    to_clean = asked & all_dirty;
    all_dirty &= ~to_clean;
    env_tlb(env)->c.dirty = all_dirty;

    for (work = to_clean; work != 0; work &= work - 1) {
        int mmu_idx = ctz32(work);
        tlb_flush_one_mmuidx_locked(env, mmu_idx);
    }

    qemu_spin_unlock(&env_tlb(env)->c.lock);

    cpu_tb_jmp_cache_clear(cpu);

    if (to_clean == ALL_MMUIDX_BITS) {
        atomic_set(&env_tlb(env)->c.full_flush_count,
                   env_tlb(env)->c.full_flush_count + 1);
    } else {
        atomic_set(&env_tlb(env)->c.part_flush_count,
                   env_tlb(env)->c.part_flush_count + ctpop16(to_clean));
        if (to_clean != asked) {
            atomic_set(&env_tlb(env)->c.elide_flush_count,
                       env_tlb(env)->c.elide_flush_count +
                       ctpop16(asked & ~to_clean));
        }
    }
}
void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
    tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);

    if (cpu->created && !qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
                         RUN_ON_CPU_HOST_INT(idxmap));
    } else {
        tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
    }
}
void tlb_flush(CPUState *cpu)
{
    tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
}
static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
                                        target_ulong page)
{
    return tlb_hit_page(tlb_entry->addr_read, page) ||
           tlb_hit_page(tlb_addr_write(tlb_entry), page) ||
           tlb_hit_page(tlb_entry->addr_code, page);
}
/**
 * tlb_entry_is_empty - return true if the entry is not in use
 * @te: pointer to CPUTLBEntry
 */
static inline bool tlb_entry_is_empty(const CPUTLBEntry *te)
{
    return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1;
}
/* Called with tlb_c.lock held */
static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry,
                                          target_ulong page)
{
    if (tlb_hit_page_anyprot(tlb_entry, page)) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
        return true;
    }
    return false;
}
/* Called with tlb_c.lock held */
static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
                                              target_ulong page)
{
    CPUTLBDesc *d = &env_tlb(env)->d[mmu_idx];
    int k;

    assert_cpu_is_self(env_cpu(env));
    for (k = 0; k < CPU_VTLB_SIZE; k++) {
        if (tlb_flush_entry_locked(&d->vtable[k], page)) {
            tlb_n_used_entries_dec(env, mmu_idx);
        }
    }
}
static void tlb_flush_page_locked(CPUArchState *env, int midx,
                                  target_ulong page)
{
    target_ulong lp_addr = env_tlb(env)->d[midx].large_page_addr;
    target_ulong lp_mask = env_tlb(env)->d[midx].large_page_mask;

    /* Check if we need to flush due to large pages.  */
    if ((page & lp_mask) == lp_addr) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  midx, lp_addr, lp_mask);
        tlb_flush_one_mmuidx_locked(env, midx);
    } else {
        if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) {
            tlb_n_used_entries_dec(env, midx);
        }
        tlb_flush_vtlb_page_locked(env, midx, page);
    }
}
/* As we are going to hijack the bottom bits of the page address for a
 * mmuidx bit mask we need to fail to build if we can't do that
 */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > TARGET_PAGE_BITS_MIN);
static void tlb_flush_page_by_mmuidx_async_work(CPUState *cpu,
                                                run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr;
    target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK;
    unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%lx\n",
              addr, mmu_idx_bitmap);

    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if (test_bit(mmu_idx, &mmu_idx_bitmap)) {
            tlb_flush_page_locked(env, mmu_idx, addr);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);

    tb_flush_jmp_cache(cpu, addr);
}
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
{
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);

    /* This should already be page aligned */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    if (!qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_work,
                         RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    } else {
        tlb_flush_page_by_mmuidx_async_work(
            cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    }
}

void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
                                       uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_page_by_mmuidx_async_work;
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    fn(src_cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
}

void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                              target_ulong addr,
                                              uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_page_by_mmuidx_async_work;
    target_ulong addr_and_mmu_idx;

    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
    addr_and_mmu_idx |= idxmap;

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
}

void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
}
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}
/*
 * Dirty write flag handling
 *
 * When the TCG code writes to a location it looks up the address in
 * the TLB and uses that data to compute the final address. If any of
 * the lower bits of the address are set then the slow path is forced.
 * There are a number of reasons to do this but for normal RAM the
 * most usual is detecting writes to code regions which may invalidate
 * generated code.
 *
 * Other vCPUs might be reading their TLBs during guest execution, so we update
 * te->addr_write with atomic_set. We don't need to worry about this for
 * oversized guests as MTTCG is disabled for them.
 *
 * Called with tlb_c.lock held.
 */
static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
                                         uintptr_t start, uintptr_t length)
{
    uintptr_t addr = tlb_entry->addr_write;

    if ((addr & (TLB_INVALID_MASK | TLB_MMIO | TLB_NOTDIRTY)) == 0) {
        addr &= TARGET_PAGE_MASK;
        addr += tlb_entry->addend;
        if ((addr - start) < length) {
#if TCG_OVERSIZED_GUEST
            tlb_entry->addr_write |= TLB_NOTDIRTY;
#else
            atomic_set(&tlb_entry->addr_write,
                       tlb_entry->addr_write | TLB_NOTDIRTY);
#endif
        }
    }
}
/*
 * Called with tlb_c.lock held.
 * Called only from the vCPU context, i.e. the TLB's owner thread.
 */
static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
{
    *d = *s;
}
/* This is a cross vCPU call (i.e. another vCPU resetting the flags of
 * the target vCPU).
 * We must take tlb_c.lock to avoid racing with another vCPU update. The only
 * thing actually updated is the target TLB entry ->addr_write flags.
 */
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;
        unsigned int n = tlb_n_entries(env, mmu_idx);

        for (i = 0; i < n; i++) {
            tlb_reset_dirty_range_locked(&env_tlb(env)->f[mmu_idx].table[i],
                                         start1, length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range_locked(&env_tlb(env)->d[mmu_idx].vtable[i],
                                         start1, length);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);
}
/* Called with tlb_c.lock held */
static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
                                         target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    vaddr &= TARGET_PAGE_MASK;
    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1_locked(&env_tlb(env)->d[mmu_idx].vtable[k], vaddr);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);
}
/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
                               target_ulong vaddr, target_ulong size)
{
    target_ulong lp_addr = env_tlb(env)->d[mmu_idx].large_page_addr;
    target_ulong lp_mask = ~(size - 1);

    if (lp_addr == (target_ulong)-1) {
        /* No previous large page.  */
        lp_addr = vaddr;
    } else {
        /* Extend the existing region to include the new page.
           This is a compromise between unnecessary flushes and
           the cost of maintaining a full variable size TLB. */
        lp_mask &= env_tlb(env)->d[mmu_idx].large_page_mask;
        while (((lp_addr ^ vaddr) & lp_mask) != 0) {
            lp_mask <<= 1;
        }
    }
    env_tlb(env)->d[mmu_idx].large_page_addr = lp_addr & lp_mask;
    env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask;
}
/* Add a new TLB entry. At most one entry for a given virtual address
 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, target_ulong size)
{
    CPUArchState *env = cpu->env_ptr;
    CPUTLB *tlb = env_tlb(env);
    CPUTLBDesc *desc = &tlb->d[mmu_idx];
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    uintptr_t addend;
    CPUTLBEntry *te, tn;
    hwaddr iotlb, xlat, sz, paddr_page;
    target_ulong vaddr_page;
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    int wp_flags;

    assert_cpu_is_self(cpu);
<= TARGET_PAGE_SIZE
) {
718 sz
= TARGET_PAGE_SIZE
;
720 tlb_add_large_page(env
, mmu_idx
, vaddr
, size
);
723 vaddr_page
= vaddr
& TARGET_PAGE_MASK
;
724 paddr_page
= paddr
& TARGET_PAGE_MASK
;
726 section
= address_space_translate_for_iotlb(cpu
, asidx
, paddr_page
,
727 &xlat
, &sz
, attrs
, &prot
);
728 assert(sz
>= TARGET_PAGE_SIZE
);
730 tlb_debug("vaddr=" TARGET_FMT_lx
" paddr=0x" TARGET_FMT_plx
732 vaddr
, paddr
, prot
, mmu_idx
);
734 address
= vaddr_page
;
735 if (size
< TARGET_PAGE_SIZE
) {
736 /* Repeat the MMU check and TLB fill on every access. */
737 address
|= TLB_INVALID_MASK
;
739 if (attrs
.byte_swap
) {
740 /* Force the access through the I/O slow path. */
743 if (!memory_region_is_ram(section
->mr
) &&
744 !memory_region_is_romd(section
->mr
)) {
749 /* TLB_MMIO for rom/romd handled below */
750 addend
= (uintptr_t)memory_region_get_ram_ptr(section
->mr
) + xlat
;
753 code_address
= address
;
754 iotlb
= memory_region_section_get_iotlb(cpu
, section
, vaddr_page
,
755 paddr_page
, xlat
, prot
, &address
);
756 wp_flags
= cpu_watchpoint_address_matches(cpu
, vaddr_page
,
759 index
= tlb_index(env
, mmu_idx
, vaddr_page
);
760 te
= tlb_entry(env
, mmu_idx
, vaddr_page
);
    /*
     * Hold the TLB lock for the rest of the function. We could acquire/release
     * the lock several times in the function, but it is faster to amortize the
     * acquisition cost by acquiring it just once. Note that this leads to
     * a longer critical section, but this is not a concern since the TLB lock
     * is unlikely to be contended.
     */
    qemu_spin_lock(&tlb->c.lock);

    /* Note that the tlb is no longer clean.  */
    tlb->c.dirty |= 1 << mmu_idx;

    /* Make sure there's no cached translation for the new page.  */
    tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page);

    /*
     * Only evict the old entry to the victim tlb if it's for a
     * different page; otherwise just overwrite the stale data.
     */
    if (!tlb_hit_page_anyprot(te, vaddr_page) && !tlb_entry_is_empty(te)) {
        unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE;
        CPUTLBEntry *tv = &desc->vtable[vidx];

        /* Evict the old entry into the victim tlb.  */
        copy_tlb_helper_locked(tv, te);
        desc->viotlb[vidx] = desc->iotlb[index];
        tlb_n_used_entries_dec(env, mmu_idx);
    }

    /*
     * At this point iotlb contains a physical section number in the lower
     * TARGET_PAGE_BITS, and either
     *  + the ram_addr_t of the page base of the target RAM (if NOTDIRTY or ROM)
     *  + the offset within section->mr of the page base (otherwise)
     * We subtract the vaddr_page (which is page aligned and thus won't
     * disturb the low bits) to give an offset which can be added to the
     * (non-page-aligned) vaddr of the eventual memory access to get
     * the MemoryRegion offset for the access. Note that the vaddr we
     * subtract here is that of the page base, and not the same as the
     * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
     */
    desc->iotlb[index].addr = iotlb - vaddr_page;
    desc->iotlb[index].attrs = attrs;

    /* Now calculate the new entry */
    tn.addend = addend - vaddr_page;
    if (prot & PAGE_READ) {
        tn.addr_read = address;
        if (wp_flags & BP_MEM_READ) {
            tn.addr_read |= TLB_WATCHPOINT;
        }
    } else {
        tn.addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        tn.addr_code = code_address;
    } else {
        tn.addr_code = -1;
    }

    tn.addr_write = -1;
    if (prot & PAGE_WRITE) {
        if ((memory_region_is_ram(section->mr) && section->readonly)
            || memory_region_is_romd(section->mr)) {
            /* Write access calls the I/O callback.  */
            tn.addr_write = address | TLB_MMIO;
        } else if (memory_region_is_ram(section->mr)
                   && cpu_physical_memory_is_clean(
                       memory_region_get_ram_addr(section->mr) + xlat)) {
            tn.addr_write = address | TLB_NOTDIRTY;
        } else {
            tn.addr_write = address;
        }
        if (prot & PAGE_WRITE_INV) {
            tn.addr_write |= TLB_INVALID_MASK;
        }
        if (wp_flags & BP_MEM_WRITE) {
            tn.addr_write |= TLB_WATCHPOINT;
        }
    }

    copy_tlb_helper_locked(te, &tn);
    tlb_n_used_entries_inc(env, mmu_idx);
    qemu_spin_unlock(&tlb->c.lock);
}
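/*
 * Illustrative example of the iotlb offset arithmetic above (values assumed):
 * if the page at vaddr_page 0x4000 maps to offset 0x123000 within
 * section->mr, then iotlb[index].addr holds 0x123000 - 0x4000 == 0x11f000.
 * A later access to vaddr 0x4018 adds the full vaddr back in
 * io_readx()/io_writex(), giving 0x11f000 + 0x4018 == 0x123018, the offset
 * of the access within the MemoryRegion.
 */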
/* Add a new TLB entry, but without specifying the memory
 * transaction attributes to be used.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}
static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    ram_addr = qemu_ram_addr_from_host(ptr);
    if (ram_addr == RAM_ADDR_INVALID) {
        error_report("Bad ram pointer %p", ptr);
        abort();
    }
    return ram_addr;
}
/*
 * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the
 * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must
 * be discarded and looked up again (e.g. via tlb_entry()).
 */
static void tlb_fill(CPUState *cpu, target_ulong addr, int size,
                     MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    bool ok;

    /*
     * This is not a probe, so only valid return is success; failure
     * should result in exception + longjmp to the cpu loop.
     */
    ok = cc->tlb_fill(cpu, addr, size, access_type, mmu_idx, false, retaddr);
    assert(ok);
}
static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                         int mmu_idx, target_ulong addr, uintptr_t retaddr,
                         MMUAccessType access_type, MemOp op)
{
    CPUState *cpu = env_cpu(env);
    hwaddr mr_offset;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    uint64_t val;
    bool locked = false;
    MemTxResult r;

    if (iotlbentry->attrs.byte_swap) {
        op ^= MO_BSWAP;
    }

    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
    mr = section->mr;
    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;
    cpu->mem_io_access_type = access_type;

    if (mr->global_locking && !qemu_mutex_iothread_locked()) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    r = memory_region_dispatch_read(mr, mr_offset, &val, op, iotlbentry->attrs);
    if (r != MEMTX_OK) {
        hwaddr physaddr = mr_offset +
            section->offset_within_address_space -
            section->offset_within_region;

        cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type,
                               mmu_idx, iotlbentry->attrs, r, retaddr);
    }
    if (locked) {
        qemu_mutex_unlock_iothread();
    }

    return val;
}
static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                      int mmu_idx, uint64_t val, target_ulong addr,
                      uintptr_t retaddr, MemOp op)
{
    CPUState *cpu = env_cpu(env);
    hwaddr mr_offset;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    bool locked = false;
    MemTxResult r;

    if (iotlbentry->attrs.byte_swap) {
        op ^= MO_BSWAP;
    }

    section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
    mr = section->mr;
    mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
        cpu_io_recompile(cpu, retaddr);
    }
    cpu->mem_io_vaddr = addr;
    cpu->mem_io_pc = retaddr;

    if (mr->global_locking && !qemu_mutex_iothread_locked()) {
        qemu_mutex_lock_iothread();
        locked = true;
    }
    r = memory_region_dispatch_write(mr, mr_offset, val, op, iotlbentry->attrs);
    if (r != MEMTX_OK) {
        hwaddr physaddr = mr_offset +
            section->offset_within_address_space -
            section->offset_within_region;

        cpu_transaction_failed(cpu, physaddr, addr, memop_size(op),
                               MMU_DATA_STORE, mmu_idx, iotlbentry->attrs, r,
                               retaddr);
    }
    if (locked) {
        qemu_mutex_unlock_iothread();
    }
}
static inline target_ulong tlb_read_ofs(CPUTLBEntry *entry, size_t ofs)
{
#if TCG_OVERSIZED_GUEST
    return *(target_ulong *)((uintptr_t)entry + ofs);
#else
    /* ofs might correspond to .addr_write, so use atomic_read */
    return atomic_read((target_ulong *)((uintptr_t)entry + ofs));
#endif
}
/* Return true if ADDR is present in the victim tlb, and has been copied
   back to the main tlb.  */
static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
                           size_t elt_ofs, target_ulong page)
{
    size_t vidx;

    assert_cpu_is_self(env_cpu(env));
    for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
        CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx];
        target_ulong cmp;

        /* elt_ofs might correspond to .addr_write, so use atomic_read */
#if TCG_OVERSIZED_GUEST
        cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);
#else
        cmp = atomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs));
#endif

        if (cmp == page) {
            /* Found entry in victim tlb, swap tlb and iotlb.  */
            CPUTLBEntry tmptlb, *tlb = &env_tlb(env)->f[mmu_idx].table[index];

            qemu_spin_lock(&env_tlb(env)->c.lock);
            copy_tlb_helper_locked(&tmptlb, tlb);
            copy_tlb_helper_locked(tlb, vtlb);
            copy_tlb_helper_locked(vtlb, &tmptlb);
            qemu_spin_unlock(&env_tlb(env)->c.lock);

            CPUIOTLBEntry tmpio, *io = &env_tlb(env)->d[mmu_idx].iotlb[index];
            CPUIOTLBEntry *vio = &env_tlb(env)->d[mmu_idx].viotlb[vidx];
            tmpio = *io; *io = *vio; *vio = tmpio;
            return true;
        }
    }
    return false;
}
/* Macro to call the above, with local variables from the use context.  */
#define VICTIM_TLB_HIT(TY, ADDR) \
  victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
                 (ADDR) & TARGET_PAGE_MASK)
/*
 * Return a ram_addr_t for the virtual address for execution.
 *
 * Return -1 if we can't translate and execute from an entire page
 * of RAM. This will force us to execute by loading and translating
 * one insn at a time, without caching.
 *
 * NOTE: This function will trigger an exception if the page is
 * not executable.
 */
tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
{
    uintptr_t mmu_idx = cpu_mmu_index(env, true);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    void *p;

    if (unlikely(!tlb_hit(entry->addr_code, addr))) {
        if (!VICTIM_TLB_HIT(addr_code, addr)) {
            tlb_fill(env_cpu(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0);
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);

            if (unlikely(entry->addr_code & TLB_INVALID_MASK)) {
                /*
                 * The MMU protection covers a smaller range than a target
                 * page, so we must redo the MMU check for every insn.
                 */
                return -1;
            }
        }
        assert(tlb_hit(entry->addr_code, addr));
    }

    if (unlikely(entry->addr_code & TLB_MMIO)) {
        /* The region is not backed by RAM.  */
        return -1;
    }

    p = (void *)((uintptr_t)addr + entry->addend);
    return qemu_ram_addr_from_host_nofail(p);
}
/* Probe for whether the specified guest write access is permitted.
 * If it is not permitted then an exception will be taken in the same
 * way as if this were a real write access (and we will not return).
 * Otherwise the function will return, and there will be a valid
 * entry in the TLB for this access.
 */
void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx,
                 uintptr_t retaddr)
{
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);

    if (!tlb_hit(tlb_addr_write(entry), addr)) {
        /* TLB entry is for a different page */
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE,
                     mmu_idx, retaddr);
        }
    }
}
*env
, abi_ptr addr
,
1100 MMUAccessType access_type
, int mmu_idx
)
1102 CPUTLBEntry
*entry
= tlb_entry(env
, mmu_idx
, addr
);
1103 uintptr_t tlb_addr
, page
;
1106 switch (access_type
) {
1108 elt_ofs
= offsetof(CPUTLBEntry
, addr_read
);
1110 case MMU_DATA_STORE
:
1111 elt_ofs
= offsetof(CPUTLBEntry
, addr_write
);
1113 case MMU_INST_FETCH
:
1114 elt_ofs
= offsetof(CPUTLBEntry
, addr_code
);
1117 g_assert_not_reached();
1120 page
= addr
& TARGET_PAGE_MASK
;
1121 tlb_addr
= tlb_read_ofs(entry
, elt_ofs
);
1123 if (!tlb_hit_page(tlb_addr
, page
)) {
1124 uintptr_t index
= tlb_index(env
, mmu_idx
, addr
);
1126 if (!victim_tlb_hit(env
, mmu_idx
, index
, elt_ofs
, page
)) {
1127 CPUState
*cs
= env_cpu(env
);
1128 CPUClass
*cc
= CPU_GET_CLASS(cs
);
1130 if (!cc
->tlb_fill(cs
, addr
, 0, access_type
, mmu_idx
, true, 0)) {
1131 /* Non-faulting page table read failed. */
1135 /* TLB resize via tlb_fill may have moved the entry. */
1136 entry
= tlb_entry(env
, mmu_idx
, addr
);
1138 tlb_addr
= tlb_read_ofs(entry
, elt_ofs
);
1141 if (tlb_addr
& ~TARGET_PAGE_MASK
) {
1146 return (void *)((uintptr_t)addr
+ entry
->addend
);
/* Probe for a read-modify-write atomic operation.  Do not allow unaligned
 * operations, or io operations to proceed.  Return the host address.  */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               TCGMemOpIdx oi, uintptr_t retaddr,
                               NotDirtyInfo *ndi)
{
    size_t mmu_idx = get_mmuidx(oi);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
    target_ulong tlb_addr = tlb_addr_write(tlbe);
    MemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    int s_bits = mop & MO_SIZE;
    void *hostaddr;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* Enforce guest required alignment.  */
    if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
        /* ??? Maybe indicate atomic op to cpu_unaligned_access */
        cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* Enforce qemu required alignment.  */
    if (unlikely(addr & ((1 << s_bits) - 1))) {
        /* We get here if guest alignment was not requested,
           or was not enforced by cpu_unaligned_access above.
           We might widen the access and emulate, but for now
           mark an exception and exit the cpu loop.  */
        goto stop_the_world;
    }

    /* Check TLB entry and enforce page permissions.  */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_STORE,
                     mmu_idx, retaddr);
            index = tlb_index(env, mmu_idx, addr);
            tlbe = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
    }

    /* Notice an IO access or a needs-MMU-lookup access */
    if (unlikely(tlb_addr & TLB_MMIO)) {
        /* There's really nothing that can be done to
           support this apart from stop-the-world.  */
        goto stop_the_world;
    }

    /* Let the guest notice RMW on a write-only page.  */
    if (unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
        tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_LOAD,
                 mmu_idx, retaddr);
        /* Since we don't support reads and writes to different addresses,
           and we do have the proper page loaded for write, this shouldn't
           ever return.  But just in case, handle via stop-the-world.  */
        goto stop_the_world;
    }

    hostaddr = (void *)((uintptr_t)addr + tlbe->addend);

    ndi->active = false;
    if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
        ndi->active = true;
        memory_notdirty_write_prepare(ndi, env_cpu(env), addr,
                                      qemu_ram_addr_from_host_nofail(hostaddr),
                                      1 << s_bits);
    }

    return hostaddr;

 stop_the_world:
    cpu_loop_exit_atomic(env_cpu(env), retaddr);
}
/*
 * Load Helpers
 *
 * We support two different access types. SOFTMMU_CODE_ACCESS is
 * specifically for reading instructions from system memory. It is
 * called by the translation loop and in some helpers where the code
 * is disassembled. It shouldn't be called directly by guest code.
 */

typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr,
                                TCGMemOpIdx oi, uintptr_t retaddr);
static inline uint64_t __attribute__((always_inline))
load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
            uintptr_t retaddr, MemOp op, bool code_read,
            FullLoadHelper *full_load)
{
    uintptr_t mmu_idx = get_mmuidx(oi);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    target_ulong tlb_addr = code_read ? entry->addr_code : entry->addr_read;
    const size_t tlb_off = code_read ?
        offsetof(CPUTLBEntry, addr_code) : offsetof(CPUTLBEntry, addr_read);
    const MMUAccessType access_type =
        code_read ? MMU_INST_FETCH : MMU_DATA_LOAD;
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    void *haddr;
    uint64_t res;
    size_t size = memop_size(op);

    /* Handle CPU specific unaligned behaviour */
    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(env_cpu(env), addr, access_type,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
                            addr & TARGET_PAGE_MASK)) {
            tlb_fill(env_cpu(env), addr, size,
                     access_type, mmu_idx, retaddr);
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = code_read ? entry->addr_code : entry->addr_read;
        tlb_addr &= ~TLB_INVALID_MASK;
    }

    /* Handle anything that isn't just a straight memory access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;

        /* For anything that is unaligned, recurse through full_load.  */
        if ((addr & (size - 1)) != 0) {
            goto do_unaligned_access;
        }

        iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];

        /* Handle watchpoints.  */
        if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
            /* On watchpoint hit, this will longjmp out.  */
            cpu_check_watchpoint(env_cpu(env), addr, size,
                                 iotlbentry->attrs, BP_MEM_READ, retaddr);

            /* The backing page may or may not require I/O.  */
            tlb_addr &= ~TLB_WATCHPOINT;
            if ((tlb_addr & ~TARGET_PAGE_MASK) == 0) {
                goto do_aligned_access;
            }
        }

        /* Handle I/O access.  */
        return io_readx(env, iotlbentry, mmu_idx, addr,
                        retaddr, access_type, op);
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (size > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        uint64_t r1, r2;
        unsigned shift;
    do_unaligned_access:
        addr1 = addr & ~((target_ulong)size - 1);
        addr2 = addr1 + size;
        r1 = full_load(env, addr1, oi, retaddr);
        r2 = full_load(env, addr2, oi, retaddr);
        shift = (addr & (size - 1)) * 8;

        if (memop_big_endian(op)) {
            /* Big-endian combine.  */
            res = (r1 << shift) | (r2 >> ((size * 8) - shift));
        } else {
            /* Little-endian combine.  */
            res = (r1 >> shift) | (r2 << ((size * 8) - shift));
        }
        return res & MAKE_64BIT_MASK(0, size * 8);
    }

 do_aligned_access:
    haddr = (void *)((uintptr_t)addr + entry->addend);
    switch (op) {
    case MO_UB:
        res = ldub_p(haddr);
        break;
    case MO_BEUW:
        res = lduw_be_p(haddr);
        break;
    case MO_LEUW:
        res = lduw_le_p(haddr);
        break;
    case MO_BEUL:
        res = (uint32_t)ldl_be_p(haddr);
        break;
    case MO_LEUL:
        res = (uint32_t)ldl_le_p(haddr);
        break;
    case MO_BEQ:
        res = ldq_be_p(haddr);
        break;
    case MO_LEQ:
        res = ldq_le_p(haddr);
        break;
    default:
        g_assert_not_reached();
    }

    return res;
}
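/*
 * Illustrative example of the unaligned combine above: a 4-byte
 * little-endian load at an address ending in ...ffe (so the last two bytes
 * live on the next page) issues two aligned loads at addr1 == ...ffc and
 * addr2 == the start of the next page, and with shift == 16 the result is
 * (r1 >> 16) | (r2 << 16), masked to 32 bits.
 */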
/*
 * For the benefit of TCG generated code, we want to avoid the
 * complication of ABI-specific return type promotion and always
 * return a value extended to the register size of the host. This is
 * tcg_target_long, except in the case of a 32-bit host and 64-bit
 * data, and for that we always have uint64_t.
 *
 * We don't bother with this widened value for SOFTMMU_CODE_ACCESS.
 */
static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr,
                              TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_UB, false, full_ldub_mmu);
}

tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
                                     TCGMemOpIdx oi, uintptr_t retaddr)
{
    return full_ldub_mmu(env, addr, oi, retaddr);
}

static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr,
                                 TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_LEUW, false,
                       full_le_lduw_mmu);
}

tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr)
{
    return full_le_lduw_mmu(env, addr, oi, retaddr);
}

static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr,
                                 TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_BEUW, false,
                       full_be_lduw_mmu);
}

tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr)
{
    return full_be_lduw_mmu(env, addr, oi, retaddr);
}

static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr,
                                 TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_LEUL, false,
                       full_le_ldul_mmu);
}

tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr)
{
    return full_le_ldul_mmu(env, addr, oi, retaddr);
}

static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr,
                                 TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_BEUL, false,
                       full_be_ldul_mmu);
}

tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr)
{
    return full_be_ldul_mmu(env, addr, oi, retaddr);
}

uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
                           TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_LEQ, false,
                       helper_le_ldq_mmu);
}

uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
                           TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_BEQ, false,
                       helper_be_ldq_mmu);
}
/*
 * Provide signed versions of the load routines as well.  We can of course
 * avoid this for 64-bit data, or for 32-bit data on 32-bit host.
 */

tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
                                     TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (int8_t)helper_ret_ldub_mmu(env, addr, oi, retaddr);
}

tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (int16_t)helper_le_lduw_mmu(env, addr, oi, retaddr);
}

tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (int16_t)helper_be_lduw_mmu(env, addr, oi, retaddr);
}

tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (int32_t)helper_le_ldul_mmu(env, addr, oi, retaddr);
}

tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr);
}
/*
 * Store Helpers
 */

static inline void __attribute__((always_inline))
store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
             TCGMemOpIdx oi, uintptr_t retaddr, MemOp op)
{
    uintptr_t mmu_idx = get_mmuidx(oi);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    target_ulong tlb_addr = tlb_addr_write(entry);
    const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    void *haddr;
    size_t size = memop_size(op);

    /* Handle CPU specific unaligned behaviour */
    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
                            addr & TARGET_PAGE_MASK)) {
            tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE,
                     mmu_idx, retaddr);
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK;
    }

    /* Handle anything that isn't just a straight memory access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        CPUIOTLBEntry *iotlbentry;

        /* For anything that is unaligned, recurse through byte stores.  */
        if ((addr & (size - 1)) != 0) {
            goto do_unaligned_access;
        }

        iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];

        /* Handle watchpoints.  */
        if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
            /* On watchpoint hit, this will longjmp out.  */
            cpu_check_watchpoint(env_cpu(env), addr, size,
                                 iotlbentry->attrs, BP_MEM_WRITE, retaddr);

            /* The backing page may or may not require I/O.  */
            tlb_addr &= ~TLB_WATCHPOINT;
            if ((tlb_addr & ~TARGET_PAGE_MASK) == 0) {
                goto do_aligned_access;
            }
        }

        /* Handle I/O access.  */
        io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr, op);
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (size > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
                    >= TARGET_PAGE_SIZE)) {
        int i;
        uintptr_t index2;
        CPUTLBEntry *entry2;
        target_ulong page2, tlb_addr2;
        size_t size2;

    do_unaligned_access:
        /*
         * Ensure the second page is in the TLB.  Note that the first page
         * is already guaranteed to be filled, and that the second page
         * cannot evict the first.
         */
        page2 = (addr + size) & TARGET_PAGE_MASK;
        size2 = (addr + size) & ~TARGET_PAGE_MASK;
        index2 = tlb_index(env, mmu_idx, page2);
        entry2 = tlb_entry(env, mmu_idx, page2);
        tlb_addr2 = tlb_addr_write(entry2);
        if (!tlb_hit_page(tlb_addr2, page2)) {
            if (!victim_tlb_hit(env, mmu_idx, index2, tlb_off, page2)) {
                tlb_fill(env_cpu(env), page2, size2, MMU_DATA_STORE,
                         mmu_idx, retaddr);
                index2 = tlb_index(env, mmu_idx, page2);
                entry2 = tlb_entry(env, mmu_idx, page2);
            }
            tlb_addr2 = tlb_addr_write(entry2);
        }

        /*
         * Handle watchpoints.  Since this may trap, all checks
         * must happen before any store.
         */
        if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
            cpu_check_watchpoint(env_cpu(env), addr, size - size2,
                                 env_tlb(env)->d[mmu_idx].iotlb[index].attrs,
                                 BP_MEM_WRITE, retaddr);
        }
        if (unlikely(tlb_addr2 & TLB_WATCHPOINT)) {
            cpu_check_watchpoint(env_cpu(env), page2, size2,
                                 env_tlb(env)->d[mmu_idx].iotlb[index2].attrs,
                                 BP_MEM_WRITE, retaddr);
        }

        /*
         * XXX: not efficient, but simple.
         * This loop must go in the forward direction to avoid issues
         * with self-modifying code in Windows 64-bit.
         */
        for (i = 0; i < size; ++i) {
            uint8_t val8;
            if (memop_big_endian(op)) {
                /* Big-endian extract.  */
                val8 = val >> (((size - 1) * 8) - (i * 8));
            } else {
                /* Little-endian extract.  */
                val8 = val >> (i * 8);
            }
            helper_ret_stb_mmu(env, addr + i, val8, oi, retaddr);
        }
        return;
    }

 do_aligned_access:
    haddr = (void *)((uintptr_t)addr + entry->addend);
    switch (op) {
    case MO_UB:
        stb_p(haddr, val);
        break;
    case MO_BEUW:
        stw_be_p(haddr, val);
        break;
    case MO_LEUW:
        stw_le_p(haddr, val);
        break;
    case MO_BEUL:
        stl_be_p(haddr, val);
        break;
    case MO_LEUL:
        stl_le_p(haddr, val);
        break;
    case MO_BEQ:
        stq_be_p(haddr, val);
        break;
    case MO_LEQ:
        stq_le_p(haddr, val);
        break;
    default:
        g_assert_not_reached();
    }
}
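/*
 * Illustrative example of the byte-by-byte path above: storing the 32-bit
 * value 0x11223344 as big-endian writes the bytes 0x11, 0x22, 0x33, 0x44 in
 * ascending address order (val >> 24, >> 16, >> 8, >> 0, each truncated to
 * a byte), while the little-endian extract writes 0x44, 0x33, 0x22, 0x11.
 */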
void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
                        TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, MO_UB);
}

void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, MO_LEUW);
}

void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, MO_BEUW);
}

void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, MO_LEUL);
}

void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, MO_BEUL);
}

void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, MO_LEQ);
}

void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    store_helper(env, addr, val, oi, retaddr, MO_BEQ);
}
/* First set of helpers allows passing in of OI and RETADDR.  This makes
   them callable from other helpers.  */

#define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr
#define ATOMIC_NAME(X) \
    HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
#define ATOMIC_MMU_DECLS NotDirtyInfo ndi
#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr, &ndi)
#define ATOMIC_MMU_CLEANUP                              \
    do {                                                \
        if (unlikely(ndi.active)) {                     \
            memory_notdirty_write_complete(&ndi);       \
        }                                               \
    } while (0)

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#if HAVE_CMPXCHG128 || HAVE_ATOMIC128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif
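/*
 * Each inclusion of "atomic_template.h" above expands the cmpxchg, exchange
 * and fetch-op helpers for one access size (DATA_SIZE bytes), using
 * ATOMIC_MMU_LOOKUP to obtain the host address and ATOMIC_MMU_CLEANUP to
 * complete any deferred notdirty write tracking.
 */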
/* Second set of helpers are directly callable from TCG as helpers.  */

#undef EXTRA_ARGS
#undef ATOMIC_NAME
#undef ATOMIC_MMU_LOOKUP
#define EXTRA_ARGS         , TCGMemOpIdx oi
#define ATOMIC_NAME(X)     HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, GETPC(), &ndi)

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif
/* Code access functions.  */

static uint64_t full_ldub_cmmu(CPUArchState *env, target_ulong addr,
                               TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_cmmu);
}

uint8_t helper_ret_ldb_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    return full_ldub_cmmu(env, addr, oi, retaddr);
}

static uint64_t full_le_lduw_cmmu(CPUArchState *env, target_ulong addr,
                                  TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_LEUW, true,
                       full_le_lduw_cmmu);
}

uint16_t helper_le_ldw_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    return full_le_lduw_cmmu(env, addr, oi, retaddr);
}

static uint64_t full_be_lduw_cmmu(CPUArchState *env, target_ulong addr,
                                  TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_BEUW, true,
                       full_be_lduw_cmmu);
}

uint16_t helper_be_ldw_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    return full_be_lduw_cmmu(env, addr, oi, retaddr);
}

static uint64_t full_le_ldul_cmmu(CPUArchState *env, target_ulong addr,
                                  TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_LEUL, true,
                       full_le_ldul_cmmu);
}

uint32_t helper_le_ldl_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    return full_le_ldul_cmmu(env, addr, oi, retaddr);
}

static uint64_t full_be_ldul_cmmu(CPUArchState *env, target_ulong addr,
                                  TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_BEUL, true,
                       full_be_ldul_cmmu);
}

uint32_t helper_be_ldl_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    return full_be_ldul_cmmu(env, addr, oi, retaddr);
}

uint64_t helper_le_ldq_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_LEQ, true,
                       helper_le_ldq_cmmu);
}

uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    return load_helper(env, addr, oi, retaddr, MO_BEQ, true,
                       helper_be_ldq_cmmu);
}