/*
 *  Common CPU TLB handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"

#include "exec/cputlb.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
//#define DEBUG_TLB
//#define DEBUG_TLB_CHECK
/* NOTE:
 * If flush_global is true (the usual case), flush all tlb entries.
 * If flush_global is false, flush (at least) all tlb entries not
 * marked global.
 *
 * Since QEMU doesn't currently implement a global/not-global flag
 * for tlb entries, at the moment tlb_flush() will also flush all
 * tlb entries in the flush_global == false case. This is OK because
 * CPU architectures generally permit an implementation to drop
 * entries from the TLB at any time, so flushing more entries than
 * required is only an efficiency issue, not a correctness issue.
 */
void tlb_flush(CPUState *cpu, int flush_global)
{
    CPUArchState *env = cpu->env_ptr;
#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    cpu->current_tb = NULL;
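    /* Filling the TLB with -1 sets every addr_read/addr_write/addr_code
     * comparator to all-ones, which can never equal a page-aligned lookup
     * address, so every entry reads as invalid.  The tb_jmp_cache is
     * zeroed so that stale TB chaining pointers are not followed. */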
    memset(env->tlb_table, -1, sizeof(env->tlb_table));
    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
}
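/* Invalidate a single TLB entry if any of its three comparators matches
 * the given page-aligned address.  TLB_INVALID_MASK is kept in the
 * comparison so that an already-invalid entry can never match. */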
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
    }
}
void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    CPUArchState *env = cpu->env_ptr;
    int i;
    int mmu_idx;
#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf("tlb_flush_page: forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
#endif
        tlb_flush(cpu, 1);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    cpu->current_tb = NULL;
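    /* The TLB is direct-mapped: the low bits of the virtual page number
     * select the one slot per MMU mode that could hold this page. */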
    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
    }

    tb_flush_jmp_cache(cpu, addr);
}
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr, TARGET_PAGE_SIZE,
                                    DIRTY_MEMORY_CODE);
}
/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code_phys(CPUState *cpu, ram_addr_t ram_addr,
                             target_ulong vaddr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}
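/* True if this entry maps writable RAM that can be stored to directly,
 * i.e. none of the special-access bits (invalid, MMIO, not-dirty) are
 * set in addr_write. */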
static bool tlb_is_dirty_ram(CPUTLBEntry *tlbe)
{
    return (tlbe->addr_write & (TLB_INVALID_MASK|TLB_MMIO|TLB_NOTDIRTY)) == 0;
}
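/* 'start' and 'length' describe a range of *host* addresses: adding the
 * entry's addend to its guest page address yields the host address it
 * maps, which is what is compared here.  Setting TLB_NOTDIRTY forces the
 * next write through this entry onto the slow path, where the page can
 * be marked dirty again. */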
void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
                           uintptr_t length)
{
    uintptr_t addr;

    if (tlb_is_dirty_ram(tlb_entry)) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}
static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr) == NULL) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}
void cpu_tlb_reset_dirty_all(ram_addr_t start1, ram_addr_t length)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;
        int mmu_idx;

        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            unsigned int i;

            for (i = 0; i < CPU_TLB_SIZE; i++) {
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);
            }
        }
    }
}
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}
/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUArchState *env, target_ulong vaddr)
{
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
    }
}
/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB.  */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}
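/* Illustrative example of the coalescing above (assuming a 32-bit
 * target_ulong): with a 2 MiB page already recorded at 0x00200000
 * (mask 0xffe00000) and a new 2 MiB page at 0x00600000, bit 0x00400000
 * differs, so the mask widens twice to 0xff800000 and the recorded
 * region becomes the 8 MiB block at 0x00000000 covering both pages. */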
/* Add a new TLB entry. At most one entry for a given virtual address
   is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
   supplied size is only used by tlb_flush_page.  */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    CPUArchState *env = cpu->env_ptr;
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    uintptr_t addend;
    CPUTLBEntry *te;
    hwaddr iotlb, xlat, sz;

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }
    sz = size;
    section = address_space_translate_for_iotlb(cpu->as, paddr,
                                                &xlat, &sz);
    assert(sz >= TARGET_PAGE_SIZE);
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
           " prot=%x idx=%d\n",
           vaddr, paddr, prot, mmu_idx);
#endif
    address = vaddr;
    if (!memory_region_is_ram(section->mr) && !memory_region_is_romd(section->mr)) {
        /* IO memory case */
        address |= TLB_MMIO;
        addend = 0;
    } else {
        /* TLB_MMIO for rom/romd handled below */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    }
    code_address = address;
    iotlb = memory_region_section_get_iotlb(cpu, section, vaddr, paddr, xlat,
                                            prot, &address);
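    /* Both the iotlb value and the host addend are stored with vaddr
     * already subtracted, so a TLB hit needs only one addition: adding
     * the access address back recovers the iotlb entry or the host
     * pointer for the page. */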
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }
    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((memory_region_is_ram(section->mr) && section->readonly)
            || memory_region_is_romd(section->mr)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if (memory_region_is_ram(section->mr)
                   && cpu_physical_memory_is_clean(section->mr->ram_addr
                                                   + xlat)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}
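/* Illustrative use (exact call sites vary by target): a target's
 * tlb_fill() handler typically walks the guest page tables and then
 * installs the translation with something like
 *
 *     tlb_set_page(cs, vaddr & TARGET_PAGE_MASK, paddr & TARGET_PAGE_MASK,
 *                  prot, mmu_idx, TARGET_PAGE_SIZE);
 */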
/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
 * is actually a ram_addr_t (in system mode; the user mode emulation
 * version of this function returns a guest virtual address).
 */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    int mmu_idx, page_index, pd;
    void *p;
    MemoryRegion *mr;
    CPUState *cpu = ENV_GET_CPU(env1);
    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1);
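    /* If the code address is not in the TLB, a dummy byte load forces a
     * fill through the slow path; the load can fault, which is why this
     * function can trigger an exception. */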
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
        cpu_ldub_code(env1, addr);
    }
    pd = env1->iotlb[mmu_idx][page_index] & ~TARGET_PAGE_MASK;
    mr = iotlb_to_region(cpu->as, pd);
    if (memory_region_is_unassigned(mr)) {
        CPUClass *cc = CPU_GET_CLASS(cpu);

        if (cc->do_unassigned_access) {
            cc->do_unassigned_access(cpu, addr, false, true, 0, 4);
        } else {
            cpu_abort(cpu, "Trying to execute code outside RAM or ROM at 0x"
                      TARGET_FMT_lx "\n", addr);
        }
    }
    p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
    return qemu_ram_addr_from_host_nofail(p);
}
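/* Generate the softmmu load/store helpers: each inclusion of
 * softmmu_template.h with SHIFT = 0..3 instantiates the 1-, 2-, 4- and
 * 8-byte accessors.  The _mmu pass builds the data-access helpers; the
 * _cmmu pass below builds the code-access variants used by the
 * translator. */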
#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"
#undef MMUSUFFIX

#define MMUSUFFIX _cmmu
#undef GETRA
#define GETRA() ((uintptr_t)0)
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"