/*
 * internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef EXEC_ALL_H
#define EXEC_ALL_H

#include "cpu.h"
#include "exec/tb-context.h"
#include "exec/cpu_ldst.h"
#include "sysemu/cpus.h"

/* Allow inspection of translation results - the slowdown should be
   negligible, so we leave it enabled. */
#define DEBUG_DISAS

/* Page tracking code uses RAM addresses in system mode, and virtual
   addresses in userspace mode.  Define tb_page_addr_t to be an appropriate
   type. */
#if defined(CONFIG_USER_ONLY)
typedef abi_ulong tb_page_addr_t;
#define TB_PAGE_ADDR_FMT TARGET_ABI_FMT_lx
#else
typedef ram_addr_t tb_page_addr_t;
#define TB_PAGE_ADDR_FMT RAM_ADDR_FMT
#endif

#include "qemu/log.h"

void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns);
void restore_state_to_opc(CPUArchState *env, TranslationBlock *tb,
                          target_ulong *data);

void cpu_gen_init(void);

/**
 * cpu_restore_state:
 * @cpu: the vCPU whose state is to be restored
 * @searched_pc: the host PC the fault occurred at
 * @will_exit: true if the TB executed will be interrupted after some
 *             cpu adjustments. Required for maintaining the correct
 *             icount values
 * @return: true if state was restored, false otherwise
 *
 * Attempt to restore the state for a fault occurring in translated
 * code. If @searched_pc is not in translated code, no state is
 * restored and the function returns false.
 */
bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc, bool will_exit);
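
/*
 * Usage sketch (illustrative only): a helper that detected a fault at
 * host return address 'retaddr' re-synchronizes the guest state and
 * then unwinds; cpu_loop_exit_restore() below combines both steps:
 *
 *     if (cpu_restore_state(cpu, retaddr, true)) {
 *         cpu_loop_exit(cpu);
 *     }
 */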

void QEMU_NORETURN cpu_loop_exit_noexc(CPUState *cpu);
void QEMU_NORETURN cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              uint32_t flags,
                              int cflags);

void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
void QEMU_NORETURN cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
void QEMU_NORETURN cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);

/**
 * cpu_loop_exit_requested:
 * @cpu: The CPU state to be tested
 *
 * Indicate whether somebody has asked for the CPU to be returned to the
 * main loop (e.g., via cpu_exit() or cpu_interrupt()).
 *
 * This is helpful for architectures that support interruptible
 * instructions. After writing back all state to registers/memory, this
 * call can be used to check if it makes sense to return to the main loop
 * or to continue executing the interruptible instruction.
 */
static inline bool cpu_loop_exit_requested(CPUState *cpu)
{
    return (int32_t)atomic_read(&cpu_neg(cpu)->icount_decr.u32) < 0;
}
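
/*
 * Usage sketch (illustrative only; the names env->index, len and
 * do_one_element() are hypothetical): an interruptible instruction can
 * write back consistent state between elements and then test whether
 * to return to the main loop:
 *
 *     for (i = env->index; i < len; i++) {
 *         do_one_element(env, i);
 *         env->index = i + 1;
 *         if (cpu_loop_exit_requested(cs)) {
 *             cpu_loop_exit_restore(cs, GETPC());
 *         }
 *     }
 */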

#if !defined(CONFIG_USER_ONLY)
void cpu_reloading_memory_map(void);
/**
 * cpu_address_space_init:
 * @cpu: CPU to add this address space to
 * @asidx: integer index of this address space
 * @prefix: prefix to be used as name of address space
 * @mr: the root memory region of address space
 *
 * Add the specified address space to the CPU's cpu_ases list.
 * The address space added with @asidx 0 is the one used for the
 * convenience pointer cpu->as.
 * The target-specific code which registers ASes is responsible
 * for defining what semantics address spaces 0, 1, 2, etc. have.
 *
 * Before the first call to this function, the caller must set
 * cpu->num_ases to the total number of address spaces it needs
 * to support.
 *
 * Note that with KVM only one address space is supported.
 */
void cpu_address_space_init(CPUState *cpu, int asidx,
                            const char *prefix, MemoryRegion *mr);
#endif
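
/*
 * Example (a sketch of typical target realize code; "cpu-secure-memory"
 * and secure_mr are hypothetical): a CPU exposing a secure and a
 * non-secure address space sets cpu->num_ases before registering each:
 *
 *     cs->num_ases = 2;
 *     cpu_address_space_init(cs, 0, "cpu-memory", get_system_memory());
 *     cpu_address_space_init(cs, 1, "cpu-secure-memory", secure_mr);
 */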

#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
/* cputlb.c */
/**
 * tlb_init - initialize a CPU's TLB
 * @cpu: CPU whose TLB should be initialized
 */
void tlb_init(CPUState *cpu);
/**
 * tlb_flush_page:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of the specified CPU, for all
 * MMU indexes.
 */
void tlb_flush_page(CPUState *cpu, target_ulong addr);
/**
 * tlb_flush_page_all_cpus:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all MMU indexes.
 */
void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr);
/**
 * tlb_flush_page_all_cpus_synced:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all MMU indexes, like
 * tlb_flush_page_all_cpus, except that the source vCPU's work is
 * scheduled as safe work, meaning all flushes will be complete once
 * the source vCPU's safe work is complete. This will depend on when
 * the guest's translation ends the TB.
 */
void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr);
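
/*
 * Illustrative sketch (helper_tlbi_page is a hypothetical name): a
 * target implementing a broadcast TLB-invalidate instruction with
 * completion semantics would call the _synced variant from a helper,
 * and end the TB after the instruction so the queued safe work can run:
 *
 *     void helper_tlbi_page(CPUArchState *env, target_ulong addr)
 *     {
 *         tlb_flush_page_all_cpus_synced(env_cpu(env), addr);
 *     }
 */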
/**
 * tlb_flush:
 * @cpu: CPU whose TLB should be flushed
 *
 * Flush the entire TLB for the specified CPU. Most CPU architectures
 * allow the implementation to drop entries from the TLB at any time
 * so this is generally safe. If more selective flushing is required
 * use one of the other functions for efficiency.
 */
void tlb_flush(CPUState *cpu);
/**
 * tlb_flush_all_cpus:
 * @src_cpu: source CPU of the flush
 *
 * Flush the entire TLB of all CPUs, for all MMU indexes.
 */
void tlb_flush_all_cpus(CPUState *src_cpu);
/**
 * tlb_flush_all_cpus_synced:
 * @src_cpu: source CPU of the flush
 *
 * Like tlb_flush_all_cpus, except that the source vCPU's work is
 * scheduled as safe work, meaning all flushes will be complete once
 * the source vCPU's safe work is complete. This will depend on when
 * the guest's translation ends the TB.
 */
void tlb_flush_all_cpus_synced(CPUState *src_cpu);
/**
 * tlb_flush_page_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr,
                              uint16_t idxmap);
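
/*
 * Example (illustrative; MMU_KERNEL_IDX and MMU_USER_IDX are
 * hypothetical per-target indexes): @idxmap is a bitmap, so several
 * MMU indexes can be flushed in one call:
 *
 *     tlb_flush_page_by_mmuidx(cs, addr,
 *                              (1 << MMU_KERNEL_IDX) | (1 << MMU_USER_IDX));
 */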
/**
 * tlb_flush_page_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
                                       uint16_t idxmap);
/**
 * tlb_flush_page_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified MMU
 * indexes, like tlb_flush_page_by_mmuidx_all_cpus, except that the
 * source vCPU's work is scheduled as safe work, meaning all flushes
 * will be complete once the source vCPU's safe work is complete. This
 * will depend on when the guest's translation ends the TB.
 */
void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, target_ulong addr,
                                              uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified
 * MMU indexes, like tlb_flush_by_mmuidx_all_cpus, except that the
 * source vCPU's work is scheduled as safe work, meaning all flushes
 * will be complete once the source vCPU's safe work is complete. This
 * will depend on when the guest's translation ends the TB.
 */
void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_set_page_with_attrs:
 * @cpu: CPU to add this TLB entry for
 * @vaddr: virtual address of page to add entry for
 * @paddr: physical address of the page
 * @attrs: memory transaction attributes
 * @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits)
 * @mmu_idx: MMU index to insert TLB entry for
 * @size: size of the page in bytes
 *
 * Add an entry to this CPU's TLB (a mapping from virtual address
 * @vaddr to physical address @paddr) with the specified memory
 * transaction attributes. This is generally called by the target CPU
 * specific code after it has been called through the tlb_fill()
 * entry point and performed a successful page table walk to find
 * the physical address and attributes for the virtual address
 * which provoked the TLB miss.
 *
 * At most one entry for a given virtual address is permitted. Only a
 * single TARGET_PAGE_SIZE region is mapped; the supplied @size is only
 * used by tlb_flush_page.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs,
                             int prot, int mmu_idx, target_ulong size);
/* tlb_set_page:
 *
 * This function is equivalent to calling tlb_set_page_with_attrs()
 * with an @attrs argument of MEMTXATTRS_UNSPECIFIED. It's provided
 * as a convenience for CPUs which don't use memory transaction attributes.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size);
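
/*
 * Illustrative sketch of the expected call pattern from target code,
 * after a successful page table walk in the tlb_fill() path (vaddr,
 * paddr, prot and mmu_idx come from the walk):
 *
 *     tlb_set_page_with_attrs(cs, vaddr & TARGET_PAGE_MASK,
 *                             paddr & TARGET_PAGE_MASK,
 *                             MEMTXATTRS_UNSPECIFIED, prot,
 *                             mmu_idx, TARGET_PAGE_SIZE);
 */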
#else
static inline void tlb_init(CPUState *cpu)
{
}
static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
}
static inline void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
}
static inline void tlb_flush_page_all_cpus_synced(CPUState *src,
                                                  target_ulong addr)
{
}
static inline void tlb_flush(CPUState *cpu)
{
}
static inline void tlb_flush_all_cpus(CPUState *src_cpu)
{
}
static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
}
static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
                                            target_ulong addr, uint16_t idxmap)
{
}

static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu,
                                                     target_ulong addr,
                                                     uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                            target_ulong addr,
                                                            uint16_t idxmap)
{
}
static inline void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap)
{
}

static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                       uint16_t idxmap)
{
}
#endif
void *probe_access(CPUArchState *env, target_ulong addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t retaddr);

static inline void *probe_write(CPUArchState *env, target_ulong addr, int size,
                                int mmu_idx, uintptr_t retaddr)
{
    return probe_access(env, addr, size, MMU_DATA_STORE, mmu_idx, retaddr);
}

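/*
 * Usage sketch (illustrative only): a helper that must fault before
 * performing a multi-step store can probe the destination up front,
 * using the per-target cpu_mmu_index() and GETPC() (defined below):
 *
 *     probe_write(env, dest_addr, size, cpu_mmu_index(env, false), GETPC());
 *
 * If the probe faults, it unwinds via the CPU loop and the helper
 * never performs a partial store.
 */
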
#define CODE_GEN_ALIGN 16 /* must be >= the size of an icache line */

/* Estimated block size for TB allocation. */
/* ??? The following is based on a 2015 survey of x86_64 host output.
   Better would seem to be some sort of dynamically sized TB array,
   adapting to the block sizes actually being produced. */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 400
#else
#define CODE_GEN_AVG_BLOCK_SIZE 150
#endif

/*
 * Translation Cache-related fields of a TB.
 * This struct exists just for convenience; we keep track of TBs in a binary
 * search tree, and the only fields needed to compare TBs in the tree are
 * @ptr and @size.
 * Note: the address of search data can be obtained by adding @size to @ptr.
 */
struct tb_tc {
    void *ptr;    /* pointer to the translated code */
    size_t size;
};

struct TranslationBlock {
    target_ulong pc;   /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    uint32_t flags; /* flags defining in which context the code was generated */
    uint16_t size;      /* size of target code for this block (1 <=
                           size <= TARGET_PAGE_SIZE) */
    uint16_t icount;
    uint32_t cflags;    /* compile flags */
#define CF_COUNT_MASK  0x00007fff
#define CF_LAST_IO     0x00008000 /* Last insn may be an IO access.  */
#define CF_NOCACHE     0x00010000 /* To be freed after execution */
#define CF_USE_ICOUNT  0x00020000
#define CF_INVALID     0x00040000 /* TB is stale. Set with @jmp_lock held */
#define CF_PARALLEL    0x00080000 /* Generate code for a parallel context */
#define CF_CLUSTER_MASK 0xff000000 /* Top 8 bits are cluster ID */
#define CF_CLUSTER_SHIFT 24
/* cflags' mask for hashing/comparison */
#define CF_HASH_MASK   \
    (CF_COUNT_MASK | CF_LAST_IO | CF_USE_ICOUNT | CF_PARALLEL | CF_CLUSTER_MASK)

    /* Per-vCPU dynamic tracing state used to generate this TB */
    uint32_t trace_vcpu_dstate;

    struct tb_tc tc;

    /* original tb when cflags has CF_NOCACHE */
    struct TranslationBlock *orig_tb;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[].
       The list is protected by the TB's page('s) lock(s) */
    uintptr_t page_next[2];
    tb_page_addr_t page_addr[2];

    /* jmp_lock placed here to fill a 4-byte hole. Its documentation is below */
    QemuSpin jmp_lock;

    /* The following data are used to directly call another TB from
     * the code of this one. This can be done either by emitting direct or
     * indirect native jump instructions. These jumps are reset so that the TB
     * just continues its execution. The TB can be linked to another one by
     * setting one of the jump targets (or patching the jump instruction). Only
     * two such jumps are supported.
     */
    uint16_t jmp_reset_offset[2]; /* offset of original jump target */
#define TB_JMP_RESET_OFFSET_INVALID 0xffff /* indicates no jump generated */
    uintptr_t jmp_target_arg[2];  /* target address or offset */

    /*
     * Each TB has a NULL-terminated list (jmp_list_head) of incoming jumps.
     * Each TB can have two outgoing jumps, and therefore can participate
     * in two lists. The list entries are kept in jmp_list_next[2]. The least
     * significant bit (LSB) of the pointers in these lists is used to encode
     * which of the two list entries is to be used in the pointed TB.
     *
     * List traversals are protected by jmp_lock. The destination TB of each
     * outgoing jump is kept in jmp_dest[] so that the appropriate jmp_lock
     * can be acquired from any origin TB.
     *
     * jmp_dest[] are tagged pointers as well. The LSB is set when the TB is
     * being invalidated, so that no further outgoing jumps from it can be set.
     *
     * jmp_lock also protects the CF_INVALID cflag; a jump must not be chained
     * to a destination TB that has CF_INVALID set.
     */
    uintptr_t jmp_list_head;
    uintptr_t jmp_list_next[2];
    uintptr_t jmp_dest[2];
};
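
/*
 * Example: the cluster ID a TB was generated for is recovered from its
 * cflags with the mask/shift pair defined above:
 *
 *     uint8_t cluster = (tb->cflags & CF_CLUSTER_MASK) >> CF_CLUSTER_SHIFT;
 */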

extern bool parallel_cpus;

/* Hide the atomic_read to make code a little easier on the eyes */
static inline uint32_t tb_cflags(const TranslationBlock *tb)
{
    return atomic_read(&tb->cflags);
}

/* current cflags for hashing/comparison */
static inline uint32_t curr_cflags(void)
{
    return (parallel_cpus ? CF_PARALLEL : 0)
         | (use_icount ? CF_USE_ICOUNT : 0);
}

/* TranslationBlock invalidate API */
#if defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(target_ulong addr);
void tb_invalidate_phys_range(target_ulong start, target_ulong end);
#else
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs);
#endif
void tb_flush(CPUState *cpu);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
                                   target_ulong cs_base, uint32_t flags,
                                   uint32_t cf_mask);
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);
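
/*
 * Illustrative sketch (exact call sites differ): a lookup for the
 * current execution context pairs the guest state with the current
 * compile flags:
 *
 *     cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
 *     tb = tb_htable_lookup(cpu, pc, cs_base, flags, curr_cflags());
 */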

/* GETPC is the true target of the return instruction that we'll execute. */
#if defined(CONFIG_TCG_INTERPRETER)
extern uintptr_t tci_tb_ptr;
# define GETPC() tci_tb_ptr
#else
# define GETPC() \
    ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
#endif

/* The true return address will often point to a host insn that is part of
   the next translated guest insn.  Adjust the address backward to point to
   the middle of the call insn.  Subtracting one would do the job except for
   several compressed mode architectures (arm, mips) which set the low bit
   to indicate the compressed mode; subtracting two works around that.  It
   is also the case that there are no host ISAs that contain a call insn
   smaller than 4 bytes, so we don't worry about special-casing this. */
#define GETPC_ADJ 2
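
/*
 * Usage sketch (helper_example and some_fault_condition are
 * hypothetical): GETPC() must be evaluated in the outermost helper
 * called from generated code, then passed down to whatever may unwind:
 *
 *     void helper_example(CPUArchState *env, target_ulong addr)
 *     {
 *         uintptr_t ra = GETPC();
 *         if (some_fault_condition(env, addr)) {
 *             cpu_loop_exit_restore(env_cpu(env), ra);
 *         }
 *     }
 */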

#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_DEBUG_TCG)
void assert_no_pages_locked(void);
#else
static inline void assert_no_pages_locked(void)
{
}
#endif

#if !defined(CONFIG_USER_ONLY)

/**
 * iotlb_to_section:
 * @cpu: CPU performing the access
 * @index: TCG CPU IOTLB entry
 * @attrs: memory transaction attributes
 *
 * Given a TCG CPU IOTLB entry, return the MemoryRegionSection that
 * it refers to. @index will have been initially created and returned
 * by memory_region_section_get_iotlb().
 */
struct MemoryRegionSection *iotlb_to_section(CPUState *cpu,
                                             hwaddr index, MemTxAttrs attrs);
#endif

#if defined(CONFIG_USER_ONLY)
void mmap_lock(void);
void mmap_unlock(void);
bool have_mmap_lock(void);

/**
 * get_page_addr_code() - user-mode version
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 *
 * Returns @addr.
 */
static inline tb_page_addr_t get_page_addr_code(CPUArchState *env,
                                                target_ulong addr)
{
    return addr;
}

/**
 * get_page_addr_code_hostp() - user-mode version
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 * @hostp: output host address pointer (may be NULL)
 *
 * Returns @addr.
 *
 * If @hostp is non-NULL, sets *@hostp to the host address where @addr's
 * content is kept.
 */
static inline tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env,
                                                      target_ulong addr,
                                                      void **hostp)
{
    if (hostp) {
        *hostp = g2h(addr);
    }
    return addr;
}
#else
static inline void mmap_lock(void) {}
static inline void mmap_unlock(void) {}

/**
 * get_page_addr_code() - full-system version
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 *
 * If we cannot translate and execute from the entire RAM page, or if
 * the region is not backed by RAM, returns -1. Otherwise, returns the
 * ram_addr_t corresponding to the guest code at @addr.
 *
 * Note: this function can trigger an exception.
 */
tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr);

/**
 * get_page_addr_code_hostp() - full-system version
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 * @hostp: output host address pointer (may be NULL)
 *
 * See get_page_addr_code() (full-system version) for documentation on the
 * return value.
 *
 * Sets *@hostp (when @hostp is non-NULL) as follows.
 * If the return value is -1, sets *@hostp to NULL. Otherwise, sets *@hostp
 * to the host address where @addr's content is kept.
 *
 * Note: this function can trigger an exception.
 */
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
                                        void **hostp);

void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr);

/* exec.c */
void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr);

MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen,
                                  MemTxAttrs attrs, int *prot);
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section);
#endif

/* vl.c */
extern int singlestep;

#endif