/*
 * internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef EXEC_ALL_H
#define EXEC_ALL_H

#include "cpu.h"
#include "exec/tb-context.h"
#ifdef CONFIG_TCG
#include "exec/cpu_ldst.h"
#endif
#include "sysemu/cpus.h"

/* Allow translation results to be seen - the slowdown should be
   negligible, so we leave it enabled. */
#define DEBUG_DISAS

/* Page tracking code uses ram addresses in system mode, and virtual
   addresses in userspace mode. Define tb_page_addr_t to be an appropriate
   type. */
#if defined(CONFIG_USER_ONLY)
typedef abi_ulong tb_page_addr_t;
#define TB_PAGE_ADDR_FMT TARGET_ABI_FMT_lx
#else
typedef ram_addr_t tb_page_addr_t;
#define TB_PAGE_ADDR_FMT RAM_ADDR_FMT
#endif

#include "qemu/log.h"

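/*
 * Example (illustrative, not part of the API): TB_PAGE_ADDR_FMT lets
 * logging code print a tb_page_addr_t portably across user-only and
 * system builds:
 *
 *     tb_page_addr_t page = ...;   // some code page address
 *     qemu_log("code page at " TB_PAGE_ADDR_FMT "\n", page);
 */
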
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns);
void restore_state_to_opc(CPUArchState *env, TranslationBlock *tb,
                          target_ulong *data);

void cpu_gen_init(void);

/**
 * cpu_restore_state:
 * @cpu: the vCPU whose state is to be restored
 * @searched_pc: the host PC the fault occurred at
 * @will_exit: true if the TB executed will be interrupted after some
 *             cpu adjustments. Required for maintaining the correct
 *             icount values
 * @return: true if state was restored, false otherwise
 *
 * Attempt to restore the state for a fault occurring in translated
 * code. If @searched_pc is not in translated code, no state is
 * restored and the function returns false.
 */
bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc, bool will_exit);

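/*
 * Example (illustrative sketch; helper_example_fault is hypothetical):
 * a helper that detects a guest fault can restore the CPU state for the
 * faulting instruction before leaving the execution loop:
 *
 *     void helper_example_fault(CPUArchState *env)
 *     {
 *         CPUState *cs = env_cpu(env);
 *
 *         cpu_restore_state(cs, GETPC(), true);
 *         cpu_loop_exit(cs);
 *     }
 *
 * cpu_loop_exit_restore(), declared below, combines both steps.
 */
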
void QEMU_NORETURN cpu_loop_exit_noexc(CPUState *cpu);
void QEMU_NORETURN cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              uint32_t flags,
                              int cflags);

void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
void QEMU_NORETURN cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
void QEMU_NORETURN cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);

/**
 * cpu_loop_exit_requested:
 * @cpu: The CPU state to be tested
 *
 * Indicate if somebody asked for a return of the CPU to the main loop
 * (e.g., via cpu_exit() or cpu_interrupt()).
 *
 * This is helpful for architectures that support interruptible
 * instructions. After writing back all state to registers/memory, this
 * call can be used to check if it makes sense to return to the main loop
 * or to continue executing the interruptible instruction.
 */
static inline bool cpu_loop_exit_requested(CPUState *cpu)
{
    return (int32_t)atomic_read(&cpu_neg(cpu)->icount_decr.u32) < 0;
}

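/*
 * Example (illustrative sketch; the helper state and fields are
 * hypothetical): an interruptible block-copy instruction might check
 * for a pending exit after each unit of work, once the architectural
 * state has been written back:
 *
 *     for (; env->len; env->len--, env->src++, env->dst++) {
 *         // ... copy one unit and update the guest registers ...
 *         if (cpu_loop_exit_requested(cs)) {
 *             cpu_loop_exit_restore(cs, GETPC());
 *         }
 *     }
 */
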
#if !defined(CONFIG_USER_ONLY)
void cpu_reloading_memory_map(void);
/**
 * cpu_address_space_init:
 * @cpu: CPU to add this address space to
 * @asidx: integer index of this address space
 * @prefix: prefix to be used as name of address space
 * @mr: the root memory region of address space
 *
 * Add the specified address space to the CPU's cpu_ases list.
 * The address space added with @asidx 0 is the one used for the
 * convenience pointer cpu->as.
 * The target-specific code which registers ASes is responsible
 * for defining what semantics address space 0, 1, 2, etc have.
 *
 * Before the first call to this function, the caller must set
 * cpu->num_ases to the total number of address spaces it needs
 * to support.
 *
 * Note that with KVM only one address space is supported.
 */
void cpu_address_space_init(CPUState *cpu, int asidx,
                            const char *prefix, MemoryRegion *mr);
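/*
 * Example (illustrative sketch): a target's realize hook registering
 * two address spaces; "secure_mr" is a hypothetical MemoryRegion:
 *
 *     cpu->num_ases = 2;
 *     cpu_address_space_init(cpu, 0, "cpu-memory", get_system_memory());
 *     cpu_address_space_init(cpu, 1, "cpu-secure-memory", secure_mr);
 */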
#endif

#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
/* cputlb.c */
/**
 * tlb_init - initialize a CPU's TLB
 * @cpu: CPU whose TLB should be initialized
 */
void tlb_init(CPUState *cpu);
/**
 * tlb_flush_page:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of the specified CPU, for all
 * MMU indexes.
 */
void tlb_flush_page(CPUState *cpu, target_ulong addr);
/**
 * tlb_flush_page_all_cpus:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all
 * MMU indexes.
 */
void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr);
/**
 * tlb_flush_page_all_cpus_synced:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all MMU indexes, like
 * tlb_flush_page_all_cpus, except that the source vCPU's flush is
 * scheduled as safe work; all flushes will be complete once the source
 * vCPU's safe work has run, which depends on when the guest's current
 * translation block ends.
 */
void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr);
/**
 * tlb_flush:
 * @cpu: CPU whose TLB should be flushed
 *
 * Flush the entire TLB for the specified CPU. Most CPU architectures
 * allow the implementation to drop entries from the TLB at any time
 * so this is generally safe. If more selective flushing is required
 * use one of the other functions for efficiency.
 */
void tlb_flush(CPUState *cpu);
/**
 * tlb_flush_all_cpus:
 * @src_cpu: source CPU of the flush
 *
 * Flush the entire TLB of all CPUs.
 */
void tlb_flush_all_cpus(CPUState *src_cpu);
/**
 * tlb_flush_all_cpus_synced:
 * @src_cpu: source CPU of the flush
 *
 * Like tlb_flush_all_cpus, except that the source vCPU's flush is
 * scheduled as safe work; all flushes will be complete once the source
 * vCPU's safe work has run, which depends on when the guest's current
 * translation block ends.
 */
void tlb_flush_all_cpus_synced(CPUState *src_cpu);
/**
 * tlb_flush_page_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr,
                              uint16_t idxmap);
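/*
 * Example (illustrative; MMU_USER_IDX and MMU_KERNEL_IDX are
 * hypothetical target-defined indexes): @idxmap is a bitmap, so a
 * single call can flush several MMU indexes at once:
 *
 *     tlb_flush_page_by_mmuidx(cs, addr,
 *                              (1 << MMU_USER_IDX) | (1 << MMU_KERNEL_IDX));
 */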
/**
 * tlb_flush_page_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
                                       uint16_t idxmap);
/**
 * tlb_flush_page_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified MMU
 * indexes, like tlb_flush_page_by_mmuidx_all_cpus, except that the
 * originating vCPU's flush is scheduled as safe work; all flushes will
 * be complete once that safe work has run, which depends on when the
 * guest's current translation block ends.
 */
void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, target_ulong addr,
                                              uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified
 * MMU indexes, like tlb_flush_by_mmuidx_all_cpus, except that the
 * originating vCPU's flush is scheduled as safe work; all flushes will
 * be complete once that safe work has run, which depends on when the
 * guest's current translation block ends.
 */
void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_set_page_with_attrs:
 * @cpu: CPU to add this TLB entry for
 * @vaddr: virtual address of page to add entry for
 * @paddr: physical address of the page
 * @attrs: memory transaction attributes
 * @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits)
 * @mmu_idx: MMU index to insert TLB entry for
 * @size: size of the page in bytes
 *
 * Add an entry to this CPU's TLB (a mapping from virtual address
 * @vaddr to physical address @paddr) with the specified memory
 * transaction attributes. This is generally called by the target CPU
 * specific code after it has been called through the tlb_fill()
 * entry point and performed a successful page table walk to find
 * the physical address and attributes for the virtual address
 * which provoked the TLB miss.
 *
 * At most one entry for a given virtual address is permitted. Only a
 * single TARGET_PAGE_SIZE region is mapped; the supplied @size is only
 * used by tlb_flush_page.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs,
                             int prot, int mmu_idx, target_ulong size);
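/*
 * Example (illustrative sketch): a target's tlb_fill hook, after a
 * successful walk of its page tables (walk_page_table() and its outputs
 * are hypothetical), would typically install the translation like so:
 *
 *     if (walk_page_table(env, vaddr, access_type, mmu_idx,
 *                         &paddr, &attrs, &prot)) {
 *         tlb_set_page_with_attrs(cs, vaddr & TARGET_PAGE_MASK,
 *                                 paddr & TARGET_PAGE_MASK, attrs,
 *                                 prot, mmu_idx, TARGET_PAGE_SIZE);
 *     }
 */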
/* tlb_set_page:
 *
 * This function is equivalent to calling tlb_set_page_with_attrs()
 * with an @attrs argument of MEMTXATTRS_UNSPECIFIED. It's provided
 * as a convenience for CPUs which don't use memory transaction attributes.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size);
#else
static inline void tlb_init(CPUState *cpu)
{
}
static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
}
static inline void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
}
static inline void tlb_flush_page_all_cpus_synced(CPUState *src,
                                                  target_ulong addr)
{
}
static inline void tlb_flush(CPUState *cpu)
{
}
static inline void tlb_flush_all_cpus(CPUState *src_cpu)
{
}
static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
}
static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
                                            target_ulong addr, uint16_t idxmap)
{
}

static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu,
                                                     target_ulong addr,
                                                     uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                            target_ulong addr,
                                                            uint16_t idxmap)
{
}
static inline void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap)
{
}

static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                       uint16_t idxmap)
{
}
#endif
void *probe_access(CPUArchState *env, target_ulong addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t retaddr);

static inline void *probe_write(CPUArchState *env, target_ulong addr, int size,
                                int mmu_idx, uintptr_t retaddr)
{
    return probe_access(env, addr, size, MMU_DATA_STORE, mmu_idx, retaddr);
}
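/*
 * Example (illustrative sketch; helper_example_store16 is hypothetical):
 * probing the full destination up front means a fault is raised before
 * any partial update becomes guest-visible:
 *
 *     void helper_example_store16(CPUArchState *env, target_ulong addr,
 *                                 uint64_t lo, uint64_t hi)
 *     {
 *         uintptr_t ra = GETPC();
 *
 *         probe_write(env, addr, 16, cpu_mmu_index(env, false), ra);
 *         cpu_stq_data_ra(env, addr, lo, ra);
 *         cpu_stq_data_ra(env, addr + 8, hi, ra);
 *     }
 */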

#define CODE_GEN_ALIGN           16 /* must be >= the size of an icache line */

/* Estimated block size for TB allocation. */
/* ??? The following is based on a 2015 survey of x86_64 host output.
   Better would seem to be some sort of dynamically sized TB array,
   adapting to the block sizes actually being produced. */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 400
#else
#define CODE_GEN_AVG_BLOCK_SIZE 150
#endif

/*
 * Translation Cache-related fields of a TB.
 * This struct exists just for convenience; we keep track of TBs in a binary
 * search tree, and the only fields needed to compare TBs in the tree are
 * @ptr and @size.
 * Note: the address of search data can be obtained by adding @size to @ptr.
 */
struct tb_tc {
    void *ptr;    /* pointer to the translated code */
    size_t size;
};
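/*
 * Example (illustrative): per the note above, a TB's search data lives
 * immediately after its translated code:
 *
 *     const uint8_t *search_data = (const uint8_t *)tb->tc.ptr + tb->tc.size;
 */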

struct TranslationBlock {
    target_ulong pc;   /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    uint32_t flags; /* flags defining in which context the code was generated */
    uint16_t size;      /* size of target code for this block (1 <=
                           size <= TARGET_PAGE_SIZE) */
    uint16_t icount;
    uint32_t cflags;    /* compile flags */
#define CF_COUNT_MASK  0x00007fff
#define CF_LAST_IO     0x00008000 /* Last insn may be an IO access. */
#define CF_NOCACHE     0x00010000 /* To be freed after execution */
#define CF_USE_ICOUNT  0x00020000
#define CF_INVALID     0x00040000 /* TB is stale. Set with @jmp_lock held */
#define CF_PARALLEL    0x00080000 /* Generate code for a parallel context */
#define CF_CLUSTER_MASK 0xff000000 /* Top 8 bits are cluster ID */
#define CF_CLUSTER_SHIFT 24
/* cflags' mask for hashing/comparison */
#define CF_HASH_MASK   \
    (CF_COUNT_MASK | CF_LAST_IO | CF_USE_ICOUNT | CF_PARALLEL | CF_CLUSTER_MASK)

    /* Per-vCPU dynamic tracing state used to generate this TB */
    uint32_t trace_vcpu_dstate;

    struct tb_tc tc;

    /* original tb when cflags has CF_NOCACHE */
    struct TranslationBlock *orig_tb;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[].
       The list is protected by the TB's page('s) lock(s) */
    uintptr_t page_next[2];
    tb_page_addr_t page_addr[2];

    /* jmp_lock placed here to fill a 4-byte hole. Its documentation is below */
    QemuSpin jmp_lock;

    /* The following data are used to directly call another TB from
     * the code of this one. This can be done either by emitting direct or
     * indirect native jump instructions. These jumps are reset so that the TB
     * just continues its execution. The TB can be linked to another one by
     * setting one of the jump targets (or patching the jump instruction). Only
     * two such jumps are supported.
     */
    uint16_t jmp_reset_offset[2]; /* offset of original jump target */
#define TB_JMP_RESET_OFFSET_INVALID 0xffff /* indicates no jump generated */
    uintptr_t jmp_target_arg[2];  /* target address or offset */

    /*
     * Each TB has a NULL-terminated list (jmp_list_head) of incoming jumps.
     * Each TB can have two outgoing jumps, and therefore can participate
     * in two lists. The list entries are kept in jmp_list_next[2]. The least
     * significant bit (LSB) of the pointers in these lists is used to encode
     * which of the two list entries is to be used in the pointed TB.
     *
     * List traversals are protected by jmp_lock. The destination TB of each
     * outgoing jump is kept in jmp_dest[] so that the appropriate jmp_lock
     * can be acquired from any origin TB.
     *
     * jmp_dest[] are tagged pointers as well. The LSB is set when the TB is
     * being invalidated, so that no further outgoing jumps from it can be set.
     *
     * jmp_lock also protects the CF_INVALID cflag; a jump must not be chained
     * to a destination TB that has CF_INVALID set.
     */
    uintptr_t jmp_list_head;
    uintptr_t jmp_list_next[2];
    uintptr_t jmp_dest[2];
};

extern bool parallel_cpus;

/* Hide the atomic_read to make code a little easier on the eyes */
static inline uint32_t tb_cflags(const TranslationBlock *tb)
{
    return atomic_read(&tb->cflags);
}

/* current cflags for hashing/comparison */
static inline uint32_t curr_cflags(void)
{
    return (parallel_cpus ? CF_PARALLEL : 0)
         | (use_icount ? CF_USE_ICOUNT : 0);
}
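/*
 * Example (illustrative sketch): looking up an existing TB for the
 * current execution context; pc, cs_base and flags are assumed to come
 * from cpu_get_tb_cpu_state():
 *
 *     TranslationBlock *tb = tb_htable_lookup(cpu, pc, cs_base, flags,
 *                                             curr_cflags());
 *     if (tb == NULL) {
 *         // no translation yet; one would be generated via tb_gen_code()
 *     }
 */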

/* TranslationBlock invalidate API */
#if defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(target_ulong addr);
void tb_invalidate_phys_range(target_ulong start, target_ulong end);
#else
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs);
#endif
void tb_flush(CPUState *cpu);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
                                   target_ulong cs_base, uint32_t flags,
                                   uint32_t cf_mask);
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);

/* GETPC is the true target of the return instruction that we'll execute. */
#if defined(CONFIG_TCG_INTERPRETER)
extern uintptr_t tci_tb_ptr;
# define GETPC() tci_tb_ptr
#else
# define GETPC() \
    ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
#endif

/* The true return address will often point to a host insn that is part of
   the next translated guest insn. Adjust the address backward to point to
   the middle of the call insn. Subtracting one would do the job except for
   several compressed mode architectures (arm, mips) which set the low bit
   to indicate the compressed mode; subtracting two works around that. It
   is also the case that there are no host ISAs that contain a call insn
   smaller than 4 bytes, so we don't worry about special-casing this. */
#define GETPC_ADJ 2
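/*
 * Example (illustrative sketch; helper_example_load is hypothetical):
 * GETPC() must be evaluated in the helper that is called directly from
 * the generated code, then threaded through as a "retaddr":
 *
 *     uint64_t helper_example_load(CPUArchState *env, target_ulong addr)
 *     {
 *         uintptr_t ra = GETPC();
 *
 *         return cpu_ldq_data_ra(env, addr, ra);
 *     }
 */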

#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_DEBUG_TCG)
void assert_no_pages_locked(void);
#else
static inline void assert_no_pages_locked(void)
{
}
#endif

#if !defined(CONFIG_USER_ONLY)

/**
 * iotlb_to_section:
 * @cpu: CPU performing the access
 * @index: TCG CPU IOTLB entry
 * @attrs: memory transaction attributes
 *
 * Given a TCG CPU IOTLB entry, return the MemoryRegionSection that
 * it refers to. @index will have been initially created and returned
 * by memory_region_section_get_iotlb().
 */
struct MemoryRegionSection *iotlb_to_section(CPUState *cpu,
                                             hwaddr index, MemTxAttrs attrs);
#endif

#if defined(CONFIG_USER_ONLY)
void mmap_lock(void);
void mmap_unlock(void);
bool have_mmap_lock(void);

/**
 * get_page_addr_code() - user-mode version
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 *
 * Returns @addr.
 */
static inline tb_page_addr_t get_page_addr_code(CPUArchState *env,
                                                target_ulong addr)
{
    return addr;
}

/**
 * get_page_addr_code_hostp() - user-mode version
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 * @hostp: output for the host address (may be NULL)
 *
 * Returns @addr.
 *
 * If @hostp is non-NULL, sets *@hostp to the host address where @addr's
 * content is kept.
 */
static inline tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env,
                                                      target_ulong addr,
                                                      void **hostp)
{
    if (hostp) {
        *hostp = g2h(addr);
    }
    return addr;
}
#else
static inline void mmap_lock(void) {}
static inline void mmap_unlock(void) {}

546/**
547 * get_page_addr_code() - full-system version
548 * @env: CPUArchState
549 * @addr: guest virtual address of guest code
550 *
551 * If we cannot translate and execute from the entire RAM page, or if
552 * the region is not backed by RAM, returns -1. Otherwise, returns the
553 * ram_addr_t corresponding to the guest code at @addr.
554 *
555 * Note: this function can trigger an exception.
556 */
557tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr);

/**
 * get_page_addr_code_hostp() - full-system version
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 * @hostp: output for the host address (may be NULL)
 *
 * See get_page_addr_code() (full-system version) for documentation on the
 * return value.
 *
 * Sets *@hostp (when @hostp is non-NULL) as follows.
 * If the return value is -1, sets *@hostp to NULL. Otherwise, sets *@hostp
 * to the host address where @addr's content is kept.
 *
 * Note: this function can trigger an exception.
 */
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
                                        void **hostp);

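/*
 * Example (illustrative sketch): fetching the code page while keeping a
 * host pointer for fast instruction reads:
 *
 *     void *host;
 *     tb_page_addr_t phys_pc = get_page_addr_code_hostp(env, pc, &host);
 *     if (phys_pc == -1) {
 *         // not directly executable from RAM (e.g. MMIO); host is NULL
 *     }
 */
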
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr);

/* exec.c */
void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr);

MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen,
                                  MemTxAttrs attrs, int *prot);
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section);
#endif

/* vl.c */
extern int singlestep;

#endif