/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"

//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;
DMAContext dma_context_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned, io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;
#if !defined(CONFIG_USER_ONLY)

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

static void io_mem_init(void);
static void memory_map_init(void);
static void *qemu_safe_ram_ptr(ram_addr_t addr);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}

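/*
 * The physical page map is a radix tree with P_L2_LEVELS levels and
 * L2_SIZE entries per node; leaves hold 16-bit indices into
 * phys_sections[].  phys_page_set_level() walks down the tree for the
 * range [*index, *index + *nb), allocating interior nodes on demand,
 * and stores the leaf directly at the current level whenever a whole
 * step-aligned sub-range is covered, so large regions stay shallow.
 */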
static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

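/*
 * Walk the radix tree for one page index.  A missing interior node
 * means the page was never mapped, so the lookup falls back to the
 * "unassigned" section instead of returning NULL.
 */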
static MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
{
    PhysPageEntry lp = d->phys_map;
    PhysPageEntry *p;
    int i;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &phys_sections[phys_section_unassigned];
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }
    return &phys_sections[lp.ptr];
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

static MemoryRegionSection *address_space_lookup_region(AddressSpace *as,
                                                        hwaddr addr)
{
    return phys_page_find(as->dispatch, addr >> TARGET_PAGE_BITS);
}

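/*
 * Translate an address-space offset to a (section, offset) pair: on
 * return, *xlat is the offset within the section's MemoryRegion and
 * *plen is clamped so the access cannot run past the end of the
 * region.
 */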
MemoryRegionSection *address_space_translate(AddressSpace *as, hwaddr addr,
                                             hwaddr *xlat, hwaddr *plen,
                                             bool is_write)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(as, addr);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu->env_ptr, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#else
#define vmstate_cpu_common vmstate_dummy
#endif

CPUState *qemu_get_cpu(int index)
{
    CPUArchState *env = first_cpu;
    CPUState *cpu = NULL;

    while (env) {
        cpu = ENV_GET_CPU(env);
        if (cpu->cpu_index == index) {
            break;
        }
        env = env->next_cpu;
    }

    return env ? cpu : NULL;
}

void qemu_for_each_cpu(void (*func)(CPUState *cpu, void *data), void *data)
{
    CPUArchState *env = first_cpu;

    while (env) {
        func(ENV_GET_CPU(env), data);
        env = env->next_cpu;
    }
}

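/*
 * Register a newly created CPU: append it to the global first_cpu
 * list, give it the next free cpu_index, and wire up its vmstate and
 * (for old-style targets) savevm handlers.
 */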
void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)

{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
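/*
 * Watchpoints cover naturally aligned power-of-2 ranges, encoded as a
 * start address plus a length mask: len == 4 gives len_mask == ~3, and
 * the checks below reject unaligned addresses (addr & ~len_mask != 0)
 * as well as zero or larger-than-page lengths.
 */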
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

void cpu_exit(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->exit_request = 1;
    cpu->tcg_exit_req = 1;
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
    CPUArchState *next_cpu = new_env->next_cpu;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Preserve chaining. */
    new_env->next_cpu = next_cpu;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    uintptr_t start1;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);

}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

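/*
 * Compute the iotlb entry for one guest page.  For RAM the entry is
 * the ram_addr ORed with the notdirty or rom section index so writes
 * can be trapped; for MMIO it is the section number plus the offset
 * into the region.  Pages with a relevant watchpoint are redirected to
 * the watch section so every access funnels through check_watchpoint().
 */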
hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= phys_section_notdirty;
        } else {
            iotlb |= phys_section_rom;
        }
    } else {
        iotlb = section - phys_sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = phys_section_watch + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(hwaddr base);
static void destroy_page_desc(uint16_t section_index)
{
    MemoryRegionSection *section = &phys_sections[section_index];
    MemoryRegion *mr = section->mr;

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
{
    unsigned i;
    PhysPageEntry *p;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = phys_map_nodes[lp->ptr];
    for (i = 0; i < L2_SIZE; ++i) {
        if (!p[i].is_leaf) {
            destroy_l2_mapping(&p[i], level - 1);
        } else {
            destroy_page_desc(p[i].ptr);
        }
    }
    lp->is_leaf = 0;
    lp->ptr = PHYS_MAP_NODE_NIL;
}

static void destroy_all_mappings(AddressSpaceDispatch *d)
{
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();
}

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(phys_sections_nb < TARGET_PAGE_SIZE);

    if (phys_sections_nb == phys_sections_nb_alloc) {
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    }
    phys_sections[phys_sections_nb] = *section;
    return phys_sections_nb++;
}

static void phys_sections_clear(void)
{
    phys_sections_nb = 0;
}

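/*
 * A page that is only partially covered by a section is dispatched
 * through a subpage_t: a per-page table holding one section index per
 * byte offset.  register_subpage() creates the subpage on first use
 * and then claims the byte range belonging to this section.
 */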
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = TARGET_PAGE_SIZE,
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + section->size - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}


static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    hwaddr addr;
    uint16_t section_index = phys_section_add(section);

    assert(size);

    addr = start_addr;
    phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
                  section_index);
}

QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > MAX_PHYS_ADDR_SPACE_BITS)

static MemoryRegionSection limit(MemoryRegionSection section)
{
    section.size = MIN(section.offset_within_address_space + section.size,
                       MAX_PHYS_ADDR + 1)
                   - section.offset_within_address_space;

    return section;
}

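/*
 * Memory-listener add callback: split the incoming section on page
 * boundaries.  An unaligned head or tail fragment becomes a subpage;
 * the page-aligned middle is registered in one go as a multipage
 * mapping.
 */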
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
    MemoryRegionSection now = limit(*section), remain = limit(*section);

    if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
        || (now.size < TARGET_PAGE_SIZE)) {
        now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space,
                       now.size);
        register_subpage(d, &now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    while (remain.size >= TARGET_PAGE_SIZE) {
        now = remain;
        if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
            now.size = TARGET_PAGE_SIZE;
            register_subpage(d, &now);
        } else {
            now.size &= TARGET_PAGE_MASK;
            register_multipage(d, &now);
        }
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    now = remain;
    if (now.size) {
        register_subpage(d, &now);
    }
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC 0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

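/*
 * Back a RAM block with a hugetlbfs file: create an unlinked temporary
 * file under the -mem-path directory, round the size up to whole huge
 * pages, and mmap it (MAP_POPULATE | MAP_SHARED when mem_prealloc is
 * set).  Returns NULL on failure so the caller can fall back to
 * anonymous memory.
 */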
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        return NULL;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }
    block->fd = fd;
    return area;
}
#endif

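/*
 * Best-fit search for a hole in the ram_addr_t space: for each block,
 * measure the gap up to the nearest block above it and remember the
 * smallest gap that still fits the requested size.
 */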
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QTAILQ_EMPTY(&ram_list.blocks))
        return 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;
    QemuOpts *machine_opts;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (machine_opts &&
        !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                            "but dump_guest_core=off specified\n");
        }
    }
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    QemuOpts *opts;

    opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

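/*
 * Allocate a new RAM block.  The backing memory comes, in order of
 * preference, from a caller-supplied pointer (flagged RAM_PREALLOC), a
 * hugetlbfs file when -mem-path is set, the Xen or KVM specific
 * allocators, or plain anonymous memory; the block list is then kept
 * sorted from biggest to smallest block.
 */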
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *block, *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_anon_ram_alloc(size);
                memory_try_enable_merging(new_block->host, size);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else if (kvm_enabled()) {
                /* some s390/kvm configurations have special constraints */
                new_block->host = kvm_ram_alloc(size);
            } else {
                new_block->host = qemu_anon_ram_alloc(size);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    /* Keep the list sorted from biggest to smallest block.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
            break;
        }
    }
    if (block) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
    } else {
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_anon_ram_free(block->host, block->length);
                }
#else
                abort();
#endif
            } else {
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_anon_ram_free(block->host, block->length);
                }
            }
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();

}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    if (xen_enabled()) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        } else if (block->host == NULL) {
            block->host =
                xen_map_cache(block->offset, block->length, 1);
        }
    }
    return block->host + (addr - block->offset);
}

/* Return a host pointer to ram allocated with qemu_ram_alloc.  Same as
 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
 *
 * ??? Is this still necessary?
 */
static void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

1347 | ||
38bee5dc SS |
1348 | /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr |
1349 | * but takes a size argument */ | |
8b9c99d9 | 1350 | static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size) |
38bee5dc | 1351 | { |
8ab934f9 SS |
1352 | if (*size == 0) { |
1353 | return NULL; | |
1354 | } | |
868bb33f | 1355 | if (xen_enabled()) { |
e41d7c69 | 1356 | return xen_map_cache(addr, *size, 1); |
868bb33f | 1357 | } else { |
38bee5dc SS |
1358 | RAMBlock *block; |
1359 | ||
a3161038 | 1360 | QTAILQ_FOREACH(block, &ram_list.blocks, next) { |
38bee5dc SS |
1361 | if (addr - block->offset < block->length) { |
1362 | if (addr - block->offset + *size > block->length) | |
1363 | *size = block->length - addr + block->offset; | |
1364 | return block->host + (addr - block->offset); | |
1365 | } | |
1366 | } | |
1367 | ||
1368 | fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr); | |
1369 | abort(); | |
38bee5dc SS |
1370 | } |
1371 | } | |
1372 | ||
int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}

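/*
 * Write handler for RAM pages whose dirty tracking is still armed: the
 * write first invalidates any TBs translated from the page, then hits
 * the underlying RAM, and finally sets the page's dirty flags.  Once
 * all flags are set, the notdirty slow path is disarmed via
 * tlb_set_dirty().
 */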
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)
{
    return is_write;
}

static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

0f459d16 | 1452 | /* Generate a debug exception if a watchpoint has been hit. */ |
b4051334 | 1453 | static void check_watchpoint(int offset, int len_mask, int flags) |
0f459d16 | 1454 | { |
9349b4f9 | 1455 | CPUArchState *env = cpu_single_env; |
06d55cc1 | 1456 | target_ulong pc, cs_base; |
0f459d16 | 1457 | target_ulong vaddr; |
a1d1bb31 | 1458 | CPUWatchpoint *wp; |
06d55cc1 | 1459 | int cpu_flags; |
0f459d16 | 1460 | |
06d55cc1 AL |
1461 | if (env->watchpoint_hit) { |
1462 | /* We re-entered the check after replacing the TB. Now raise | |
1463 | * the debug interrupt so that is will trigger after the | |
1464 | * current instruction. */ | |
c3affe56 | 1465 | cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG); |
06d55cc1 AL |
1466 | return; |
1467 | } | |
2e70f6ef | 1468 | vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset; |
72cf2d4f | 1469 | QTAILQ_FOREACH(wp, &env->watchpoints, entry) { |
b4051334 AL |
1470 | if ((vaddr == (wp->vaddr & len_mask) || |
1471 | (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) { | |
6e140f28 AL |
1472 | wp->flags |= BP_WATCHPOINT_HIT; |
1473 | if (!env->watchpoint_hit) { | |
1474 | env->watchpoint_hit = wp; | |
5a316526 | 1475 | tb_check_watchpoint(env); |
6e140f28 AL |
1476 | if (wp->flags & BP_STOP_BEFORE_ACCESS) { |
1477 | env->exception_index = EXCP_DEBUG; | |
488d6577 | 1478 | cpu_loop_exit(env); |
6e140f28 AL |
1479 | } else { |
1480 | cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags); | |
1481 | tb_gen_code(env, pc, cs_base, cpu_flags, 1); | |
488d6577 | 1482 | cpu_resume_from_signal(env, NULL); |
6e140f28 | 1483 | } |
06d55cc1 | 1484 | } |
6e140f28 AL |
1485 | } else { |
1486 | wp->flags &= ~BP_WATCHPOINT_HIT; | |
0f459d16 PB |
1487 | } |
1488 | } | |
1489 | } | |
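
/*
 * Watchpoints reach this path via cpu_watchpoint_insert(); a gdbstub-style
 * caller would look roughly like this (guest_addr and the 4-byte length
 * are illustrative values):
 *
 *     CPUWatchpoint *wp;
 *     cpu_watchpoint_insert(env, guest_addr, 4, BP_MEM_WRITE | BP_GDB, &wp);
 *
 * The TLB then routes accesses to the watched page through watch_mem_ops
 * below, which calls check_watchpoint() before completing the access.
 */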

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(addr);
    case 2: return lduw_phys(addr);
    case 4: return ldl_phys(addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, hwaddr addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(addr, val);
        break;
    case 2:
        stw_phys(addr, val);
        break;
    case 4:
        stl_phys(addr, val);
        break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t subpage_read(void *opaque, hwaddr addr,
                             unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    uint64_t val;
    MemoryRegionSection *section;

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    io_mem_read(section->mr, addr, &val, len);
    return val;
}

static void subpage_write(void *opaque, hwaddr addr,
                          uint64_t value, unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx
           " idx %d value %"PRIx64"\n",
           __func__, mmio, len, addr, idx, value);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    io_mem_write(section->mr, addr, value, len);
}

static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned size, bool is_write)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %d addr " TARGET_FMT_plx
           " idx %d\n", __func__, mmio,
           is_write ? 'w' : 'r', size, addr, idx);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    return memory_region_access_valid(section->mr, addr, size, is_write);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
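
/*
 * The three-step address rebasing in the subpage handlers above, with
 * made-up numbers: for an access at subpage offset 0x40 under base
 * 0x1000, targeting a section with offset_within_address_space 0x1000
 * and offset_within_region 0x80, the region-relative address works out
 * to 0x40 + 0x1000 - 0x1000 + 0x80 = 0xc0.
 */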

static uint64_t subpage_ram_read(void *opaque, hwaddr addr,
                                 unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return ldub_p(ptr);
    case 2: return lduw_p(ptr);
    case 4: return ldl_p(ptr);
    default: abort();
    }
}

static void subpage_ram_write(void *opaque, hwaddr addr,
                              uint64_t value, unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return stb_p(ptr, value);
    case 2: return stw_p(ptr, value);
    case 4: return stl_p(ptr, value);
    default: abort();
    }
}

static const MemoryRegionOps subpage_ram_ops = {
    .read = subpage_ram_read,
    .write = subpage_ram_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE) {
        return -1;
    }
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    if (memory_region_is_ram(phys_sections[section].mr)) {
        MemoryRegionSection new_section = phys_sections[section];
        new_section.mr = &io_mem_subpage_ram;
        section = phys_section_add(&new_section);
    }
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

static subpage_t *subpage_init(hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->base = base;
    memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);

    return mmio;
}
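
/*
 * Subpages exist because phys_map dispatch works at TARGET_PAGE_SIZE
 * granularity: when a page is only partially covered by a region, or
 * covered by several, subpage_init() interposes a per-page subpage_t
 * whose sub_section[] array resolves each access to the real section.
 * Note the RAM special case in subpage_register(): RAM-backed pieces
 * are rerouted through subpage_ram_ops so they still reach host memory
 * directly instead of taking a full I/O dispatch on every access.
 */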

static uint16_t dummy_section(MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = UINT64_MAX,
    };

    return phys_section_add(&section);
}

MemoryRegion *iotlb_to_region(hwaddr index)
{
    return phys_sections[index & ~TARGET_PAGE_MASK].mr;
}

static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_rom, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
                          "subpage-ram", UINT64_MAX);
    memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
}

static void mem_begin(MemoryListener *listener)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);

    destroy_all_mappings(d);
    d->phys_map.ptr = PHYS_MAP_NODE_NIL;
}

static void core_begin(MemoryListener *listener)
{
    phys_sections_clear();
    phys_section_unassigned = dummy_section(&io_mem_unassigned);
    phys_section_notdirty = dummy_section(&io_mem_notdirty);
    phys_section_rom = dummy_section(&io_mem_rom);
    phys_section_watch = dummy_section(&io_mem_watch);
}

static void tcg_commit(MemoryListener *listener)
{
    CPUArchState *env;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}

static void core_log_global_start(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(1);
}

static void core_log_global_stop(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(0);
}

static void io_region_add(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);

    mrio->mr = section->mr;
    mrio->offset = section->offset_within_region;
    iorange_init(&mrio->iorange, &memory_region_iorange_ops,
                 section->offset_within_address_space, section->size);
    ioport_register(&mrio->iorange);
}

static void io_region_del(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    isa_unassign_ioport(section->offset_within_address_space, section->size);
}

static MemoryListener core_memory_listener = {
    .begin = core_begin,
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
    .priority = 1,
};

static MemoryListener io_memory_listener = {
    .region_add = io_region_add,
    .region_del = io_region_del,
    .priority = 0,
};

static MemoryListener tcg_memory_listener = {
    .commit = tcg_commit,
};

void address_space_init_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);

    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
    d->listener = (MemoryListener) {
        .begin = mem_begin,
        .region_add = mem_add,
        .region_nop = mem_add,
        .priority = 0,
    };
    as->dispatch = d;
    memory_listener_register(&d->listener, as);
}

void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    memory_listener_unregister(&d->listener);
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    g_free(d);
    as->dispatch = NULL;
}
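
/*
 * Lifecycle note: each AddressSpace owns exactly one dispatch structure.
 * On every memory topology change the listener's begin callback
 * (mem_begin) throws away the phys_map radix tree, and region_add /
 * region_nop rebuild it section by section, so a lookup between
 * transactions always sees a complete, consistent map.
 */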

static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));
    memory_region_init(system_memory, "system", INT64_MAX);
    address_space_init(&address_space_memory, system_memory);
    address_space_memory.name = "memory";

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init(system_io, "io", 65536);
    address_space_init(&address_space_io, system_io);
    address_space_io.name = "I/O";

    memory_listener_register(&core_memory_listener, &address_space_memory);
    memory_listener_register(&io_memory_listener, &address_space_io);
    memory_listener_register(&tcg_memory_listener, &address_space_memory);

    dma_context_init(&dma_context_memory, &address_space_memory,
                     NULL, NULL, NULL);
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}
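
/*
 * Typical board-code use of the containers created above (sketch only;
 * "board.ram" and the size are placeholders, and the
 * memory_region_init_ram() call assumes the signature current at this
 * point in the tree):
 *
 *     MemoryRegion *ram = g_new(MemoryRegion, 1);
 *     memory_region_init_ram(ram, "board.ram", 128 * 1024 * 1024);
 *     memory_region_add_subregion(get_system_memory(), 0, ram);
 *
 * Devices likewise map MMIO into get_system_memory() and port I/O into
 * get_system_io().
 */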

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID)) {
            return -1;
        }
        if (is_write) {
            if (!(flags & PAGE_WRITE)) {
                return -1;
            }
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0))) {
                return -1;
            }
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ)) {
                return -1;
            }
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1))) {
                return -1;
            }
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else

static void invalidate_and_set_dirty(hwaddr addr,
                                     hwaddr length)
{
    if (!cpu_physical_memory_is_dirty(addr)) {
        /* invalidate code */
        tb_invalidate_phys_page_range(addr, addr + length, 0);
        /* set dirty bit */
        cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
    }
    xen_modified_memory(addr, length);
}

static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}

static inline int memory_access_size(int l, hwaddr addr)
{
    if (l >= 4 && ((addr & 3) == 0)) {
        return 4;
    }
    if (l >= 2 && ((addr & 1) == 0)) {
        return 2;
    }
    return 1;
}
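
/*
 * Worked example for memory_access_size(): a 6-byte I/O transfer
 * starting at 0x1002 is 2-byte but not 4-byte aligned, so the first
 * chunk goes out as a 16-bit access; the remaining 4 bytes at 0x1004
 * are 4-byte aligned and complete as one 32-bit access.
 */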

bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
                      int len, bool is_write)
{
    hwaddr l;
    uint8_t *ptr;
    uint64_t val;
    hwaddr addr1;
    MemoryRegionSection *section;
    bool error = false;

    while (len > 0) {
        l = len;
        section = address_space_translate(as, addr, &addr1, &l, is_write);

        if (is_write) {
            if (!memory_access_is_direct(section->mr, is_write)) {
                l = memory_access_size(l, addr1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l == 4) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    error |= io_mem_write(section->mr, addr1, val, 4);
                } else if (l == 2) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    error |= io_mem_write(section->mr, addr1, val, 2);
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    error |= io_mem_write(section->mr, addr1, val, 1);
                }
            } else {
                addr1 += memory_region_get_ram_addr(section->mr);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(addr1, l);
            }
        } else {
            if (!memory_access_is_direct(section->mr, is_write)) {
                /* I/O case */
                l = memory_access_size(l, addr1);
                if (l == 4) {
                    /* 32 bit read access */
                    error |= io_mem_read(section->mr, addr1, &val, 4);
                    stl_p(buf, val);
                } else if (l == 2) {
                    /* 16 bit read access */
                    error |= io_mem_read(section->mr, addr1, &val, 2);
                    stw_p(buf, val);
                } else {
                    /* 8 bit read access */
                    error |= io_mem_read(section->mr, addr1, &val, 1);
                    stb_p(buf, val);
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(section->mr->ram_addr + addr1);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }

    return error;
}
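
/*
 * The return value is easy to misread: address_space_rw() yields true
 * if any chunk was rejected by the target device, false on full
 * success.  An error-checking caller (0x1000 is a made-up address):
 *
 *     uint8_t data[4] = { 0x12, 0x34, 0x56, 0x78 };
 *     if (address_space_rw(&address_space_memory, 0x1000, data,
 *                          sizeof(data), true)) {
 *         // handle the partial or failed write
 *     }
 */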

bool address_space_write(AddressSpace *as, hwaddr addr,
                         const uint8_t *buf, int len)
{
    return address_space_rw(as, addr, (uint8_t *)buf, len, true);
}

bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
{
    return address_space_rw(as, addr, buf, len, false);
}


void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, buf, len, is_write);
}

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(hwaddr addr,
                                   const uint8_t *buf, int len)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegionSection *section;

    while (len > 0) {
        l = len;
        section = address_space_translate(&address_space_memory,
                                          addr, &addr1, &l, true);

        if (!(memory_region_is_ram(section->mr) ||
              memory_region_is_romd(section->mr))) {
            /* do nothing */
        } else {
            addr1 += memory_region_get_ram_addr(section->mr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(addr1, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

typedef struct {
    void *buffer;
    hwaddr addr;
    hwaddr len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

static void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}

bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
    MemoryRegionSection *section;
    hwaddr l, xlat;

    while (len > 0) {
        l = len;
        section = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(section->mr, is_write)) {
            l = memory_access_size(l, addr);
            if (!memory_region_access_valid(section->mr, xlat, l, is_write)) {
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    return true;
}
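
/*
 * The map-client list backs the "retry later" contract of
 * address_space_map(): there is a single global bounce buffer, so a
 * caller that receives NULL can park itself until it is free (sketch;
 * retry_dma and MyDevice are hypothetical names):
 *
 *     static void retry_dma(void *opaque)
 *     {
 *         MyDevice *dev = opaque;   // resubmit the transfer here
 *     }
 *     ...
 *     cpu_register_map_client(dev, retry_dma);
 *
 * cpu_notify_map_clients() runs each callback exactly once, when the
 * bounce buffer is released in address_space_unmap() below.
 */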

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr todo = 0;
    hwaddr l, xlat;
    MemoryRegionSection *section;
    ram_addr_t raddr = RAM_ADDR_MAX;
    ram_addr_t rlen;
    void *ret;

    while (len > 0) {
        l = len;
        section = address_space_translate(as, addr, &xlat, &l, is_write);

        if (!memory_access_is_direct(section->mr, is_write)) {
            if (todo || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                address_space_read(as, addr, bounce.buffer, l);
            }

            *plen = l;
            return bounce.buffer;
        }
        if (!todo) {
            raddr = memory_region_get_ram_addr(section->mr) + xlat;
        } else {
            if (memory_region_get_ram_addr(section->mr) + xlat != raddr + todo) {
                break;
            }
        }

        len -= l;
        addr += l;
        todo += l;
    }
    rlen = todo;
    ret = qemu_ram_ptr_length(raddr, &rlen);
    *plen = rlen;
    return ret;
}

/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len) {
                    l = access_len;
                }
                invalidate_and_set_dirty(addr1, l);
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}

void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}
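
/*
 * Canonical map/unmap pattern for device DMA (sketch; phys and size are
 * whatever the guest programmed into the device, fill_buffer is a
 * hypothetical helper):
 *
 *     hwaddr plen = size;
 *     void *host = cpu_physical_memory_map(phys, &plen, 1);  // writing
 *     if (host) {
 *         fill_buffer(host, plen);
 *         cpu_physical_memory_unmap(host, plen, 1, plen);
 *     }
 *
 * plen may come back smaller than requested, so real callers loop until
 * the transfer is complete or fall back to cpu_physical_memory_rw().
 */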

/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;
    hwaddr l = 4;
    hwaddr addr1;

    section = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                      false);
    if (l < 4 || !memory_access_is_direct(section->mr, false)) {
        /* I/O case */
        io_mem_read(section->mr, addr1, &val, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
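
/*
 * Naming scheme for this family of helpers: ld/st + width (b/w/l/q for
 * 1/2/4/8 bytes) + optional byte order (_le/_be; the default is target
 * native order) + _phys.  Reading a little-endian 32-bit page-table
 * entry that the guest placed at pte_addr (a hypothetical address) is
 * simply:
 *
 *     uint32_t pte = ldl_le_phys(pte_addr);
 */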

/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;
    hwaddr l = 8;
    hwaddr addr1;

    section = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                      false);
    if (l < 8 || !memory_access_is_direct(section->mr, false)) {
        /* I/O case */
        io_mem_read(section->mr, addr1, &val, 8);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
uint32_t ldub_phys(hwaddr addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(hwaddr addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;
    hwaddr l = 2;
    hwaddr addr1;

    section = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                      false);
    if (l < 2 || !memory_access_is_direct(section->mr, false)) {
        /* I/O case */
        io_mem_read(section->mr, addr1, &val, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(hwaddr addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;
    hwaddr l = 4;
    hwaddr addr1;

    section = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                      true);
    if (l < 4 || !memory_access_is_direct(section->mr, true)) {
        io_mem_write(section->mr, addr1, val, 4);
    } else {
        addr1 += memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}

/* warning: addr must be aligned */
static inline void stl_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;
    hwaddr l = 4;
    hwaddr addr1;

    section = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                      true);
    if (l < 4 || !memory_access_is_direct(section->mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(section->mr, addr1, val, 4);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}

void stl_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stb_phys(hwaddr addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;
    hwaddr l = 2;
    hwaddr addr1;

    section = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                      true);
    if (l < 2 || !memory_access_is_direct(section->mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(section->mr, addr1, val, 2);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
    }
}

void stw_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(hwaddr addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        } else {
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif

#if !defined(CONFIG_USER_ONLY)

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#endif

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegionSection *section;
    hwaddr l = 1;

    section = address_space_translate(&address_space_memory,
                                      phys_addr, &phys_addr, &l, false);

    return !(memory_region_is_ram(section->mr) ||
             memory_region_is_romd(section->mr));
}
#endif