/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

#include "qemu/range.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
static bool in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits to skip to the next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables. */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

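/* Grow the node pool of the physical page map so that at least 'nodes'
 * more entries fit; the array is grown geometrically. */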
static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map)
{
    unsigned i;
    uint32_t ret;

    ret = map->nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);
    for (i = 0; i < P_L2_SIZE; ++i) {
        map->nodes[ret][i].skip = 1;
        map->nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

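/* Fill one level of the multi-level physical page map: ranges that are
 * aligned and at least 'step' pages long become leaves pointing at 'leaf',
 * anything smaller recurses into the next level down. */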
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map);
        p = map->nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < P_L2_SIZE; i++) {
                p[i].skip = 0;
                p[i].ptr = PHYS_SECTION_UNASSIGNED;
            }
        }
    } else {
        p = map->nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

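/* Look up the MemoryRegionSection covering 'addr', optionally descending
 * into a subpage to return the per-page section registered there. */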
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}

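/* RAM is accessed through the host pointer (except for writes to read-only
 * RAM) and ROM devices are read directly; everything else goes through the
 * MMIO callbacks. */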
static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}

MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    hwaddr len = *plen;

    for (;;) {
        section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        len = MIN(page, len);
    }

    *plen = len;
    *xlat = addr;
    return mr;
}

MemoryRegionSection *
address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                  hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
{
    /* We only support one address space per cpu at the moment.  */
    assert(cpu->as == as);

    if (cpu->tcg_as_listener) {
        memory_listener_unregister(cpu->tcg_as_listener);
    } else {
        cpu->tcg_as_listener = g_new0(MemoryListener, 1);
    }
    cpu->tcg_as_listener->commit = tcg_commit;
    memory_listener_register(cpu->tcg_as_listener, as);
}
#endif

void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUState *some_cpu;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = 0;
    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&cpu->breakpoints);
    QTAILQ_INIT(&cpu->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->as = &address_space_memory;
    cpu->thread_id = qemu_get_thread_id();
#endif
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)

{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    vaddr len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    vaddr len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            CPUArchState *env = cpu->env_ptr;
            tb_flush(env);
        }
    }
#endif
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
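/* Find the RAMBlock containing 'addr', trying the most-recently-used block
 * first before scanning the whole list. */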
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)block->host + (start - block->offset);
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
                                     unsigned client)
{
    if (length == 0)
        return;
    cpu_physical_memory_clear_dirty_range(start, length, client);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }
}

static void cpu_physical_memory_set_dirty_tracking(bool enable)
{
    in_migration = enable;
}

hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        iotlb = section - section->address_space->dispatch->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size) = qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t))
{
    phys_mem_alloc = alloc;
}

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}


static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#ifdef __linux__

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path,
                            Error **errp)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        goto error;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        error_setg(errp,
                   "host lacks kvm mmu notifiers, -mem-path unsupported");
        goto error;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        error_setg_errno(errp, errno,
                         "unable to create backing store for hugepages");
        g_free(filename);
        goto error;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory)) {
        perror("ftruncate");
    }

    area = mmap(0, memory, PROT_READ | PROT_WRITE,
                (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE),
                fd, 0);
    if (area == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "unable to map backing store for hugepages");
        close(fd);
        goto error;
    }

    if (mem_prealloc) {
        os_mem_prealloc(fd, area, memory);
    }

    block->fd = fd;
    return area;

error:
    if (mem_prealloc) {
        exit(1);
    }
    return NULL;
}
#endif

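/* Choose an offset in the ram_addr_t space for a new block of 'size' bytes:
 * the end of the existing block that leaves the smallest gap that still fits. */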
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QTAILQ_EMPTY(&ram_list.blocks))
        return 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    if (!qemu_opt_get_bool(qemu_get_machine_opts(),
                           "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                            "but dump_guest_core=off specified\n");
        }
    }
}

static RAMBlock *find_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            return block;
        }
    }

    return NULL;
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block = find_ram_block(addr);
    RAMBlock *block;

    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}

void qemu_ram_unset_idstr(ram_addr_t addr)
{
    RAMBlock *block = find_ram_block(addr);

    if (block) {
        memset(block->idstr, 0, sizeof(block->idstr));
    }
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

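/* Allocate host memory for 'new_block' unless it was pre-set, insert the
 * block into the size-sorted RAM list and extend the dirty memory bitmaps. */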
static ram_addr_t ram_block_add(RAMBlock *new_block)
{
    RAMBlock *block;
    ram_addr_t old_ram_size, new_ram_size;

    old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    new_block->offset = find_ram_offset(new_block->length);

    if (!new_block->host) {
        if (xen_enabled()) {
            xen_ram_alloc(new_block->offset, new_block->length, new_block->mr);
        } else {
            new_block->host = phys_mem_alloc(new_block->length);
            if (!new_block->host) {
                fprintf(stderr, "Cannot set up guest memory '%s': %s\n",
                        new_block->mr->name, strerror(errno));
                exit(1);
            }
            memory_try_enable_merging(new_block->host, new_block->length);
        }
    }

    /* Keep the list sorted from biggest to smallest block.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
            break;
        }
    }
    if (block) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
    } else {
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    if (new_ram_size > old_ram_size) {
        int i;
        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
            ram_list.dirty_memory[i] =
                bitmap_zero_extend(ram_list.dirty_memory[i],
                                   old_ram_size, new_ram_size);
        }
    }
    cpu_physical_memory_set_dirty_range(new_block->offset, new_block->length);

    qemu_ram_setup_dump(new_block->host, new_block->length);
    qemu_madvise(new_block->host, new_block->length, QEMU_MADV_HUGEPAGE);
    qemu_madvise(new_block->host, new_block->length, QEMU_MADV_DONTFORK);

    if (kvm_enabled()) {
        kvm_setup_guest_memory(new_block->host, new_block->length);
    }

    return new_block->offset;
}

#ifdef __linux__
ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                    bool share, const char *mem_path,
                                    Error **errp)
{
    RAMBlock *new_block;

    if (xen_enabled()) {
        error_setg(errp, "-mem-path not supported with Xen");
        return -1;
    }

    if (phys_mem_alloc != qemu_anon_ram_alloc) {
        /*
         * file_ram_alloc() needs to allocate just like
         * phys_mem_alloc, but we haven't bothered to provide
         * a hook there.
         */
        error_setg(errp,
                   "-mem-path not supported with this accelerator");
        return -1;
    }

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->length = size;
    new_block->flags = share ? RAM_SHARED : 0;
    new_block->host = file_ram_alloc(new_block, size,
                                     mem_path, errp);
    if (!new_block->host) {
        g_free(new_block);
        return -1;
    }

    return ram_block_add(new_block);
}
#endif

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->length = size;
    new_block->fd = -1;
    new_block->host = host;
    if (host) {
        new_block->flags |= RAM_PREALLOC;
    }
    return ram_block_add(new_block);
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            if (block->flags & RAM_PREALLOC) {
                ;
            } else if (xen_enabled()) {
                xen_invalidate_map_cache_entry(block->host);
#ifndef _WIN32
            } else if (block->fd >= 0) {
                munmap(block->host, block->length);
                close(block->fd);
#endif
            } else {
                qemu_anon_ram_free(block->host, block->length);
            }
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC) {
                ;
            } else if (xen_enabled()) {
                abort();
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (block->fd >= 0) {
                    flags |= (block->flags & RAM_SHARED ?
                              MAP_SHARED : MAP_PRIVATE);
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, block->fd, offset);
                } else {
                    /*
                     * Remap needs to match alloc.  Accelerators that
                     * set phys_mem_alloc never remap.  If they did,
                     * we'd need a remap hook here.
                     */
                    assert(phys_mem_alloc == qemu_anon_ram_alloc);

                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */

int qemu_get_ram_fd(ram_addr_t addr)
{
    RAMBlock *block = qemu_get_ram_block(addr);

    return block->fd;
}

void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
{
    RAMBlock *block = qemu_get_ram_block(addr);

    return block->host;
}

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block = qemu_get_ram_block(addr);

    if (xen_enabled()) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        } else if (block->host == NULL) {
            block->host =
                xen_map_cache(block->offset, block->length, 1);
        }
    }
    return block->host + (addr - block->offset);
}

/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QTAILQ_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return qemu_get_ram_block(*ram_addr)->mr;
    }

    block = ram_list.mru_block;
    if (block && block->host && host - block->host < block->length) {
        goto found;
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        /* This case appears when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            goto found;
        }
    }

    return NULL;

found:
    *ram_addr = block->offset + (host - block->host);
    return block->mr;
}

static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_page_fast(ram_addr, size);
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_MIGRATION);
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_VGA);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (!cpu_physical_memory_is_clean(ram_addr)) {
        CPUArchState *env = current_cpu->env_ptr;
        tlb_set_dirty(env, current_cpu->mem_io_vaddr);
    }
}

static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)
{
    return is_write;
}

static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *cpu = current_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (cpu->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!cpu->watchpoint_hit) {
                cpu->watchpoint_hit = wp;
                tb_check_watchpoint(cpu);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    cpu->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(cpu);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(cpu, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(&address_space_memory, addr);
    case 2: return lduw_phys(&address_space_memory, addr);
    case 4: return ldl_phys(&address_space_memory, addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, hwaddr addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(&address_space_memory, addr, val);
        break;
    case 2:
        stw_phys(&address_space_memory, addr, val);
        break;
    case 4:
        stl_phys(&address_space_memory, addr, val);
        break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t subpage_read(void *opaque, hwaddr addr,
                             unsigned len)
{
    subpage_t *subpage = opaque;
    uint8_t buf[4];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
#endif
    address_space_read(subpage->as, addr + subpage->base, buf, len);
    switch (len) {
    case 1:
        return ldub_p(buf);
    case 2:
        return lduw_p(buf);
    case 4:
        return ldl_p(buf);
    default:
        abort();
    }
}

static void subpage_write(void *opaque, hwaddr addr,
                          uint64_t value, unsigned len)
{
    subpage_t *subpage = opaque;
    uint8_t buf[4];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
#endif
    switch (len) {
    case 1:
        stb_p(buf, value);
        break;
    case 2:
        stw_p(buf, value);
        break;
    case 4:
        stl_p(buf, value);
        break;
    default:
        abort();
    }
    address_space_write(subpage->as, addr + subpage->base, buf, len);
}

static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned len, bool is_write)
{
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', len, addr);
#endif

    return address_space_access_valid(subpage->as, addr + subpage->base,
                                      len, is_write);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

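/* Point the [start, end] byte range of a subpage at the given section index. */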
c227f099 1742static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 1743 uint16_t section)
db7b5426
BS
1744{
1745 int idx, eidx;
1746
1747 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1748 return -1;
1749 idx = SUBPAGE_IDX(start);
1750 eidx = SUBPAGE_IDX(end);
1751#if defined(DEBUG_SUBPAGE)
016e9d62
AK
1752 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
1753 __func__, mmio, start, end, idx, eidx, section);
db7b5426 1754#endif
db7b5426 1755 for (; idx <= eidx; idx++) {
5312bd8b 1756 mmio->sub_section[idx] = section;
db7b5426
BS
1757 }
1758
1759 return 0;
1760}
1761
acc9d80b 1762static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
db7b5426 1763{
c227f099 1764 subpage_t *mmio;
db7b5426 1765
7267c094 1766 mmio = g_malloc0(sizeof(subpage_t));
1eec614b 1767
acc9d80b 1768 mmio->as = as;
1eec614b 1769 mmio->base = base;
2c9b15ca 1770 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
70c68e44 1771 "subpage", TARGET_PAGE_SIZE);
b3b00c78 1772 mmio->iomem.subpage = true;
db7b5426 1773#if defined(DEBUG_SUBPAGE)
016e9d62
AK
1774 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
1775 mmio, base, TARGET_PAGE_SIZE);
db7b5426 1776#endif
b41aac4f 1777 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
db7b5426
BS
1778
1779 return mmio;
1780}
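/*
 * Worked example (illustrative, not part of exec.c): with 4 KiB target
 * pages, a 1 KiB device mapped at guest-physical 0x10000400 only covers
 * part of its page.  The region_add path (mem_add, earlier in this file)
 * then backs page 0x10000000 with a subpage_t from subpage_init() and
 * calls subpage_register(mmio, 0x400, 0x7ff, section), so accesses to
 * those page offsets are forwarded to the device's section while the rest
 * of the page stays PHYS_SECTION_UNASSIGNED.
 */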
1781
a656e22f
PC
1782static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
1783 MemoryRegion *mr)
5312bd8b 1784{
a656e22f 1785 assert(as);
5312bd8b 1786 MemoryRegionSection section = {
a656e22f 1787 .address_space = as,
5312bd8b
AK
1788 .mr = mr,
1789 .offset_within_address_space = 0,
1790 .offset_within_region = 0,
052e87b0 1791 .size = int128_2_64(),
5312bd8b
AK
1792 };
1793
53cb28cb 1794 return phys_section_add(map, &section);
5312bd8b
AK
1795}
1796
77717094 1797MemoryRegion *iotlb_to_region(AddressSpace *as, hwaddr index)
aa102231 1798{
77717094 1799 return as->dispatch->map.sections[index & ~TARGET_PAGE_MASK].mr;
aa102231
AK
1800}
1801
e9179ce1
AK
1802static void io_mem_init(void)
1803{
2c9b15ca
PB
1804 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
1805 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
0e0df1e2 1806 "unassigned", UINT64_MAX);
2c9b15ca 1807 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
0e0df1e2 1808 "notdirty", UINT64_MAX);
2c9b15ca 1809 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
1ec9b909 1810 "watch", UINT64_MAX);
e9179ce1
AK
1811}
1812
ac1970fb 1813static void mem_begin(MemoryListener *listener)
00752703
PB
1814{
1815 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
53cb28cb
MA
1816 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
1817 uint16_t n;
1818
a656e22f 1819 n = dummy_section(&d->map, as, &io_mem_unassigned);
53cb28cb 1820 assert(n == PHYS_SECTION_UNASSIGNED);
a656e22f 1821 n = dummy_section(&d->map, as, &io_mem_notdirty);
53cb28cb 1822 assert(n == PHYS_SECTION_NOTDIRTY);
a656e22f 1823 n = dummy_section(&d->map, as, &io_mem_rom);
53cb28cb 1824 assert(n == PHYS_SECTION_ROM);
a656e22f 1825 n = dummy_section(&d->map, as, &io_mem_watch);
53cb28cb 1826 assert(n == PHYS_SECTION_WATCH);
00752703 1827
9736e55b 1828 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
00752703
PB
1829 d->as = as;
1830 as->next_dispatch = d;
1831}
1832
1833static void mem_commit(MemoryListener *listener)
ac1970fb 1834{
89ae337a 1835 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
0475d94f
PB
1836 AddressSpaceDispatch *cur = as->dispatch;
1837 AddressSpaceDispatch *next = as->next_dispatch;
1838
53cb28cb 1839 phys_page_compact_all(next, next->map.nodes_nb);
b35ba30f 1840
0475d94f 1841 as->dispatch = next;
b41aac4f 1842
53cb28cb
MA
1843 if (cur) {
1844 phys_sections_free(&cur->map);
1845 g_free(cur);
1846 }
9affd6fc
PB
1847}
1848
1d71148e 1849static void tcg_commit(MemoryListener *listener)
50c1e149 1850{
182735ef 1851 CPUState *cpu;
117712c3
AK
1852
 1853 /* Since each CPU stores RAM addresses in its TLB cache, we must
 1854 reset the modified entries. */
 1855 /* XXX: slow! */
bdc44640 1856 CPU_FOREACH(cpu) {
33bde2e1
EI
 1857 /* FIXME: Disentangle the cpu.h circular file deps so we can
 1858 directly get the right CPU from the listener. */
1859 if (cpu->tcg_as_listener != listener) {
1860 continue;
1861 }
00c8cb0a 1862 tlb_flush(cpu, 1);
117712c3 1863 }
50c1e149
AK
1864}
1865
93632747
AK
1866static void core_log_global_start(MemoryListener *listener)
1867{
981fdf23 1868 cpu_physical_memory_set_dirty_tracking(true);
93632747
AK
1869}
1870
1871static void core_log_global_stop(MemoryListener *listener)
1872{
981fdf23 1873 cpu_physical_memory_set_dirty_tracking(false);
93632747
AK
1874}
1875
93632747 1876static MemoryListener core_memory_listener = {
93632747
AK
1877 .log_global_start = core_log_global_start,
1878 .log_global_stop = core_log_global_stop,
ac1970fb 1879 .priority = 1,
93632747
AK
1880};
1881
ac1970fb
AK
1882void address_space_init_dispatch(AddressSpace *as)
1883{
00752703 1884 as->dispatch = NULL;
89ae337a 1885 as->dispatch_listener = (MemoryListener) {
ac1970fb 1886 .begin = mem_begin,
00752703 1887 .commit = mem_commit,
ac1970fb
AK
1888 .region_add = mem_add,
1889 .region_nop = mem_add,
1890 .priority = 0,
1891 };
89ae337a 1892 memory_listener_register(&as->dispatch_listener, as);
ac1970fb
AK
1893}
1894
83f3c251
AK
1895void address_space_destroy_dispatch(AddressSpace *as)
1896{
1897 AddressSpaceDispatch *d = as->dispatch;
1898
89ae337a 1899 memory_listener_unregister(&as->dispatch_listener);
83f3c251
AK
1900 g_free(d);
1901 as->dispatch = NULL;
1902}
1903
62152b8a
AK
1904static void memory_map_init(void)
1905{
7267c094 1906 system_memory = g_malloc(sizeof(*system_memory));
03f49957 1907
57271d63 1908 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
7dca8043 1909 address_space_init(&address_space_memory, system_memory, "memory");
309cb471 1910
7267c094 1911 system_io = g_malloc(sizeof(*system_io));
3bb28b72
JK
1912 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
1913 65536);
7dca8043 1914 address_space_init(&address_space_io, system_io, "I/O");
93632747 1915
f6790af6 1916 memory_listener_register(&core_memory_listener, &address_space_memory);
62152b8a
AK
1917}
1918
1919MemoryRegion *get_system_memory(void)
1920{
1921 return system_memory;
1922}
1923
309cb471
AK
1924MemoryRegion *get_system_io(void)
1925{
1926 return system_io;
1927}
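/*
 * Illustrative sketch (assumption, not part of exec.c): board code
 * typically obtains the root region created above via get_system_memory()
 * and maps its own regions into it.  The base address and the RAM region
 * are hypothetical.
 */
static void example_map_board_ram(MemoryRegion *ram)
{
    /* place an already-initialized RAM region at a made-up base address */
    memory_region_add_subregion(get_system_memory(), 0x40000000, ram);
}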
1928
e2eef170
PB
1929#endif /* !defined(CONFIG_USER_ONLY) */
1930
13eb76e0
FB
1931/* physical memory access (slow version, mainly for debug) */
1932#if defined(CONFIG_USER_ONLY)
f17ec444 1933int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
a68fe89c 1934 uint8_t *buf, int len, int is_write)
13eb76e0
FB
1935{
1936 int l, flags;
1937 target_ulong page;
53a5960a 1938 void * p;
13eb76e0
FB
1939
1940 while (len > 0) {
1941 page = addr & TARGET_PAGE_MASK;
1942 l = (page + TARGET_PAGE_SIZE) - addr;
1943 if (l > len)
1944 l = len;
1945 flags = page_get_flags(page);
1946 if (!(flags & PAGE_VALID))
a68fe89c 1947 return -1;
13eb76e0
FB
1948 if (is_write) {
1949 if (!(flags & PAGE_WRITE))
a68fe89c 1950 return -1;
579a97f7 1951 /* XXX: this code should not depend on lock_user */
72fb7daa 1952 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 1953 return -1;
72fb7daa
AJ
1954 memcpy(p, buf, l);
1955 unlock_user(p, addr, l);
13eb76e0
FB
1956 } else {
1957 if (!(flags & PAGE_READ))
a68fe89c 1958 return -1;
579a97f7 1959 /* XXX: this code should not depend on lock_user */
72fb7daa 1960 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 1961 return -1;
72fb7daa 1962 memcpy(buf, p, l);
5b257578 1963 unlock_user(p, addr, 0);
13eb76e0
FB
1964 }
1965 len -= l;
1966 buf += l;
1967 addr += l;
1968 }
a68fe89c 1969 return 0;
13eb76e0 1970}
8df1cd07 1971
13eb76e0 1972#else
51d7a9eb 1973
a8170e5e
AK
1974static void invalidate_and_set_dirty(hwaddr addr,
1975 hwaddr length)
51d7a9eb 1976{
a2cd8c85 1977 if (cpu_physical_memory_is_clean(addr)) {
51d7a9eb
AP
1978 /* invalidate code */
1979 tb_invalidate_phys_page_range(addr, addr + length, 0);
1980 /* set dirty bit */
52159192
JQ
1981 cpu_physical_memory_set_dirty_flag(addr, DIRTY_MEMORY_VGA);
1982 cpu_physical_memory_set_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
51d7a9eb 1983 }
e226939d 1984 xen_modified_memory(addr, length);
51d7a9eb
AP
1985}
1986
23326164 1987static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
82f2563f 1988{
e1622f4b 1989 unsigned access_size_max = mr->ops->valid.max_access_size;
23326164
RH
1990
1991 /* Regions are assumed to support 1-4 byte accesses unless
1992 otherwise specified. */
23326164
RH
1993 if (access_size_max == 0) {
1994 access_size_max = 4;
1995 }
1996
1997 /* Bound the maximum access by the alignment of the address. */
1998 if (!mr->ops->impl.unaligned) {
1999 unsigned align_size_max = addr & -addr;
2000 if (align_size_max != 0 && align_size_max < access_size_max) {
2001 access_size_max = align_size_max;
2002 }
82f2563f 2003 }
23326164
RH
2004
2005 /* Don't attempt accesses larger than the maximum. */
2006 if (l > access_size_max) {
2007 l = access_size_max;
82f2563f 2008 }
098178f2
PB
2009 if (l & (l - 1)) {
2010 l = 1 << (qemu_fls(l) - 1);
2011 }
23326164
RH
2012
2013 return l;
82f2563f
PB
2014}
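/*
 * Worked example (illustrative): for an MMIO region that leaves
 * .valid.max_access_size at 0 (so 4 is assumed) and does not support
 * unaligned accesses, an 8-byte request at address 0x1002 is clamped as
 * follows: access_size_max = 4; align_size_max = 0x1002 & -0x1002 = 2;
 * so l = 2, which is already a power of two.  address_space_rw() below
 * therefore splits such a request into 2-byte device accesses.
 */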
2015
fd8aaa76 2016bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
ac1970fb 2017 int len, bool is_write)
13eb76e0 2018{
149f54b5 2019 hwaddr l;
13eb76e0 2020 uint8_t *ptr;
791af8c8 2021 uint64_t val;
149f54b5 2022 hwaddr addr1;
5c8a00ce 2023 MemoryRegion *mr;
fd8aaa76 2024 bool error = false;
3b46e624 2025
13eb76e0 2026 while (len > 0) {
149f54b5 2027 l = len;
5c8a00ce 2028 mr = address_space_translate(as, addr, &addr1, &l, is_write);
3b46e624 2029
13eb76e0 2030 if (is_write) {
5c8a00ce
PB
2031 if (!memory_access_is_direct(mr, is_write)) {
2032 l = memory_access_size(mr, l, addr1);
4917cf44 2033 /* XXX: could force current_cpu to NULL to avoid
6a00d601 2034 potential bugs */
23326164
RH
2035 switch (l) {
2036 case 8:
2037 /* 64 bit write access */
2038 val = ldq_p(buf);
2039 error |= io_mem_write(mr, addr1, val, 8);
2040 break;
2041 case 4:
1c213d19 2042 /* 32 bit write access */
c27004ec 2043 val = ldl_p(buf);
5c8a00ce 2044 error |= io_mem_write(mr, addr1, val, 4);
23326164
RH
2045 break;
2046 case 2:
1c213d19 2047 /* 16 bit write access */
c27004ec 2048 val = lduw_p(buf);
5c8a00ce 2049 error |= io_mem_write(mr, addr1, val, 2);
23326164
RH
2050 break;
2051 case 1:
1c213d19 2052 /* 8 bit write access */
c27004ec 2053 val = ldub_p(buf);
5c8a00ce 2054 error |= io_mem_write(mr, addr1, val, 1);
23326164
RH
2055 break;
2056 default:
2057 abort();
13eb76e0 2058 }
2bbfa05d 2059 } else {
5c8a00ce 2060 addr1 += memory_region_get_ram_addr(mr);
13eb76e0 2061 /* RAM case */
5579c7f3 2062 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 2063 memcpy(ptr, buf, l);
51d7a9eb 2064 invalidate_and_set_dirty(addr1, l);
13eb76e0
FB
2065 }
2066 } else {
5c8a00ce 2067 if (!memory_access_is_direct(mr, is_write)) {
13eb76e0 2068 /* I/O case */
5c8a00ce 2069 l = memory_access_size(mr, l, addr1);
23326164
RH
2070 switch (l) {
2071 case 8:
2072 /* 64 bit read access */
2073 error |= io_mem_read(mr, addr1, &val, 8);
2074 stq_p(buf, val);
2075 break;
2076 case 4:
13eb76e0 2077 /* 32 bit read access */
5c8a00ce 2078 error |= io_mem_read(mr, addr1, &val, 4);
c27004ec 2079 stl_p(buf, val);
23326164
RH
2080 break;
2081 case 2:
13eb76e0 2082 /* 16 bit read access */
5c8a00ce 2083 error |= io_mem_read(mr, addr1, &val, 2);
c27004ec 2084 stw_p(buf, val);
23326164
RH
2085 break;
2086 case 1:
1c213d19 2087 /* 8 bit read access */
5c8a00ce 2088 error |= io_mem_read(mr, addr1, &val, 1);
c27004ec 2089 stb_p(buf, val);
23326164
RH
2090 break;
2091 default:
2092 abort();
13eb76e0
FB
2093 }
2094 } else {
2095 /* RAM case */
5c8a00ce 2096 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
f3705d53 2097 memcpy(buf, ptr, l);
13eb76e0
FB
2098 }
2099 }
2100 len -= l;
2101 buf += l;
2102 addr += l;
2103 }
fd8aaa76
PB
2104
2105 return error;
13eb76e0 2106}
8df1cd07 2107
fd8aaa76 2108bool address_space_write(AddressSpace *as, hwaddr addr,
ac1970fb
AK
2109 const uint8_t *buf, int len)
2110{
fd8aaa76 2111 return address_space_rw(as, addr, (uint8_t *)buf, len, true);
ac1970fb
AK
2112}
2113
fd8aaa76 2114bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
ac1970fb 2115{
fd8aaa76 2116 return address_space_rw(as, addr, buf, len, false);
ac1970fb
AK
2117}
2118
2119
a8170e5e 2120void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
ac1970fb
AK
2121 int len, int is_write)
2122{
fd8aaa76 2123 address_space_rw(&address_space_memory, addr, buf, len, is_write);
ac1970fb
AK
2124}
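/*
 * Illustrative sketch (not part of exec.c): copying a small buffer out of
 * guest-physical memory with the helpers above.  The address is a made-up
 * placeholder.
 */
static void example_read_guest_buffer(void)
{
    uint8_t data[64];
    const hwaddr guest_addr = 0x1000;   /* hypothetical guest-physical address */

    /* convenience wrapper: always uses address_space_memory */
    cpu_physical_memory_rw(guest_addr, data, sizeof(data), 0 /* read */);

    /* equivalent, but against an explicit address space */
    address_space_read(&address_space_memory, guest_addr, data, sizeof(data));
}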
2125
582b55a9
AG
2126enum write_rom_type {
2127 WRITE_DATA,
2128 FLUSH_CACHE,
2129};
2130
2a221651 2131static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
582b55a9 2132 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
d0ecd2aa 2133{
149f54b5 2134 hwaddr l;
d0ecd2aa 2135 uint8_t *ptr;
149f54b5 2136 hwaddr addr1;
5c8a00ce 2137 MemoryRegion *mr;
3b46e624 2138
d0ecd2aa 2139 while (len > 0) {
149f54b5 2140 l = len;
2a221651 2141 mr = address_space_translate(as, addr, &addr1, &l, true);
3b46e624 2142
5c8a00ce
PB
2143 if (!(memory_region_is_ram(mr) ||
2144 memory_region_is_romd(mr))) {
d0ecd2aa
FB
2145 /* do nothing */
2146 } else {
5c8a00ce 2147 addr1 += memory_region_get_ram_addr(mr);
d0ecd2aa 2148 /* ROM/RAM case */
5579c7f3 2149 ptr = qemu_get_ram_ptr(addr1);
582b55a9
AG
2150 switch (type) {
2151 case WRITE_DATA:
2152 memcpy(ptr, buf, l);
2153 invalidate_and_set_dirty(addr1, l);
2154 break;
2155 case FLUSH_CACHE:
2156 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2157 break;
2158 }
d0ecd2aa
FB
2159 }
2160 len -= l;
2161 buf += l;
2162 addr += l;
2163 }
2164}
2165
582b55a9 2166/* Used for ROM loading: can write to both RAM and ROM. */
2a221651 2167void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
582b55a9
AG
2168 const uint8_t *buf, int len)
2169{
2a221651 2170 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
582b55a9
AG
2171}
2172
2173void cpu_flush_icache_range(hwaddr start, int len)
2174{
2175 /*
2176 * This function should do the same thing as an icache flush that was
2177 * triggered from within the guest. For TCG we are always cache coherent,
2178 * so there is no need to flush anything. For KVM / Xen we need to flush
2179 * the host's instruction cache at least.
2180 */
2181 if (tcg_enabled()) {
2182 return;
2183 }
2184
2a221651
EI
2185 cpu_physical_memory_write_rom_internal(&address_space_memory,
2186 start, NULL, len, FLUSH_CACHE);
582b55a9
AG
2187}
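/*
 * Illustrative sketch (assumption, not part of exec.c): a firmware loader
 * could combine the two helpers above to install a blob into a ROM-backed
 * region and keep the host instruction cache coherent under KVM/Xen.
 */
static void example_install_firmware(AddressSpace *as, hwaddr addr,
                                     const uint8_t *blob, int size)
{
    cpu_physical_memory_write_rom(as, addr, blob, size);
    cpu_flush_icache_range(addr, size);
}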
2188
6d16c2f8 2189typedef struct {
d3e71559 2190 MemoryRegion *mr;
6d16c2f8 2191 void *buffer;
a8170e5e
AK
2192 hwaddr addr;
2193 hwaddr len;
6d16c2f8
AL
2194} BounceBuffer;
2195
2196static BounceBuffer bounce;
2197
ba223c29
AL
2198typedef struct MapClient {
2199 void *opaque;
2200 void (*callback)(void *opaque);
72cf2d4f 2201 QLIST_ENTRY(MapClient) link;
ba223c29
AL
2202} MapClient;
2203
72cf2d4f
BS
2204static QLIST_HEAD(map_client_list, MapClient) map_client_list
2205 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
2206
2207void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2208{
7267c094 2209 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
2210
2211 client->opaque = opaque;
2212 client->callback = callback;
72cf2d4f 2213 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
2214 return client;
2215}
2216
8b9c99d9 2217static void cpu_unregister_map_client(void *_client)
ba223c29
AL
2218{
2219 MapClient *client = (MapClient *)_client;
2220
72cf2d4f 2221 QLIST_REMOVE(client, link);
7267c094 2222 g_free(client);
ba223c29
AL
2223}
2224
2225static void cpu_notify_map_clients(void)
2226{
2227 MapClient *client;
2228
72cf2d4f
BS
2229 while (!QLIST_EMPTY(&map_client_list)) {
2230 client = QLIST_FIRST(&map_client_list);
ba223c29 2231 client->callback(client->opaque);
34d5e948 2232 cpu_unregister_map_client(client);
ba223c29
AL
2233 }
2234}
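/*
 * Illustrative sketch (not part of exec.c): a DMA user whose
 * address_space_map() call returned NULL because the single bounce buffer
 * was busy can register a callback and retry once it is released.
 * "MyDMAState" and "example_dma_retry_cb" are hypothetical names.
 */
typedef struct MyDMAState MyDMAState;

static void example_dma_retry_cb(void *opaque)
{
    MyDMAState *s = opaque;
    /* re-issue the address_space_map() call that previously failed */
    (void)s;
}

/* usage: cpu_register_map_client(s, example_dma_retry_cb); */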
2235
51644ab7
PB
2236bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2237{
5c8a00ce 2238 MemoryRegion *mr;
51644ab7
PB
2239 hwaddr l, xlat;
2240
2241 while (len > 0) {
2242 l = len;
5c8a00ce
PB
2243 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2244 if (!memory_access_is_direct(mr, is_write)) {
2245 l = memory_access_size(mr, l, addr);
2246 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
51644ab7
PB
2247 return false;
2248 }
2249 }
2250
2251 len -= l;
2252 addr += l;
2253 }
2254 return true;
2255}
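/*
 * Illustrative sketch (not part of exec.c): probing whether a 4-byte
 * write to a guest address would be accepted before actually issuing it.
 */
static bool example_can_write_u32(AddressSpace *as, hwaddr addr)
{
    return address_space_access_valid(as, addr, 4, true /* is_write */);
}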
2256
6d16c2f8
AL
2257/* Map a physical memory region into a host virtual address.
2258 * May map a subset of the requested range, given by and returned in *plen.
2259 * May return NULL if resources needed to perform the mapping are exhausted.
2260 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
2261 * Use cpu_register_map_client() to know when retrying the map operation is
2262 * likely to succeed.
6d16c2f8 2263 */
ac1970fb 2264void *address_space_map(AddressSpace *as,
a8170e5e
AK
2265 hwaddr addr,
2266 hwaddr *plen,
ac1970fb 2267 bool is_write)
6d16c2f8 2268{
a8170e5e 2269 hwaddr len = *plen;
e3127ae0
PB
2270 hwaddr done = 0;
2271 hwaddr l, xlat, base;
2272 MemoryRegion *mr, *this_mr;
2273 ram_addr_t raddr;
6d16c2f8 2274
e3127ae0
PB
2275 if (len == 0) {
2276 return NULL;
2277 }
38bee5dc 2278
e3127ae0
PB
2279 l = len;
2280 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2281 if (!memory_access_is_direct(mr, is_write)) {
2282 if (bounce.buffer) {
2283 return NULL;
6d16c2f8 2284 }
e85d9db5
KW
2285 /* Avoid unbounded allocations */
2286 l = MIN(l, TARGET_PAGE_SIZE);
2287 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
e3127ae0
PB
2288 bounce.addr = addr;
2289 bounce.len = l;
d3e71559
PB
2290
2291 memory_region_ref(mr);
2292 bounce.mr = mr;
e3127ae0
PB
2293 if (!is_write) {
2294 address_space_read(as, addr, bounce.buffer, l);
8ab934f9 2295 }
6d16c2f8 2296
e3127ae0
PB
2297 *plen = l;
2298 return bounce.buffer;
2299 }
2300
2301 base = xlat;
2302 raddr = memory_region_get_ram_addr(mr);
2303
2304 for (;;) {
6d16c2f8
AL
2305 len -= l;
2306 addr += l;
e3127ae0
PB
2307 done += l;
2308 if (len == 0) {
2309 break;
2310 }
2311
2312 l = len;
2313 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2314 if (this_mr != mr || xlat != base + done) {
2315 break;
2316 }
6d16c2f8 2317 }
e3127ae0 2318
d3e71559 2319 memory_region_ref(mr);
e3127ae0
PB
2320 *plen = done;
2321 return qemu_ram_ptr_length(raddr + base, plen);
6d16c2f8
AL
2322}
2323
ac1970fb 2324/* Unmaps a memory region previously mapped by address_space_map().
6d16c2f8
AL
2325 * Will also mark the memory as dirty if is_write == 1. access_len gives
2326 * the amount of memory that was actually read or written by the caller.
2327 */
a8170e5e
AK
2328void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2329 int is_write, hwaddr access_len)
6d16c2f8
AL
2330{
2331 if (buffer != bounce.buffer) {
d3e71559
PB
2332 MemoryRegion *mr;
2333 ram_addr_t addr1;
2334
2335 mr = qemu_ram_addr_from_host(buffer, &addr1);
2336 assert(mr != NULL);
6d16c2f8 2337 if (is_write) {
6d16c2f8
AL
2338 while (access_len) {
2339 unsigned l;
2340 l = TARGET_PAGE_SIZE;
2341 if (l > access_len)
2342 l = access_len;
51d7a9eb 2343 invalidate_and_set_dirty(addr1, l);
6d16c2f8
AL
2344 addr1 += l;
2345 access_len -= l;
2346 }
2347 }
868bb33f 2348 if (xen_enabled()) {
e41d7c69 2349 xen_invalidate_map_cache_entry(buffer);
050a0ddf 2350 }
d3e71559 2351 memory_region_unref(mr);
6d16c2f8
AL
2352 return;
2353 }
2354 if (is_write) {
ac1970fb 2355 address_space_write(as, bounce.addr, bounce.buffer, access_len);
6d16c2f8 2356 }
f8a83245 2357 qemu_vfree(bounce.buffer);
6d16c2f8 2358 bounce.buffer = NULL;
d3e71559 2359 memory_region_unref(bounce.mr);
ba223c29 2360 cpu_notify_map_clients();
6d16c2f8 2361}
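/*
 * Illustrative sketch (not part of exec.c): the usual zero-copy pattern
 * for the two functions above.  Note that *plen may come back smaller
 * than requested, and that NULL means "try again later" (see the map
 * client API above).
 */
static void example_zero_guest_range(AddressSpace *as, hwaddr addr, hwaddr len)
{
    hwaddr plen = len;
    void *host = address_space_map(as, addr, &plen, true /* is_write */);

    if (!host) {
        /* resources exhausted: register a map client and retry later */
        return;
    }
    memset(host, 0, plen);                       /* may cover less than len */
    address_space_unmap(as, host, plen, true /* is_write */, plen);
}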
d0ecd2aa 2362
a8170e5e
AK
2363void *cpu_physical_memory_map(hwaddr addr,
2364 hwaddr *plen,
ac1970fb
AK
2365 int is_write)
2366{
2367 return address_space_map(&address_space_memory, addr, plen, is_write);
2368}
2369
a8170e5e
AK
2370void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2371 int is_write, hwaddr access_len)
ac1970fb
AK
2372{
2373 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2374}
2375
8df1cd07 2376/* warning: addr must be aligned */
fdfba1a2 2377static inline uint32_t ldl_phys_internal(AddressSpace *as, hwaddr addr,
1e78bcc1 2378 enum device_endian endian)
8df1cd07 2379{
8df1cd07 2380 uint8_t *ptr;
791af8c8 2381 uint64_t val;
5c8a00ce 2382 MemoryRegion *mr;
149f54b5
PB
2383 hwaddr l = 4;
2384 hwaddr addr1;
8df1cd07 2385
fdfba1a2 2386 mr = address_space_translate(as, addr, &addr1, &l, false);
5c8a00ce 2387 if (l < 4 || !memory_access_is_direct(mr, false)) {
8df1cd07 2388 /* I/O case */
5c8a00ce 2389 io_mem_read(mr, addr1, &val, 4);
1e78bcc1
AG
2390#if defined(TARGET_WORDS_BIGENDIAN)
2391 if (endian == DEVICE_LITTLE_ENDIAN) {
2392 val = bswap32(val);
2393 }
2394#else
2395 if (endian == DEVICE_BIG_ENDIAN) {
2396 val = bswap32(val);
2397 }
2398#endif
8df1cd07
FB
2399 } else {
2400 /* RAM case */
5c8a00ce 2401 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2402 & TARGET_PAGE_MASK)
149f54b5 2403 + addr1);
1e78bcc1
AG
2404 switch (endian) {
2405 case DEVICE_LITTLE_ENDIAN:
2406 val = ldl_le_p(ptr);
2407 break;
2408 case DEVICE_BIG_ENDIAN:
2409 val = ldl_be_p(ptr);
2410 break;
2411 default:
2412 val = ldl_p(ptr);
2413 break;
2414 }
8df1cd07
FB
2415 }
2416 return val;
2417}
2418
fdfba1a2 2419uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2420{
fdfba1a2 2421 return ldl_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2422}
2423
fdfba1a2 2424uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2425{
fdfba1a2 2426 return ldl_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2427}
2428
fdfba1a2 2429uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2430{
fdfba1a2 2431 return ldl_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2432}
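/*
 * Illustrative sketch (not part of exec.c): reading a 32-bit
 * little-endian field of a hypothetical in-memory descriptor, independent
 * of host and target byte order.
 */
static uint32_t example_read_desc_flags(AddressSpace *as, hwaddr desc_addr)
{
    return ldl_le_phys(as, desc_addr + 4);   /* hypothetical flags offset */
}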
2433
84b7b8e7 2434/* warning: addr must be aligned */
2c17449b 2435static inline uint64_t ldq_phys_internal(AddressSpace *as, hwaddr addr,
1e78bcc1 2436 enum device_endian endian)
84b7b8e7 2437{
84b7b8e7
FB
2438 uint8_t *ptr;
2439 uint64_t val;
5c8a00ce 2440 MemoryRegion *mr;
149f54b5
PB
2441 hwaddr l = 8;
2442 hwaddr addr1;
84b7b8e7 2443
2c17449b 2444 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2445 false);
2446 if (l < 8 || !memory_access_is_direct(mr, false)) {
84b7b8e7 2447 /* I/O case */
5c8a00ce 2448 io_mem_read(mr, addr1, &val, 8);
968a5627
PB
2449#if defined(TARGET_WORDS_BIGENDIAN)
2450 if (endian == DEVICE_LITTLE_ENDIAN) {
2451 val = bswap64(val);
2452 }
2453#else
2454 if (endian == DEVICE_BIG_ENDIAN) {
2455 val = bswap64(val);
2456 }
84b7b8e7
FB
2457#endif
2458 } else {
2459 /* RAM case */
5c8a00ce 2460 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2461 & TARGET_PAGE_MASK)
149f54b5 2462 + addr1);
1e78bcc1
AG
2463 switch (endian) {
2464 case DEVICE_LITTLE_ENDIAN:
2465 val = ldq_le_p(ptr);
2466 break;
2467 case DEVICE_BIG_ENDIAN:
2468 val = ldq_be_p(ptr);
2469 break;
2470 default:
2471 val = ldq_p(ptr);
2472 break;
2473 }
84b7b8e7
FB
2474 }
2475 return val;
2476}
2477
2c17449b 2478uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2479{
2c17449b 2480 return ldq_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2481}
2482
2c17449b 2483uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2484{
2c17449b 2485 return ldq_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2486}
2487
2c17449b 2488uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2489{
2c17449b 2490 return ldq_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2491}
2492
aab33094 2493/* XXX: optimize */
2c17449b 2494uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
aab33094
FB
2495{
2496 uint8_t val;
2c17449b 2497 address_space_rw(as, addr, &val, 1, 0);
aab33094
FB
2498 return val;
2499}
2500
733f0b02 2501/* warning: addr must be aligned */
41701aa4 2502static inline uint32_t lduw_phys_internal(AddressSpace *as, hwaddr addr,
1e78bcc1 2503 enum device_endian endian)
aab33094 2504{
733f0b02
MT
2505 uint8_t *ptr;
2506 uint64_t val;
5c8a00ce 2507 MemoryRegion *mr;
149f54b5
PB
2508 hwaddr l = 2;
2509 hwaddr addr1;
733f0b02 2510
41701aa4 2511 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2512 false);
2513 if (l < 2 || !memory_access_is_direct(mr, false)) {
733f0b02 2514 /* I/O case */
5c8a00ce 2515 io_mem_read(mr, addr1, &val, 2);
1e78bcc1
AG
2516#if defined(TARGET_WORDS_BIGENDIAN)
2517 if (endian == DEVICE_LITTLE_ENDIAN) {
2518 val = bswap16(val);
2519 }
2520#else
2521 if (endian == DEVICE_BIG_ENDIAN) {
2522 val = bswap16(val);
2523 }
2524#endif
733f0b02
MT
2525 } else {
2526 /* RAM case */
5c8a00ce 2527 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2528 & TARGET_PAGE_MASK)
149f54b5 2529 + addr1);
1e78bcc1
AG
2530 switch (endian) {
2531 case DEVICE_LITTLE_ENDIAN:
2532 val = lduw_le_p(ptr);
2533 break;
2534 case DEVICE_BIG_ENDIAN:
2535 val = lduw_be_p(ptr);
2536 break;
2537 default:
2538 val = lduw_p(ptr);
2539 break;
2540 }
733f0b02
MT
2541 }
2542 return val;
aab33094
FB
2543}
2544
41701aa4 2545uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2546{
41701aa4 2547 return lduw_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2548}
2549
41701aa4 2550uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2551{
41701aa4 2552 return lduw_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2553}
2554
41701aa4 2555uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2556{
41701aa4 2557 return lduw_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2558}
2559
8df1cd07
FB
 2560/* warning: addr must be aligned. The RAM page is not marked as dirty
 2561 and the code inside is not invalidated. This is useful if the dirty
 2562 bits are used to track modified PTEs. */
2198a121 2563void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
8df1cd07 2564{
8df1cd07 2565 uint8_t *ptr;
5c8a00ce 2566 MemoryRegion *mr;
149f54b5
PB
2567 hwaddr l = 4;
2568 hwaddr addr1;
8df1cd07 2569
2198a121 2570 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2571 true);
2572 if (l < 4 || !memory_access_is_direct(mr, true)) {
2573 io_mem_write(mr, addr1, val, 4);
8df1cd07 2574 } else {
5c8a00ce 2575 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 2576 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 2577 stl_p(ptr, val);
74576198
AL
2578
2579 if (unlikely(in_migration)) {
a2cd8c85 2580 if (cpu_physical_memory_is_clean(addr1)) {
74576198
AL
2581 /* invalidate code */
2582 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2583 /* set dirty bit */
52159192
JQ
2584 cpu_physical_memory_set_dirty_flag(addr1,
2585 DIRTY_MEMORY_MIGRATION);
2586 cpu_physical_memory_set_dirty_flag(addr1, DIRTY_MEMORY_VGA);
74576198
AL
2587 }
2588 }
8df1cd07
FB
2589 }
2590}
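/*
 * Illustrative sketch (assumption, not part of exec.c): an MMU helper
 * setting the "accessed" bit of a hypothetical 32-bit page table entry in
 * guest RAM.  Using the _notdirty variant keeps the write from being
 * reported to VGA/migration dirty tracking, as described above.
 */
static void example_set_pte_accessed(AddressSpace *as, hwaddr pte_addr)
{
    uint32_t pte = ldl_phys(as, pte_addr);
    stl_phys_notdirty(as, pte_addr, pte | (1u << 5) /* hypothetical A bit */);
}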
2591
2592/* warning: addr must be aligned */
ab1da857
EI
2593static inline void stl_phys_internal(AddressSpace *as,
2594 hwaddr addr, uint32_t val,
1e78bcc1 2595 enum device_endian endian)
8df1cd07 2596{
8df1cd07 2597 uint8_t *ptr;
5c8a00ce 2598 MemoryRegion *mr;
149f54b5
PB
2599 hwaddr l = 4;
2600 hwaddr addr1;
8df1cd07 2601
ab1da857 2602 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2603 true);
2604 if (l < 4 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
2605#if defined(TARGET_WORDS_BIGENDIAN)
2606 if (endian == DEVICE_LITTLE_ENDIAN) {
2607 val = bswap32(val);
2608 }
2609#else
2610 if (endian == DEVICE_BIG_ENDIAN) {
2611 val = bswap32(val);
2612 }
2613#endif
5c8a00ce 2614 io_mem_write(mr, addr1, val, 4);
8df1cd07 2615 } else {
8df1cd07 2616 /* RAM case */
5c8a00ce 2617 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 2618 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2619 switch (endian) {
2620 case DEVICE_LITTLE_ENDIAN:
2621 stl_le_p(ptr, val);
2622 break;
2623 case DEVICE_BIG_ENDIAN:
2624 stl_be_p(ptr, val);
2625 break;
2626 default:
2627 stl_p(ptr, val);
2628 break;
2629 }
51d7a9eb 2630 invalidate_and_set_dirty(addr1, 4);
8df1cd07
FB
2631 }
2632}
2633
ab1da857 2634void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2635{
ab1da857 2636 stl_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2637}
2638
ab1da857 2639void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2640{
ab1da857 2641 stl_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2642}
2643
ab1da857 2644void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2645{
ab1da857 2646 stl_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2647}
2648
aab33094 2649/* XXX: optimize */
db3be60d 2650void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
aab33094
FB
2651{
2652 uint8_t v = val;
db3be60d 2653 address_space_rw(as, addr, &v, 1, 1);
aab33094
FB
2654}
2655
733f0b02 2656/* warning: addr must be aligned */
5ce5944d
EI
2657static inline void stw_phys_internal(AddressSpace *as,
2658 hwaddr addr, uint32_t val,
1e78bcc1 2659 enum device_endian endian)
aab33094 2660{
733f0b02 2661 uint8_t *ptr;
5c8a00ce 2662 MemoryRegion *mr;
149f54b5
PB
2663 hwaddr l = 2;
2664 hwaddr addr1;
733f0b02 2665
5ce5944d 2666 mr = address_space_translate(as, addr, &addr1, &l, true);
5c8a00ce 2667 if (l < 2 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
2668#if defined(TARGET_WORDS_BIGENDIAN)
2669 if (endian == DEVICE_LITTLE_ENDIAN) {
2670 val = bswap16(val);
2671 }
2672#else
2673 if (endian == DEVICE_BIG_ENDIAN) {
2674 val = bswap16(val);
2675 }
2676#endif
5c8a00ce 2677 io_mem_write(mr, addr1, val, 2);
733f0b02 2678 } else {
733f0b02 2679 /* RAM case */
5c8a00ce 2680 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
733f0b02 2681 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2682 switch (endian) {
2683 case DEVICE_LITTLE_ENDIAN:
2684 stw_le_p(ptr, val);
2685 break;
2686 case DEVICE_BIG_ENDIAN:
2687 stw_be_p(ptr, val);
2688 break;
2689 default:
2690 stw_p(ptr, val);
2691 break;
2692 }
51d7a9eb 2693 invalidate_and_set_dirty(addr1, 2);
733f0b02 2694 }
aab33094
FB
2695}
2696
5ce5944d 2697void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2698{
5ce5944d 2699 stw_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2700}
2701
5ce5944d 2702void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2703{
5ce5944d 2704 stw_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2705}
2706
5ce5944d 2707void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2708{
5ce5944d 2709 stw_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2710}
2711
aab33094 2712/* XXX: optimize */
f606604f 2713void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
aab33094
FB
2714{
2715 val = tswap64(val);
f606604f 2716 address_space_rw(as, addr, (void *) &val, 8, 1);
aab33094
FB
2717}
2718
f606604f 2719void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
1e78bcc1
AG
2720{
2721 val = cpu_to_le64(val);
f606604f 2722 address_space_rw(as, addr, (void *) &val, 8, 1);
1e78bcc1
AG
2723}
2724
f606604f 2725void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
1e78bcc1
AG
2726{
2727 val = cpu_to_be64(val);
f606604f 2728 address_space_rw(as, addr, (void *) &val, 8, 1);
1e78bcc1
AG
2729}
2730
5e2972fd 2731/* virtual memory access for debug (includes writing to ROM) */
f17ec444 2732int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
b448f2f3 2733 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2734{
2735 int l;
a8170e5e 2736 hwaddr phys_addr;
9b3c35e0 2737 target_ulong page;
13eb76e0
FB
2738
2739 while (len > 0) {
2740 page = addr & TARGET_PAGE_MASK;
f17ec444 2741 phys_addr = cpu_get_phys_page_debug(cpu, page);
13eb76e0
FB
2742 /* if no physical page mapped, return an error */
2743 if (phys_addr == -1)
2744 return -1;
2745 l = (page + TARGET_PAGE_SIZE) - addr;
2746 if (l > len)
2747 l = len;
5e2972fd 2748 phys_addr += (addr & ~TARGET_PAGE_MASK);
2e38847b
EI
2749 if (is_write) {
2750 cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
2751 } else {
2752 address_space_rw(cpu->as, phys_addr, buf, l, 0);
2753 }
13eb76e0
FB
2754 len -= l;
2755 buf += l;
2756 addr += l;
2757 }
2758 return 0;
2759}
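/*
 * Illustrative sketch (not part of exec.c): reading guest-virtual memory
 * of a stopped vCPU, as a debugger front end would.  The CPU pointer and
 * address are supplied by the (hypothetical) caller.
 */
static int example_peek_guest_virtual(CPUState *cpu, target_ulong va,
                                      uint8_t *buf, int len)
{
    return cpu_memory_rw_debug(cpu, va, buf, len, 0 /* read */);
}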
a68fe89c 2760#endif
13eb76e0 2761
8e4a424b
BS
2762#if !defined(CONFIG_USER_ONLY)
2763
2764/*
2765 * A helper function for the _utterly broken_ virtio device model to find out if
2766 * it's running on a big endian machine. Don't do this at home kids!
2767 */
2768bool virtio_is_big_endian(void);
2769bool virtio_is_big_endian(void)
2770{
2771#if defined(TARGET_WORDS_BIGENDIAN)
2772 return true;
2773#else
2774 return false;
2775#endif
2776}
2777
2778#endif
2779
76f35538 2780#ifndef CONFIG_USER_ONLY
a8170e5e 2781bool cpu_physical_memory_is_io(hwaddr phys_addr)
76f35538 2782{
5c8a00ce 2783 MemoryRegion *mr;
149f54b5 2784 hwaddr l = 1;
76f35538 2785
5c8a00ce
PB
2786 mr = address_space_translate(&address_space_memory,
2787 phys_addr, &phys_addr, &l, false);
76f35538 2788
5c8a00ce
PB
2789 return !(memory_region_is_ram(mr) ||
2790 memory_region_is_romd(mr));
76f35538 2791}
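/*
 * Illustrative sketch (not part of exec.c): a memory dumper could use the
 * check above to copy only RAM/ROM-backed pages and skip real MMIO.
 */
static bool example_page_is_dumpable(hwaddr phys_addr)
{
    return !cpu_physical_memory_is_io(phys_addr);
}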
bd2fa51f
MH
2792
2793void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
2794{
2795 RAMBlock *block;
2796
2797 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
2798 func(block->host, block->offset, block->length, opaque);
2799 }
2800}
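/*
 * Illustrative sketch (not part of exec.c): a RAMBlockIterFunc matching
 * the call above (host pointer, guest ram_addr offset, length, opaque)
 * that simply sums the size of all RAM blocks.
 */
static void example_sum_ram(void *host, ram_addr_t offset,
                            ram_addr_t length, void *opaque)
{
    *(ram_addr_t *)opaque += length;
}

/* usage: ram_addr_t total = 0; qemu_ram_foreach_block(example_sum_ram, &total); */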
ec3f8c99 2801#endif