/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

#include "qemu/range.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
static bool in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map)
{
    unsigned i;
    uint32_t ret;

    ret = map->nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);
    for (i = 0; i < P_L2_SIZE; ++i) {
        map->nodes[ret][i].skip = 1;
        map->nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map);
        p = map->nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < P_L2_SIZE; i++) {
                p[i].skip = 0;
                p[i].ptr = PHYS_SECTION_UNASSIGNED;
            }
        }
    } else {
        p = map->nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}
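
/*
 * Compaction example (editorial sketch): if an address space maps only one
 * small region, every intermediate node on the path to its leaf has a
 * single valid child.  Compaction collapses that chain into one entry
 * whose skip field accumulates the number of levels bypassed, provided the
 * combined skip still passes the size check above, so lookups jump straight
 * from the root towards the leaf.
 */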

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}
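
/*
 * Lookup sketch (editorial): the walk peels P_L2_BITS index bits per level
 * from addr >> TARGET_PAGE_BITS, following ptr (and honouring skip) until a
 * leaf is reached; a NIL pointer or a section that does not actually cover
 * addr falls back to PHYS_SECTION_UNASSIGNED.
 */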

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}

static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}

MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    hwaddr len = *plen;

    for (;;) {
        section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        len = MIN(page, len);
    }

    *plen = len;
    *xlat = addr;
    return mr;
}
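
/*
 * Note (editorial): each loop iteration above resolves one IOMMU layer and
 * clamps the returned length to the IOMMU page implied by addr_mask (and,
 * for direct RAM access under Xen, to the target page), so callers doing
 * long transfers may need to call this repeatedly.
 */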

MemoryRegionSection *
address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                  hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = 0;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->exception_index != 0;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (VMStateSubsection[]) {
        {
            .vmsd = &vmstate_cpu_common_exception_index,
            .needed = cpu_common_exception_index_needed,
        } , {
            /* empty */
        }
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
{
    /* We only support one address space per cpu at the moment.  */
    assert(cpu->as == as);

    if (cpu->tcg_as_listener) {
        memory_listener_unregister(cpu->tcg_as_listener);
    } else {
        cpu->tcg_as_listener = g_new0(MemoryListener, 1);
    }
    cpu->tcg_as_listener->commit = tcg_commit;
    memory_listener_register(cpu->tcg_as_listener, as);
}
#endif

void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUState *some_cpu;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = 0;
    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&cpu->breakpoints);
    QTAILQ_INIT(&cpu->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->as = &address_space_memory;
    cpu->thread_id = qemu_get_thread_id();
#endif
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)

{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    vaddr len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    vaddr len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            CPUArchState *env = cpu->env_ptr;
            tb_flush(env);
        }
    }
#endif
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)block->host + (start - block->offset);
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
                                     unsigned client)
{
    if (length == 0)
        return;
    cpu_physical_memory_clear_dirty_range(start, length, client);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }
}

static void cpu_physical_memory_set_dirty_tracking(bool enable)
{
    in_migration = enable;
}

hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        iotlb = section - section->address_space->dispatch->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
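
/*
 * Encoding note (editorial): for RAM the iotlb value is the page-aligned
 * ram_addr with one of the PHYS_SECTION_* indices ORed into the low bits;
 * for MMIO it is the section index plus the in-page offset.  The low bits
 * stay unambiguous because phys_section_add() keeps the section count
 * below TARGET_PAGE_SIZE.
 */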
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size) = qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t))
{
    phys_mem_alloc = alloc;
}
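
/*
 * Usage sketch (editorial, hypothetical accelerator hook): an accelerator
 * would install its allocator before any RAM block is created, e.g.
 *
 *     static void *my_accel_ram_alloc(size_t size)
 *     {
 *         return qemu_anon_ram_alloc(size);  // plus accelerator-specific setup
 *     }
 *     ...
 *     phys_mem_set_alloc(my_accel_ram_alloc);
 */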

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}


static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#ifdef __linux__

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path,
                            Error **errp)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        goto error;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        error_setg(errp,
                   "host lacks kvm mmu notifiers, -mem-path unsupported");
        goto error;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(memory_region_name(block->mr));
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        error_setg_errno(errp, errno,
                         "unable to create backing store for hugepages");
        g_free(filename);
        goto error;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory)) {
        perror("ftruncate");
    }

    area = mmap(0, memory, PROT_READ | PROT_WRITE,
                (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE),
                fd, 0);
    if (area == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "unable to map backing store for hugepages");
        close(fd);
        goto error;
    }

    if (mem_prealloc) {
        os_mem_prealloc(fd, area, memory);
    }

    block->fd = fd;
    return area;

error:
    if (mem_prealloc) {
        exit(1);
    }
    return NULL;
}
#endif

static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QTAILQ_EMPTY(&ram_list.blocks))
        return 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}
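
/*
 * Example (editorial): with existing blocks covering [0, 0x40000000) and
 * [0x80000000, 0x80200000), a request for 0x20000000 bytes picks the
 * smallest gap that fits, i.e. offset 0x40000000, keeping ram_addr_t
 * space densely packed.
 */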
1173
652d7ec2 1174ram_addr_t last_ram_offset(void)
d17b5288
AW
1175{
1176 RAMBlock *block;
1177 ram_addr_t last = 0;
1178
a3161038 1179 QTAILQ_FOREACH(block, &ram_list.blocks, next)
d17b5288
AW
1180 last = MAX(last, block->offset + block->length);
1181
1182 return last;
1183}
1184
ddb97f1d
JB
1185static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1186{
1187 int ret;
ddb97f1d
JB
1188
1189 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
2ff3de68
MA
1190 if (!qemu_opt_get_bool(qemu_get_machine_opts(),
1191 "dump-guest-core", true)) {
ddb97f1d
JB
1192 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1193 if (ret) {
1194 perror("qemu_madvise");
1195 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1196 "but dump_guest_core=off specified\n");
1197 }
1198 }
1199}
1200
20cfe881 1201static RAMBlock *find_ram_block(ram_addr_t addr)
84b89d78 1202{
20cfe881 1203 RAMBlock *block;
84b89d78 1204
a3161038 1205 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
c5705a77 1206 if (block->offset == addr) {
20cfe881 1207 return block;
c5705a77
AK
1208 }
1209 }
20cfe881
HT
1210
1211 return NULL;
1212}
1213
1214void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1215{
1216 RAMBlock *new_block = find_ram_block(addr);
1217 RAMBlock *block;
1218
c5705a77
AK
1219 assert(new_block);
1220 assert(!new_block->idstr[0]);
84b89d78 1221
09e5ab63
AL
1222 if (dev) {
1223 char *id = qdev_get_dev_path(dev);
84b89d78
CM
1224 if (id) {
1225 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
7267c094 1226 g_free(id);
84b89d78
CM
1227 }
1228 }
1229 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1230
b2a8658e
UD
1231 /* This assumes the iothread lock is taken here too. */
1232 qemu_mutex_lock_ramlist();
a3161038 1233 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
c5705a77 1234 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
84b89d78
CM
1235 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1236 new_block->idstr);
1237 abort();
1238 }
1239 }
b2a8658e 1240 qemu_mutex_unlock_ramlist();
c5705a77
AK
1241}
1242
20cfe881
HT
1243void qemu_ram_unset_idstr(ram_addr_t addr)
1244{
1245 RAMBlock *block = find_ram_block(addr);
1246
1247 if (block) {
1248 memset(block->idstr, 0, sizeof(block->idstr));
1249 }
1250}
1251
8490fc78
LC
1252static int memory_try_enable_merging(void *addr, size_t len)
1253{
2ff3de68 1254 if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
8490fc78
LC
1255 /* disabled by the user */
1256 return 0;
1257 }
1258
1259 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1260}
1261
e1c57ab8 1262static ram_addr_t ram_block_add(RAMBlock *new_block)
c5705a77 1263{
e1c57ab8 1264 RAMBlock *block;
2152f5ca
JQ
1265 ram_addr_t old_ram_size, new_ram_size;
1266
1267 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
c5705a77 1268
b2a8658e
UD
1269 /* This assumes the iothread lock is taken here too. */
1270 qemu_mutex_lock_ramlist();
e1c57ab8
PB
1271 new_block->offset = find_ram_offset(new_block->length);
1272
1273 if (!new_block->host) {
1274 if (xen_enabled()) {
1275 xen_ram_alloc(new_block->offset, new_block->length, new_block->mr);
1276 } else {
1277 new_block->host = phys_mem_alloc(new_block->length);
39228250
MA
1278 if (!new_block->host) {
1279 fprintf(stderr, "Cannot set up guest memory '%s': %s\n",
83234bf2 1280 memory_region_name(new_block->mr), strerror(errno));
39228250
MA
1281 exit(1);
1282 }
e1c57ab8 1283 memory_try_enable_merging(new_block->host, new_block->length);
6977dfe6 1284 }
c902760f 1285 }
94a6b54f 1286
abb26d63
PB
1287 /* Keep the list sorted from biggest to smallest block. */
1288 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1289 if (block->length < new_block->length) {
1290 break;
1291 }
1292 }
1293 if (block) {
1294 QTAILQ_INSERT_BEFORE(block, new_block, next);
1295 } else {
1296 QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1297 }
0d6d3c87 1298 ram_list.mru_block = NULL;
94a6b54f 1299
f798b07f 1300 ram_list.version++;
b2a8658e 1301 qemu_mutex_unlock_ramlist();
f798b07f 1302
2152f5ca
JQ
1303 new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1304
1305 if (new_ram_size > old_ram_size) {
1ab4c8ce
JQ
1306 int i;
1307 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1308 ram_list.dirty_memory[i] =
1309 bitmap_zero_extend(ram_list.dirty_memory[i],
1310 old_ram_size, new_ram_size);
1311 }
2152f5ca 1312 }
e1c57ab8 1313 cpu_physical_memory_set_dirty_range(new_block->offset, new_block->length);
94a6b54f 1314
e1c57ab8
PB
1315 qemu_ram_setup_dump(new_block->host, new_block->length);
1316 qemu_madvise(new_block->host, new_block->length, QEMU_MADV_HUGEPAGE);
1317 qemu_madvise(new_block->host, new_block->length, QEMU_MADV_DONTFORK);
ddb97f1d 1318
e1c57ab8
PB
1319 if (kvm_enabled()) {
1320 kvm_setup_guest_memory(new_block->host, new_block->length);
1321 }
6f0437e8 1322
94a6b54f
PB
1323 return new_block->offset;
1324}
e9a1ab19 1325
0b183fc8 1326#ifdef __linux__
e1c57ab8 1327ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
dbcb8981 1328 bool share, const char *mem_path,
7f56e740 1329 Error **errp)
e1c57ab8
PB
1330{
1331 RAMBlock *new_block;
1332
1333 if (xen_enabled()) {
7f56e740
PB
1334 error_setg(errp, "-mem-path not supported with Xen");
1335 return -1;
e1c57ab8
PB
1336 }
1337
1338 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1339 /*
1340 * file_ram_alloc() needs to allocate just like
1341 * phys_mem_alloc, but we haven't bothered to provide
1342 * a hook there.
1343 */
7f56e740
PB
1344 error_setg(errp,
1345 "-mem-path not supported with this accelerator");
1346 return -1;
e1c57ab8
PB
1347 }
1348
1349 size = TARGET_PAGE_ALIGN(size);
1350 new_block = g_malloc0(sizeof(*new_block));
1351 new_block->mr = mr;
1352 new_block->length = size;
dbcb8981 1353 new_block->flags = share ? RAM_SHARED : 0;
7f56e740
PB
1354 new_block->host = file_ram_alloc(new_block, size,
1355 mem_path, errp);
1356 if (!new_block->host) {
1357 g_free(new_block);
1358 return -1;
1359 }
1360
e1c57ab8
PB
1361 return ram_block_add(new_block);
1362}
0b183fc8 1363#endif
e1c57ab8
PB
1364
1365ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1366 MemoryRegion *mr)
1367{
1368 RAMBlock *new_block;
1369
1370 size = TARGET_PAGE_ALIGN(size);
1371 new_block = g_malloc0(sizeof(*new_block));
1372 new_block->mr = mr;
1373 new_block->length = size;
1374 new_block->fd = -1;
1375 new_block->host = host;
1376 if (host) {
7bd4f430 1377 new_block->flags |= RAM_PREALLOC;
e1c57ab8
PB
1378 }
1379 return ram_block_add(new_block);
1380}
1381
c5705a77 1382ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
6977dfe6 1383{
c5705a77 1384 return qemu_ram_alloc_from_ptr(size, NULL, mr);
6977dfe6
YT
1385}
1386
1f2e98b6
AW
1387void qemu_ram_free_from_ptr(ram_addr_t addr)
1388{
1389 RAMBlock *block;
1390
b2a8658e
UD
1391 /* This assumes the iothread lock is taken here too. */
1392 qemu_mutex_lock_ramlist();
a3161038 1393 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1f2e98b6 1394 if (addr == block->offset) {
a3161038 1395 QTAILQ_REMOVE(&ram_list.blocks, block, next);
0d6d3c87 1396 ram_list.mru_block = NULL;
f798b07f 1397 ram_list.version++;
7267c094 1398 g_free(block);
b2a8658e 1399 break;
1f2e98b6
AW
1400 }
1401 }
b2a8658e 1402 qemu_mutex_unlock_ramlist();
1f2e98b6
AW
1403}
1404
c227f099 1405void qemu_ram_free(ram_addr_t addr)
e9a1ab19 1406{
04b16653
AW
1407 RAMBlock *block;
1408
b2a8658e
UD
1409 /* This assumes the iothread lock is taken here too. */
1410 qemu_mutex_lock_ramlist();
a3161038 1411 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
04b16653 1412 if (addr == block->offset) {
a3161038 1413 QTAILQ_REMOVE(&ram_list.blocks, block, next);
0d6d3c87 1414 ram_list.mru_block = NULL;
f798b07f 1415 ram_list.version++;
7bd4f430 1416 if (block->flags & RAM_PREALLOC) {
cd19cfa2 1417 ;
dfeaf2ab
MA
1418 } else if (xen_enabled()) {
1419 xen_invalidate_map_cache_entry(block->host);
089f3f76 1420#ifndef _WIN32
3435f395
MA
1421 } else if (block->fd >= 0) {
1422 munmap(block->host, block->length);
1423 close(block->fd);
089f3f76 1424#endif
04b16653 1425 } else {
dfeaf2ab 1426 qemu_anon_ram_free(block->host, block->length);
04b16653 1427 }
7267c094 1428 g_free(block);
b2a8658e 1429 break;
04b16653
AW
1430 }
1431 }
b2a8658e 1432 qemu_mutex_unlock_ramlist();
04b16653 1433
e9a1ab19
FB
1434}
1435
cd19cfa2
HY
1436#ifndef _WIN32
1437void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1438{
1439 RAMBlock *block;
1440 ram_addr_t offset;
1441 int flags;
1442 void *area, *vaddr;
1443
a3161038 1444 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
cd19cfa2
HY
1445 offset = addr - block->offset;
1446 if (offset < block->length) {
1447 vaddr = block->host + offset;
7bd4f430 1448 if (block->flags & RAM_PREALLOC) {
cd19cfa2 1449 ;
dfeaf2ab
MA
1450 } else if (xen_enabled()) {
1451 abort();
cd19cfa2
HY
1452 } else {
1453 flags = MAP_FIXED;
1454 munmap(vaddr, length);
3435f395 1455 if (block->fd >= 0) {
dbcb8981
PB
1456 flags |= (block->flags & RAM_SHARED ?
1457 MAP_SHARED : MAP_PRIVATE);
3435f395
MA
1458 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1459 flags, block->fd, offset);
cd19cfa2 1460 } else {
2eb9fbaa
MA
1461 /*
1462 * Remap needs to match alloc. Accelerators that
1463 * set phys_mem_alloc never remap. If they did,
1464 * we'd need a remap hook here.
1465 */
1466 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1467
cd19cfa2
HY
1468 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1469 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1470 flags, -1, 0);
cd19cfa2
HY
1471 }
1472 if (area != vaddr) {
f15fbc4b
AP
1473 fprintf(stderr, "Could not remap addr: "
1474 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
cd19cfa2
HY
1475 length, addr);
1476 exit(1);
1477 }
8490fc78 1478 memory_try_enable_merging(vaddr, length);
ddb97f1d 1479 qemu_ram_setup_dump(vaddr, length);
cd19cfa2
HY
1480 }
1481 return;
1482 }
1483 }
1484}
1485#endif /* !_WIN32 */
1486
a35ba7be
PB
1487int qemu_get_ram_fd(ram_addr_t addr)
1488{
1489 RAMBlock *block = qemu_get_ram_block(addr);
1490
1491 return block->fd;
1492}
1493
3fd74b84
DM
1494void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
1495{
1496 RAMBlock *block = qemu_get_ram_block(addr);
1497
1498 return block->host;
1499}
1500
1b5ec234
PB
1501/* Return a host pointer to ram allocated with qemu_ram_alloc.
1502 With the exception of the softmmu code in this file, this should
1503 only be used for local memory (e.g. video ram) that the device owns,
1504 and knows it isn't going to access beyond the end of the block.
1505
1506 It should not be used for general purpose DMA.
1507 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1508 */
1509void *qemu_get_ram_ptr(ram_addr_t addr)
1510{
1511 RAMBlock *block = qemu_get_ram_block(addr);
1512
0d6d3c87
PB
1513 if (xen_enabled()) {
1514 /* We need to check if the requested address is in the RAM
1515 * because we don't want to map the entire memory in QEMU.
1516 * In that case just map until the end of the page.
1517 */
1518 if (block->offset == 0) {
1519 return xen_map_cache(addr, 0, 0);
1520 } else if (block->host == NULL) {
1521 block->host =
1522 xen_map_cache(block->offset, block->length, 1);
1523 }
1524 }
1525 return block->host + (addr - block->offset);
dc828ca1
PB
1526}
1527
38bee5dc
SS
1528/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1529 * but takes a size argument */
cb85f7ab 1530static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
38bee5dc 1531{
8ab934f9
SS
1532 if (*size == 0) {
1533 return NULL;
1534 }
868bb33f 1535 if (xen_enabled()) {
e41d7c69 1536 return xen_map_cache(addr, *size, 1);
868bb33f 1537 } else {
38bee5dc
SS
1538 RAMBlock *block;
1539
a3161038 1540 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
38bee5dc
SS
1541 if (addr - block->offset < block->length) {
1542 if (addr - block->offset + *size > block->length)
1543 *size = block->length - addr + block->offset;
1544 return block->host + (addr - block->offset);
1545 }
1546 }
1547
1548 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1549 abort();
38bee5dc
SS
1550 }
1551}
1552
7443b437
PB
1553/* Some of the softmmu routines need to translate from a host pointer
1554 (typically a TLB entry) back to a ram offset. */
1b5ec234 1555MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 1556{
94a6b54f
PB
1557 RAMBlock *block;
1558 uint8_t *host = ptr;
1559
868bb33f 1560 if (xen_enabled()) {
e41d7c69 1561 *ram_addr = xen_ram_addr_from_mapcache(ptr);
1b5ec234 1562 return qemu_get_ram_block(*ram_addr)->mr;
712c2b41
SS
1563 }
1564
23887b79
PB
1565 block = ram_list.mru_block;
1566 if (block && block->host && host - block->host < block->length) {
1567 goto found;
1568 }
1569
a3161038 1570 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
432d268c
JN
1571 /* This case append when the block is not mapped. */
1572 if (block->host == NULL) {
1573 continue;
1574 }
f471a17e 1575 if (host - block->host < block->length) {
23887b79 1576 goto found;
f471a17e 1577 }
94a6b54f 1578 }
432d268c 1579
1b5ec234 1580 return NULL;
23887b79
PB
1581
1582found:
1583 *ram_addr = block->offset + (host - block->host);
1b5ec234 1584 return block->mr;
e890261f 1585}
f471a17e 1586
a8170e5e 1587static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
0e0df1e2 1588 uint64_t val, unsigned size)
9fa3e853 1589{
52159192 1590 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
0e0df1e2 1591 tb_invalidate_phys_page_fast(ram_addr, size);
3a7d929e 1592 }
0e0df1e2
AK
1593 switch (size) {
1594 case 1:
1595 stb_p(qemu_get_ram_ptr(ram_addr), val);
1596 break;
1597 case 2:
1598 stw_p(qemu_get_ram_ptr(ram_addr), val);
1599 break;
1600 case 4:
1601 stl_p(qemu_get_ram_ptr(ram_addr), val);
1602 break;
1603 default:
1604 abort();
3a7d929e 1605 }
6886867e 1606 cpu_physical_memory_set_dirty_range_nocode(ram_addr, size);
f23db169
FB
1607 /* we remove the notdirty callback only if the code has been
1608 flushed */
a2cd8c85 1609 if (!cpu_physical_memory_is_clean(ram_addr)) {
4917cf44 1610 CPUArchState *env = current_cpu->env_ptr;
93afeade 1611 tlb_set_dirty(env, current_cpu->mem_io_vaddr);
4917cf44 1612 }
9fa3e853
FB
1613}
1614
b018ddf6
PB
1615static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1616 unsigned size, bool is_write)
1617{
1618 return is_write;
1619}
1620
0e0df1e2 1621static const MemoryRegionOps notdirty_mem_ops = {
0e0df1e2 1622 .write = notdirty_mem_write,
b018ddf6 1623 .valid.accepts = notdirty_mem_accepts,
0e0df1e2 1624 .endianness = DEVICE_NATIVE_ENDIAN,
1ccde1cb
FB
1625};
1626
0f459d16 1627/* Generate a debug exception if a watchpoint has been hit. */
b4051334 1628static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16 1629{
93afeade
AF
1630 CPUState *cpu = current_cpu;
1631 CPUArchState *env = cpu->env_ptr;
06d55cc1 1632 target_ulong pc, cs_base;
0f459d16 1633 target_ulong vaddr;
a1d1bb31 1634 CPUWatchpoint *wp;
06d55cc1 1635 int cpu_flags;
0f459d16 1636
ff4700b0 1637 if (cpu->watchpoint_hit) {
06d55cc1
AL
1638 /* We re-entered the check after replacing the TB. Now raise
1639 * the debug interrupt so that is will trigger after the
1640 * current instruction. */
93afeade 1641 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
06d55cc1
AL
1642 return;
1643 }
93afeade 1644 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
ff4700b0 1645 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
b4051334
AL
1646 if ((vaddr == (wp->vaddr & len_mask) ||
1647 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28 1648 wp->flags |= BP_WATCHPOINT_HIT;
ff4700b0
AF
1649 if (!cpu->watchpoint_hit) {
1650 cpu->watchpoint_hit = wp;
239c51a5 1651 tb_check_watchpoint(cpu);
6e140f28 1652 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
27103424 1653 cpu->exception_index = EXCP_DEBUG;
5638d180 1654 cpu_loop_exit(cpu);
6e140f28
AL
1655 } else {
1656 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
648f034c 1657 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
0ea8cb88 1658 cpu_resume_from_signal(cpu, NULL);
6e140f28 1659 }
06d55cc1 1660 }
6e140f28
AL
1661 } else {
1662 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
1663 }
1664 }
1665}
1666
6658ffb8
PB
1667/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1668 so these check for a hit then pass through to the normal out-of-line
1669 phys routines. */
a8170e5e 1670static uint64_t watch_mem_read(void *opaque, hwaddr addr,
1ec9b909 1671 unsigned size)
6658ffb8 1672{
1ec9b909
AK
1673 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1674 switch (size) {
2c17449b 1675 case 1: return ldub_phys(&address_space_memory, addr);
41701aa4 1676 case 2: return lduw_phys(&address_space_memory, addr);
fdfba1a2 1677 case 4: return ldl_phys(&address_space_memory, addr);
1ec9b909
AK
1678 default: abort();
1679 }
6658ffb8
PB
1680}
1681
a8170e5e 1682static void watch_mem_write(void *opaque, hwaddr addr,
1ec9b909 1683 uint64_t val, unsigned size)
6658ffb8 1684{
1ec9b909
AK
1685 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1686 switch (size) {
67364150 1687 case 1:
db3be60d 1688 stb_phys(&address_space_memory, addr, val);
67364150
MF
1689 break;
1690 case 2:
5ce5944d 1691 stw_phys(&address_space_memory, addr, val);
67364150
MF
1692 break;
1693 case 4:
ab1da857 1694 stl_phys(&address_space_memory, addr, val);
67364150 1695 break;
1ec9b909
AK
1696 default: abort();
1697 }
6658ffb8
PB
1698}
1699
1ec9b909
AK
1700static const MemoryRegionOps watch_mem_ops = {
1701 .read = watch_mem_read,
1702 .write = watch_mem_write,
1703 .endianness = DEVICE_NATIVE_ENDIAN,
6658ffb8 1704};
6658ffb8 1705
a8170e5e 1706static uint64_t subpage_read(void *opaque, hwaddr addr,
70c68e44 1707 unsigned len)
db7b5426 1708{
acc9d80b
JK
1709 subpage_t *subpage = opaque;
1710 uint8_t buf[4];
791af8c8 1711
db7b5426 1712#if defined(DEBUG_SUBPAGE)
016e9d62 1713 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
acc9d80b 1714 subpage, len, addr);
db7b5426 1715#endif
acc9d80b
JK
1716 address_space_read(subpage->as, addr + subpage->base, buf, len);
1717 switch (len) {
1718 case 1:
1719 return ldub_p(buf);
1720 case 2:
1721 return lduw_p(buf);
1722 case 4:
1723 return ldl_p(buf);
1724 default:
1725 abort();
1726 }
db7b5426
BS
1727}
1728
a8170e5e 1729static void subpage_write(void *opaque, hwaddr addr,
70c68e44 1730 uint64_t value, unsigned len)
db7b5426 1731{
acc9d80b
JK
1732 subpage_t *subpage = opaque;
1733 uint8_t buf[4];
1734
db7b5426 1735#if defined(DEBUG_SUBPAGE)
016e9d62 1736 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
acc9d80b
JK
1737 " value %"PRIx64"\n",
1738 __func__, subpage, len, addr, value);
db7b5426 1739#endif
acc9d80b
JK
1740 switch (len) {
1741 case 1:
1742 stb_p(buf, value);
1743 break;
1744 case 2:
1745 stw_p(buf, value);
1746 break;
1747 case 4:
1748 stl_p(buf, value);
1749 break;
1750 default:
1751 abort();
1752 }
1753 address_space_write(subpage->as, addr + subpage->base, buf, len);
db7b5426
BS
1754}
1755
c353e4cc 1756static bool subpage_accepts(void *opaque, hwaddr addr,
016e9d62 1757 unsigned len, bool is_write)
c353e4cc 1758{
acc9d80b 1759 subpage_t *subpage = opaque;
c353e4cc 1760#if defined(DEBUG_SUBPAGE)
016e9d62 1761 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
acc9d80b 1762 __func__, subpage, is_write ? 'w' : 'r', len, addr);
c353e4cc
PB
1763#endif
1764
acc9d80b 1765 return address_space_access_valid(subpage->as, addr + subpage->base,
016e9d62 1766 len, is_write);
c353e4cc
PB
1767}
1768
70c68e44
AK
1769static const MemoryRegionOps subpage_ops = {
1770 .read = subpage_read,
1771 .write = subpage_write,
c353e4cc 1772 .valid.accepts = subpage_accepts,
70c68e44 1773 .endianness = DEVICE_NATIVE_ENDIAN,
db7b5426
BS
1774};
1775
c227f099 1776static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 1777 uint16_t section)
db7b5426
BS
1778{
1779 int idx, eidx;
1780
1781 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1782 return -1;
1783 idx = SUBPAGE_IDX(start);
1784 eidx = SUBPAGE_IDX(end);
1785#if defined(DEBUG_SUBPAGE)
016e9d62
AK
1786 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
1787 __func__, mmio, start, end, idx, eidx, section);
db7b5426 1788#endif
db7b5426 1789 for (; idx <= eidx; idx++) {
5312bd8b 1790 mmio->sub_section[idx] = section;
db7b5426
BS
1791 }
1792
1793 return 0;
1794}
1795
acc9d80b 1796static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
db7b5426 1797{
c227f099 1798 subpage_t *mmio;
db7b5426 1799
7267c094 1800 mmio = g_malloc0(sizeof(subpage_t));
1eec614b 1801
acc9d80b 1802 mmio->as = as;
1eec614b 1803 mmio->base = base;
2c9b15ca 1804 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
b4fefef9 1805 NULL, TARGET_PAGE_SIZE);
b3b00c78 1806 mmio->iomem.subpage = true;
db7b5426 1807#if defined(DEBUG_SUBPAGE)
016e9d62
AK
1808 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
1809 mmio, base, TARGET_PAGE_SIZE);
db7b5426 1810#endif
b41aac4f 1811 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
db7b5426
BS
1812
1813 return mmio;
1814}
1815
a656e22f
PC
1816static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
1817 MemoryRegion *mr)
5312bd8b 1818{
a656e22f 1819 assert(as);
5312bd8b 1820 MemoryRegionSection section = {
a656e22f 1821 .address_space = as,
5312bd8b
AK
1822 .mr = mr,
1823 .offset_within_address_space = 0,
1824 .offset_within_region = 0,
052e87b0 1825 .size = int128_2_64(),
5312bd8b
AK
1826 };
1827
53cb28cb 1828 return phys_section_add(map, &section);
5312bd8b
AK
1829}
1830
77717094 1831MemoryRegion *iotlb_to_region(AddressSpace *as, hwaddr index)
aa102231 1832{
77717094 1833 return as->dispatch->map.sections[index & ~TARGET_PAGE_MASK].mr;
aa102231
AK
1834}
1835
e9179ce1
AK
1836static void io_mem_init(void)
1837{
1f6245e5 1838 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
2c9b15ca 1839 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
1f6245e5 1840 NULL, UINT64_MAX);
2c9b15ca 1841 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
1f6245e5 1842 NULL, UINT64_MAX);
2c9b15ca 1843 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
1f6245e5 1844 NULL, UINT64_MAX);
e9179ce1
AK
1845}
1846
ac1970fb 1847static void mem_begin(MemoryListener *listener)
00752703
PB
1848{
1849 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
53cb28cb
MA
1850 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
1851 uint16_t n;
1852
a656e22f 1853 n = dummy_section(&d->map, as, &io_mem_unassigned);
53cb28cb 1854 assert(n == PHYS_SECTION_UNASSIGNED);
a656e22f 1855 n = dummy_section(&d->map, as, &io_mem_notdirty);
53cb28cb 1856 assert(n == PHYS_SECTION_NOTDIRTY);
a656e22f 1857 n = dummy_section(&d->map, as, &io_mem_rom);
53cb28cb 1858 assert(n == PHYS_SECTION_ROM);
a656e22f 1859 n = dummy_section(&d->map, as, &io_mem_watch);
53cb28cb 1860 assert(n == PHYS_SECTION_WATCH);
00752703 1861
9736e55b 1862 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
00752703
PB
1863 d->as = as;
1864 as->next_dispatch = d;
1865}
1866
1867static void mem_commit(MemoryListener *listener)
ac1970fb 1868{
89ae337a 1869 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
0475d94f
PB
1870 AddressSpaceDispatch *cur = as->dispatch;
1871 AddressSpaceDispatch *next = as->next_dispatch;
1872
53cb28cb 1873 phys_page_compact_all(next, next->map.nodes_nb);
b35ba30f 1874
0475d94f 1875 as->dispatch = next;
b41aac4f 1876
53cb28cb
MA
1877 if (cur) {
1878 phys_sections_free(&cur->map);
1879 g_free(cur);
1880 }
9affd6fc
PB
1881}
1882
1d71148e 1883static void tcg_commit(MemoryListener *listener)
50c1e149 1884{
182735ef 1885 CPUState *cpu;
117712c3
AK
1886
1887 /* Since each CPU stores ram addresses in its TLB cache, we must
1888 reset the modified entries. */
1889 /* XXX: slow! */
bdc44640 1890 CPU_FOREACH(cpu) {
33bde2e1
EI
1891 /* FIXME: Disentangle the cpu.h circular file dependencies so we can
1892 get the right CPU directly from the listener. */
1893 if (cpu->tcg_as_listener != listener) {
1894 continue;
1895 }
00c8cb0a 1896 tlb_flush(cpu, 1);
117712c3 1897 }
50c1e149
AK
1898}
1899
93632747
AK
1900static void core_log_global_start(MemoryListener *listener)
1901{
981fdf23 1902 cpu_physical_memory_set_dirty_tracking(true);
93632747
AK
1903}
1904
1905static void core_log_global_stop(MemoryListener *listener)
1906{
981fdf23 1907 cpu_physical_memory_set_dirty_tracking(false);
93632747
AK
1908}
1909
93632747 1910static MemoryListener core_memory_listener = {
93632747
AK
1911 .log_global_start = core_log_global_start,
1912 .log_global_stop = core_log_global_stop,
ac1970fb 1913 .priority = 1,
93632747
AK
1914};
1915
ac1970fb
AK
1916void address_space_init_dispatch(AddressSpace *as)
1917{
00752703 1918 as->dispatch = NULL;
89ae337a 1919 as->dispatch_listener = (MemoryListener) {
ac1970fb 1920 .begin = mem_begin,
00752703 1921 .commit = mem_commit,
ac1970fb
AK
1922 .region_add = mem_add,
1923 .region_nop = mem_add,
1924 .priority = 0,
1925 };
89ae337a 1926 memory_listener_register(&as->dispatch_listener, as);
ac1970fb
AK
1927}
1928
83f3c251
AK
1929void address_space_destroy_dispatch(AddressSpace *as)
1930{
1931 AddressSpaceDispatch *d = as->dispatch;
1932
89ae337a 1933 memory_listener_unregister(&as->dispatch_listener);
83f3c251
AK
1934 g_free(d);
1935 as->dispatch = NULL;
1936}
1937
62152b8a
AK
1938static void memory_map_init(void)
1939{
7267c094 1940 system_memory = g_malloc(sizeof(*system_memory));
03f49957 1941
57271d63 1942 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
7dca8043 1943 address_space_init(&address_space_memory, system_memory, "memory");
309cb471 1944
7267c094 1945 system_io = g_malloc(sizeof(*system_io));
3bb28b72
JK
1946 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
1947 65536);
7dca8043 1948 address_space_init(&address_space_io, system_io, "I/O");
93632747 1949
f6790af6 1950 memory_listener_register(&core_memory_listener, &address_space_memory);
62152b8a
AK
1951}
1952
1953MemoryRegion *get_system_memory(void)
1954{
1955 return system_memory;
1956}
1957
309cb471
AK
1958MemoryRegion *get_system_io(void)
1959{
1960 return system_io;
1961}
1962
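/*
 * Illustrative sketch only (not part of this file): how a board model
 * typically places RAM into the region returned by get_system_memory().
 * The name, size and base address are made up; the calls assume the
 * memory_region_init_ram() signature of this tree (no Error ** argument).
 */
#if 0
static void example_add_ram(void)
{
    MemoryRegion *ram = g_new(MemoryRegion, 1);

    memory_region_init_ram(ram, NULL, "example.ram", 128 * 1024 * 1024);
    vmstate_register_ram_global(ram);
    memory_region_add_subregion(get_system_memory(), 0, ram);
}
#endif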
e2eef170
PB
1963#endif /* !defined(CONFIG_USER_ONLY) */
1964
13eb76e0
FB
1965/* physical memory access (slow version, mainly for debug) */
1966#if defined(CONFIG_USER_ONLY)
f17ec444 1967int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
a68fe89c 1968 uint8_t *buf, int len, int is_write)
13eb76e0
FB
1969{
1970 int l, flags;
1971 target_ulong page;
53a5960a 1972 void * p;
13eb76e0
FB
1973
1974 while (len > 0) {
1975 page = addr & TARGET_PAGE_MASK;
1976 l = (page + TARGET_PAGE_SIZE) - addr;
1977 if (l > len)
1978 l = len;
1979 flags = page_get_flags(page);
1980 if (!(flags & PAGE_VALID))
a68fe89c 1981 return -1;
13eb76e0
FB
1982 if (is_write) {
1983 if (!(flags & PAGE_WRITE))
a68fe89c 1984 return -1;
579a97f7 1985 /* XXX: this code should not depend on lock_user */
72fb7daa 1986 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 1987 return -1;
72fb7daa
AJ
1988 memcpy(p, buf, l);
1989 unlock_user(p, addr, l);
13eb76e0
FB
1990 } else {
1991 if (!(flags & PAGE_READ))
a68fe89c 1992 return -1;
579a97f7 1993 /* XXX: this code should not depend on lock_user */
72fb7daa 1994 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 1995 return -1;
72fb7daa 1996 memcpy(buf, p, l);
5b257578 1997 unlock_user(p, addr, 0);
13eb76e0
FB
1998 }
1999 len -= l;
2000 buf += l;
2001 addr += l;
2002 }
a68fe89c 2003 return 0;
13eb76e0 2004}
8df1cd07 2005
13eb76e0 2006#else
51d7a9eb 2007
a8170e5e
AK
2008static void invalidate_and_set_dirty(hwaddr addr,
2009 hwaddr length)
51d7a9eb 2010{
a2cd8c85 2011 if (cpu_physical_memory_is_clean(addr)) {
51d7a9eb
AP
2012 /* invalidate code */
2013 tb_invalidate_phys_page_range(addr, addr + length, 0);
2014 /* set dirty bit */
6886867e 2015 cpu_physical_memory_set_dirty_range_nocode(addr, length);
51d7a9eb 2016 }
e226939d 2017 xen_modified_memory(addr, length);
51d7a9eb
AP
2018}
2019
23326164 2020static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
82f2563f 2021{
e1622f4b 2022 unsigned access_size_max = mr->ops->valid.max_access_size;
23326164
RH
2023
2024 /* Regions are assumed to support 1-4 byte accesses unless
2025 otherwise specified. */
23326164
RH
2026 if (access_size_max == 0) {
2027 access_size_max = 4;
2028 }
2029
2030 /* Bound the maximum access by the alignment of the address. */
2031 if (!mr->ops->impl.unaligned) {
2032 unsigned align_size_max = addr & -addr;
2033 if (align_size_max != 0 && align_size_max < access_size_max) {
2034 access_size_max = align_size_max;
2035 }
82f2563f 2036 }
23326164
RH
2037
2038 /* Don't attempt accesses larger than the maximum. */
2039 if (l > access_size_max) {
2040 l = access_size_max;
82f2563f 2041 }
098178f2
PB
2042 if (l & (l - 1)) {
2043 l = 1 << (qemu_fls(l) - 1);
2044 }
23326164
RH
2045
2046 return l;
82f2563f
PB
2047}
2048
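/*
 * Worked example (hypothetical region): with ops->valid.max_access_size == 4
 * and no unaligned support, an 8-byte request at addr == 0x1002 is first
 * capped to 4 bytes, then to 2 bytes by the address alignment
 * (0x1002 & -0x1002 == 2); the final power-of-two rounding leaves l == 2,
 * so address_space_rw() issues a 2-byte access and loops for the rest.
 */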
fd8aaa76 2049bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
ac1970fb 2050 int len, bool is_write)
13eb76e0 2051{
149f54b5 2052 hwaddr l;
13eb76e0 2053 uint8_t *ptr;
791af8c8 2054 uint64_t val;
149f54b5 2055 hwaddr addr1;
5c8a00ce 2056 MemoryRegion *mr;
fd8aaa76 2057 bool error = false;
3b46e624 2058
13eb76e0 2059 while (len > 0) {
149f54b5 2060 l = len;
5c8a00ce 2061 mr = address_space_translate(as, addr, &addr1, &l, is_write);
3b46e624 2062
13eb76e0 2063 if (is_write) {
5c8a00ce
PB
2064 if (!memory_access_is_direct(mr, is_write)) {
2065 l = memory_access_size(mr, l, addr1);
4917cf44 2066 /* XXX: could force current_cpu to NULL to avoid
6a00d601 2067 potential bugs */
23326164
RH
2068 switch (l) {
2069 case 8:
2070 /* 64 bit write access */
2071 val = ldq_p(buf);
2072 error |= io_mem_write(mr, addr1, val, 8);
2073 break;
2074 case 4:
1c213d19 2075 /* 32 bit write access */
c27004ec 2076 val = ldl_p(buf);
5c8a00ce 2077 error |= io_mem_write(mr, addr1, val, 4);
23326164
RH
2078 break;
2079 case 2:
1c213d19 2080 /* 16 bit write access */
c27004ec 2081 val = lduw_p(buf);
5c8a00ce 2082 error |= io_mem_write(mr, addr1, val, 2);
23326164
RH
2083 break;
2084 case 1:
1c213d19 2085 /* 8 bit write access */
c27004ec 2086 val = ldub_p(buf);
5c8a00ce 2087 error |= io_mem_write(mr, addr1, val, 1);
23326164
RH
2088 break;
2089 default:
2090 abort();
13eb76e0 2091 }
2bbfa05d 2092 } else {
5c8a00ce 2093 addr1 += memory_region_get_ram_addr(mr);
13eb76e0 2094 /* RAM case */
5579c7f3 2095 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 2096 memcpy(ptr, buf, l);
51d7a9eb 2097 invalidate_and_set_dirty(addr1, l);
13eb76e0
FB
2098 }
2099 } else {
5c8a00ce 2100 if (!memory_access_is_direct(mr, is_write)) {
13eb76e0 2101 /* I/O case */
5c8a00ce 2102 l = memory_access_size(mr, l, addr1);
23326164
RH
2103 switch (l) {
2104 case 8:
2105 /* 64 bit read access */
2106 error |= io_mem_read(mr, addr1, &val, 8);
2107 stq_p(buf, val);
2108 break;
2109 case 4:
13eb76e0 2110 /* 32 bit read access */
5c8a00ce 2111 error |= io_mem_read(mr, addr1, &val, 4);
c27004ec 2112 stl_p(buf, val);
23326164
RH
2113 break;
2114 case 2:
13eb76e0 2115 /* 16 bit read access */
5c8a00ce 2116 error |= io_mem_read(mr, addr1, &val, 2);
c27004ec 2117 stw_p(buf, val);
23326164
RH
2118 break;
2119 case 1:
1c213d19 2120 /* 8 bit read access */
5c8a00ce 2121 error |= io_mem_read(mr, addr1, &val, 1);
c27004ec 2122 stb_p(buf, val);
23326164
RH
2123 break;
2124 default:
2125 abort();
13eb76e0
FB
2126 }
2127 } else {
2128 /* RAM case */
5c8a00ce 2129 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
f3705d53 2130 memcpy(buf, ptr, l);
13eb76e0
FB
2131 }
2132 }
2133 len -= l;
2134 buf += l;
2135 addr += l;
2136 }
fd8aaa76
PB
2137
2138 return error;
13eb76e0 2139}
8df1cd07 2140
fd8aaa76 2141bool address_space_write(AddressSpace *as, hwaddr addr,
ac1970fb
AK
2142 const uint8_t *buf, int len)
2143{
fd8aaa76 2144 return address_space_rw(as, addr, (uint8_t *)buf, len, true);
ac1970fb
AK
2145}
2146
fd8aaa76 2147bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
ac1970fb 2148{
fd8aaa76 2149 return address_space_rw(as, addr, buf, len, false);
ac1970fb
AK
2150}
2151
2152
a8170e5e 2153void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
ac1970fb
AK
2154 int len, int is_write)
2155{
fd8aaa76 2156 address_space_rw(&address_space_memory, addr, buf, len, is_write);
ac1970fb
AK
2157}
2158
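/*
 * Illustrative sketch only (not part of this file): using the convenience
 * wrapper above on the system address space.  The guest physical address
 * and data are hypothetical.
 */
#if 0
static void example_phys_rw(void)
{
    uint8_t buf[4] = { 0xde, 0xad, 0xbe, 0xef };

    cpu_physical_memory_rw(0x1000, buf, sizeof(buf), 1);   /* write */
    cpu_physical_memory_rw(0x1000, buf, sizeof(buf), 0);   /* read back */
}
#endif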
582b55a9
AG
2159enum write_rom_type {
2160 WRITE_DATA,
2161 FLUSH_CACHE,
2162};
2163
2a221651 2164static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
582b55a9 2165 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
d0ecd2aa 2166{
149f54b5 2167 hwaddr l;
d0ecd2aa 2168 uint8_t *ptr;
149f54b5 2169 hwaddr addr1;
5c8a00ce 2170 MemoryRegion *mr;
3b46e624 2171
d0ecd2aa 2172 while (len > 0) {
149f54b5 2173 l = len;
2a221651 2174 mr = address_space_translate(as, addr, &addr1, &l, true);
3b46e624 2175
5c8a00ce
PB
2176 if (!(memory_region_is_ram(mr) ||
2177 memory_region_is_romd(mr))) {
d0ecd2aa
FB
2178 /* do nothing */
2179 } else {
5c8a00ce 2180 addr1 += memory_region_get_ram_addr(mr);
d0ecd2aa 2181 /* ROM/RAM case */
5579c7f3 2182 ptr = qemu_get_ram_ptr(addr1);
582b55a9
AG
2183 switch (type) {
2184 case WRITE_DATA:
2185 memcpy(ptr, buf, l);
2186 invalidate_and_set_dirty(addr1, l);
2187 break;
2188 case FLUSH_CACHE:
2189 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2190 break;
2191 }
d0ecd2aa
FB
2192 }
2193 len -= l;
2194 buf += l;
2195 addr += l;
2196 }
2197}
2198
582b55a9 2199/* used for ROM loading: can write to RAM and ROM */
2a221651 2200void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
582b55a9
AG
2201 const uint8_t *buf, int len)
2202{
2a221651 2203 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
582b55a9
AG
2204}
2205
2206void cpu_flush_icache_range(hwaddr start, int len)
2207{
2208 /*
2209 * This function should do the same thing as an icache flush that was
2210 * triggered from within the guest. For TCG we are always cache coherent,
2211 * so there is no need to flush anything. For KVM / Xen we need to flush
2212 * the host's instruction cache at least.
2213 */
2214 if (tcg_enabled()) {
2215 return;
2216 }
2217
2a221651
EI
2218 cpu_physical_memory_write_rom_internal(&address_space_memory,
2219 start, NULL, len, FLUSH_CACHE);
582b55a9
AG
2220}
2221
6d16c2f8 2222typedef struct {
d3e71559 2223 MemoryRegion *mr;
6d16c2f8 2224 void *buffer;
a8170e5e
AK
2225 hwaddr addr;
2226 hwaddr len;
6d16c2f8
AL
2227} BounceBuffer;
2228
2229static BounceBuffer bounce;
2230
ba223c29
AL
2231typedef struct MapClient {
2232 void *opaque;
2233 void (*callback)(void *opaque);
72cf2d4f 2234 QLIST_ENTRY(MapClient) link;
ba223c29
AL
2235} MapClient;
2236
72cf2d4f
BS
2237static QLIST_HEAD(map_client_list, MapClient) map_client_list
2238 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
2239
2240void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2241{
7267c094 2242 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
2243
2244 client->opaque = opaque;
2245 client->callback = callback;
72cf2d4f 2246 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
2247 return client;
2248}
2249
8b9c99d9 2250static void cpu_unregister_map_client(void *_client)
ba223c29
AL
2251{
2252 MapClient *client = (MapClient *)_client;
2253
72cf2d4f 2254 QLIST_REMOVE(client, link);
7267c094 2255 g_free(client);
ba223c29
AL
2256}
2257
2258static void cpu_notify_map_clients(void)
2259{
2260 MapClient *client;
2261
72cf2d4f
BS
2262 while (!QLIST_EMPTY(&map_client_list)) {
2263 client = QLIST_FIRST(&map_client_list);
ba223c29 2264 client->callback(client->opaque);
34d5e948 2265 cpu_unregister_map_client(client);
ba223c29
AL
2266 }
2267}
2268
51644ab7
PB
2269bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2270{
5c8a00ce 2271 MemoryRegion *mr;
51644ab7
PB
2272 hwaddr l, xlat;
2273
2274 while (len > 0) {
2275 l = len;
5c8a00ce
PB
2276 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2277 if (!memory_access_is_direct(mr, is_write)) {
2278 l = memory_access_size(mr, l, addr);
2279 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
51644ab7
PB
2280 return false;
2281 }
2282 }
2283
2284 len -= l;
2285 addr += l;
2286 }
2287 return true;
2288}
2289
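/*
 * Illustrative sketch only (not part of this file): a DMA device model
 * might validate a transfer window before starting it.  The address and
 * length are hypothetical.
 */
#if 0
static bool example_dma_window_ok(AddressSpace *as)
{
    return address_space_access_valid(as, 0x40000000, 4096, true);
}
#endif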
6d16c2f8
AL
2290/* Map a physical memory region into a host virtual address.
2291 * May map a subset of the requested range, given by and returned in *plen.
2292 * May return NULL if resources needed to perform the mapping are exhausted.
2293 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
2294 * Use cpu_register_map_client() to know when retrying the map operation is
2295 * likely to succeed.
6d16c2f8 2296 */
ac1970fb 2297void *address_space_map(AddressSpace *as,
a8170e5e
AK
2298 hwaddr addr,
2299 hwaddr *plen,
ac1970fb 2300 bool is_write)
6d16c2f8 2301{
a8170e5e 2302 hwaddr len = *plen;
e3127ae0
PB
2303 hwaddr done = 0;
2304 hwaddr l, xlat, base;
2305 MemoryRegion *mr, *this_mr;
2306 ram_addr_t raddr;
6d16c2f8 2307
e3127ae0
PB
2308 if (len == 0) {
2309 return NULL;
2310 }
38bee5dc 2311
e3127ae0
PB
2312 l = len;
2313 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2314 if (!memory_access_is_direct(mr, is_write)) {
2315 if (bounce.buffer) {
2316 return NULL;
6d16c2f8 2317 }
e85d9db5
KW
2318 /* Avoid unbounded allocations */
2319 l = MIN(l, TARGET_PAGE_SIZE);
2320 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
e3127ae0
PB
2321 bounce.addr = addr;
2322 bounce.len = l;
d3e71559
PB
2323
2324 memory_region_ref(mr);
2325 bounce.mr = mr;
e3127ae0
PB
2326 if (!is_write) {
2327 address_space_read(as, addr, bounce.buffer, l);
8ab934f9 2328 }
6d16c2f8 2329
e3127ae0
PB
2330 *plen = l;
2331 return bounce.buffer;
2332 }
2333
2334 base = xlat;
2335 raddr = memory_region_get_ram_addr(mr);
2336
2337 for (;;) {
6d16c2f8
AL
2338 len -= l;
2339 addr += l;
e3127ae0
PB
2340 done += l;
2341 if (len == 0) {
2342 break;
2343 }
2344
2345 l = len;
2346 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2347 if (this_mr != mr || xlat != base + done) {
2348 break;
2349 }
6d16c2f8 2350 }
e3127ae0 2351
d3e71559 2352 memory_region_ref(mr);
e3127ae0
PB
2353 *plen = done;
2354 return qemu_ram_ptr_length(raddr + base, plen);
6d16c2f8
AL
2355}
2356
ac1970fb 2357/* Unmaps a memory region previously mapped by address_space_map().
6d16c2f8
AL
2358 * Will also mark the memory as dirty if is_write == 1. access_len gives
2359 * the amount of memory that was actually read or written by the caller.
2360 */
a8170e5e
AK
2361void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2362 int is_write, hwaddr access_len)
6d16c2f8
AL
2363{
2364 if (buffer != bounce.buffer) {
d3e71559
PB
2365 MemoryRegion *mr;
2366 ram_addr_t addr1;
2367
2368 mr = qemu_ram_addr_from_host(buffer, &addr1);
2369 assert(mr != NULL);
6d16c2f8 2370 if (is_write) {
6886867e 2371 invalidate_and_set_dirty(addr1, access_len);
6d16c2f8 2372 }
868bb33f 2373 if (xen_enabled()) {
e41d7c69 2374 xen_invalidate_map_cache_entry(buffer);
050a0ddf 2375 }
d3e71559 2376 memory_region_unref(mr);
6d16c2f8
AL
2377 return;
2378 }
2379 if (is_write) {
ac1970fb 2380 address_space_write(as, bounce.addr, bounce.buffer, access_len);
6d16c2f8 2381 }
f8a83245 2382 qemu_vfree(bounce.buffer);
6d16c2f8 2383 bounce.buffer = NULL;
d3e71559 2384 memory_region_unref(bounce.mr);
ba223c29 2385 cpu_notify_map_clients();
6d16c2f8 2386}
d0ecd2aa 2387
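/*
 * Illustrative sketch only (not part of this file): the intended
 * map/copy/unmap pattern for address_space_map()/address_space_unmap().
 * The caller, address and length are hypothetical; a real user would also
 * handle *plen being shortened and use cpu_register_map_client() to retry
 * when NULL is returned because the bounce buffer is busy.
 */
#if 0
static void example_dma_write(AddressSpace *as, hwaddr guest_addr,
                              const uint8_t *data, hwaddr len)
{
    hwaddr plen = len;
    void *host = address_space_map(as, guest_addr, &plen, true);

    if (!host) {
        return;                     /* register a map client and retry later */
    }
    memcpy(host, data, plen);       /* plen may be smaller than len */
    address_space_unmap(as, host, plen, true, plen);
}
#endif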
a8170e5e
AK
2388void *cpu_physical_memory_map(hwaddr addr,
2389 hwaddr *plen,
ac1970fb
AK
2390 int is_write)
2391{
2392 return address_space_map(&address_space_memory, addr, plen, is_write);
2393}
2394
a8170e5e
AK
2395void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2396 int is_write, hwaddr access_len)
ac1970fb
AK
2397{
2398 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2399}
2400
8df1cd07 2401/* warning: addr must be aligned */
fdfba1a2 2402static inline uint32_t ldl_phys_internal(AddressSpace *as, hwaddr addr,
1e78bcc1 2403 enum device_endian endian)
8df1cd07 2404{
8df1cd07 2405 uint8_t *ptr;
791af8c8 2406 uint64_t val;
5c8a00ce 2407 MemoryRegion *mr;
149f54b5
PB
2408 hwaddr l = 4;
2409 hwaddr addr1;
8df1cd07 2410
fdfba1a2 2411 mr = address_space_translate(as, addr, &addr1, &l, false);
5c8a00ce 2412 if (l < 4 || !memory_access_is_direct(mr, false)) {
8df1cd07 2413 /* I/O case */
5c8a00ce 2414 io_mem_read(mr, addr1, &val, 4);
1e78bcc1
AG
2415#if defined(TARGET_WORDS_BIGENDIAN)
2416 if (endian == DEVICE_LITTLE_ENDIAN) {
2417 val = bswap32(val);
2418 }
2419#else
2420 if (endian == DEVICE_BIG_ENDIAN) {
2421 val = bswap32(val);
2422 }
2423#endif
8df1cd07
FB
2424 } else {
2425 /* RAM case */
5c8a00ce 2426 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2427 & TARGET_PAGE_MASK)
149f54b5 2428 + addr1);
1e78bcc1
AG
2429 switch (endian) {
2430 case DEVICE_LITTLE_ENDIAN:
2431 val = ldl_le_p(ptr);
2432 break;
2433 case DEVICE_BIG_ENDIAN:
2434 val = ldl_be_p(ptr);
2435 break;
2436 default:
2437 val = ldl_p(ptr);
2438 break;
2439 }
8df1cd07
FB
2440 }
2441 return val;
2442}
2443
fdfba1a2 2444uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2445{
fdfba1a2 2446 return ldl_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2447}
2448
fdfba1a2 2449uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2450{
fdfba1a2 2451 return ldl_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2452}
2453
fdfba1a2 2454uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2455{
fdfba1a2 2456 return ldl_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2457}
2458
84b7b8e7 2459/* warning: addr must be aligned */
2c17449b 2460static inline uint64_t ldq_phys_internal(AddressSpace *as, hwaddr addr,
1e78bcc1 2461 enum device_endian endian)
84b7b8e7 2462{
84b7b8e7
FB
2463 uint8_t *ptr;
2464 uint64_t val;
5c8a00ce 2465 MemoryRegion *mr;
149f54b5
PB
2466 hwaddr l = 8;
2467 hwaddr addr1;
84b7b8e7 2468
2c17449b 2469 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2470 false);
2471 if (l < 8 || !memory_access_is_direct(mr, false)) {
84b7b8e7 2472 /* I/O case */
5c8a00ce 2473 io_mem_read(mr, addr1, &val, 8);
968a5627
PB
2474#if defined(TARGET_WORDS_BIGENDIAN)
2475 if (endian == DEVICE_LITTLE_ENDIAN) {
2476 val = bswap64(val);
2477 }
2478#else
2479 if (endian == DEVICE_BIG_ENDIAN) {
2480 val = bswap64(val);
2481 }
84b7b8e7
FB
2482#endif
2483 } else {
2484 /* RAM case */
5c8a00ce 2485 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2486 & TARGET_PAGE_MASK)
149f54b5 2487 + addr1);
1e78bcc1
AG
2488 switch (endian) {
2489 case DEVICE_LITTLE_ENDIAN:
2490 val = ldq_le_p(ptr);
2491 break;
2492 case DEVICE_BIG_ENDIAN:
2493 val = ldq_be_p(ptr);
2494 break;
2495 default:
2496 val = ldq_p(ptr);
2497 break;
2498 }
84b7b8e7
FB
2499 }
2500 return val;
2501}
2502
2c17449b 2503uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2504{
2c17449b 2505 return ldq_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2506}
2507
2c17449b 2508uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2509{
2c17449b 2510 return ldq_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2511}
2512
2c17449b 2513uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2514{
2c17449b 2515 return ldq_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2516}
2517
aab33094 2518/* XXX: optimize */
2c17449b 2519uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
aab33094
FB
2520{
2521 uint8_t val;
2c17449b 2522 address_space_rw(as, addr, &val, 1, 0);
aab33094
FB
2523 return val;
2524}
2525
733f0b02 2526/* warning: addr must be aligned */
41701aa4 2527static inline uint32_t lduw_phys_internal(AddressSpace *as, hwaddr addr,
1e78bcc1 2528 enum device_endian endian)
aab33094 2529{
733f0b02
MT
2530 uint8_t *ptr;
2531 uint64_t val;
5c8a00ce 2532 MemoryRegion *mr;
149f54b5
PB
2533 hwaddr l = 2;
2534 hwaddr addr1;
733f0b02 2535
41701aa4 2536 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2537 false);
2538 if (l < 2 || !memory_access_is_direct(mr, false)) {
733f0b02 2539 /* I/O case */
5c8a00ce 2540 io_mem_read(mr, addr1, &val, 2);
1e78bcc1
AG
2541#if defined(TARGET_WORDS_BIGENDIAN)
2542 if (endian == DEVICE_LITTLE_ENDIAN) {
2543 val = bswap16(val);
2544 }
2545#else
2546 if (endian == DEVICE_BIG_ENDIAN) {
2547 val = bswap16(val);
2548 }
2549#endif
733f0b02
MT
2550 } else {
2551 /* RAM case */
5c8a00ce 2552 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2553 & TARGET_PAGE_MASK)
149f54b5 2554 + addr1);
1e78bcc1
AG
2555 switch (endian) {
2556 case DEVICE_LITTLE_ENDIAN:
2557 val = lduw_le_p(ptr);
2558 break;
2559 case DEVICE_BIG_ENDIAN:
2560 val = lduw_be_p(ptr);
2561 break;
2562 default:
2563 val = lduw_p(ptr);
2564 break;
2565 }
733f0b02
MT
2566 }
2567 return val;
aab33094
FB
2568}
2569
41701aa4 2570uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2571{
41701aa4 2572 return lduw_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2573}
2574
41701aa4 2575uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2576{
41701aa4 2577 return lduw_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2578}
2579
41701aa4 2580uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2581{
41701aa4 2582 return lduw_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2583}
2584
8df1cd07
FB
2585/* warning: addr must be aligned. The ram page is not marked as dirty
2586 and the code inside is not invalidated. It is useful if the dirty
2587 bits are used to track modified PTEs. */
2198a121 2588void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
8df1cd07 2589{
8df1cd07 2590 uint8_t *ptr;
5c8a00ce 2591 MemoryRegion *mr;
149f54b5
PB
2592 hwaddr l = 4;
2593 hwaddr addr1;
8df1cd07 2594
2198a121 2595 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2596 true);
2597 if (l < 4 || !memory_access_is_direct(mr, true)) {
2598 io_mem_write(mr, addr1, val, 4);
8df1cd07 2599 } else {
5c8a00ce 2600 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 2601 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 2602 stl_p(ptr, val);
74576198
AL
2603
2604 if (unlikely(in_migration)) {
a2cd8c85 2605 if (cpu_physical_memory_is_clean(addr1)) {
74576198
AL
2606 /* invalidate code */
2607 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2608 /* set dirty bit */
6886867e 2609 cpu_physical_memory_set_dirty_range_nocode(addr1, 4);
74576198
AL
2610 }
2611 }
8df1cd07
FB
2612 }
2613}
2614
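/*
 * Illustrative sketch only (not part of this file): the kind of caller
 * stl_phys_notdirty() exists for.  A target MMU helper that sets an
 * accessed/dirty bit in a guest page table entry wants the store to bypass
 * dirty tracking, because the dirty bitmap is itself being used to detect
 * modified PTEs.  The address and bit value are hypothetical.
 */
#if 0
static void example_set_pte_accessed(AddressSpace *as, hwaddr pte_addr,
                                     uint32_t pte)
{
    stl_phys_notdirty(as, pte_addr, pte | 0x20 /* e.g. x86 accessed bit */);
}
#endif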
2615/* warning: addr must be aligned */
ab1da857
EI
2616static inline void stl_phys_internal(AddressSpace *as,
2617 hwaddr addr, uint32_t val,
1e78bcc1 2618 enum device_endian endian)
8df1cd07 2619{
8df1cd07 2620 uint8_t *ptr;
5c8a00ce 2621 MemoryRegion *mr;
149f54b5
PB
2622 hwaddr l = 4;
2623 hwaddr addr1;
8df1cd07 2624
ab1da857 2625 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2626 true);
2627 if (l < 4 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
2628#if defined(TARGET_WORDS_BIGENDIAN)
2629 if (endian == DEVICE_LITTLE_ENDIAN) {
2630 val = bswap32(val);
2631 }
2632#else
2633 if (endian == DEVICE_BIG_ENDIAN) {
2634 val = bswap32(val);
2635 }
2636#endif
5c8a00ce 2637 io_mem_write(mr, addr1, val, 4);
8df1cd07 2638 } else {
8df1cd07 2639 /* RAM case */
5c8a00ce 2640 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 2641 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2642 switch (endian) {
2643 case DEVICE_LITTLE_ENDIAN:
2644 stl_le_p(ptr, val);
2645 break;
2646 case DEVICE_BIG_ENDIAN:
2647 stl_be_p(ptr, val);
2648 break;
2649 default:
2650 stl_p(ptr, val);
2651 break;
2652 }
51d7a9eb 2653 invalidate_and_set_dirty(addr1, 4);
8df1cd07
FB
2654 }
2655}
2656
ab1da857 2657void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2658{
ab1da857 2659 stl_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2660}
2661
ab1da857 2662void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2663{
ab1da857 2664 stl_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2665}
2666
ab1da857 2667void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2668{
ab1da857 2669 stl_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2670}
2671
aab33094 2672/* XXX: optimize */
db3be60d 2673void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
aab33094
FB
2674{
2675 uint8_t v = val;
db3be60d 2676 address_space_rw(as, addr, &v, 1, 1);
aab33094
FB
2677}
2678
733f0b02 2679/* warning: addr must be aligned */
5ce5944d
EI
2680static inline void stw_phys_internal(AddressSpace *as,
2681 hwaddr addr, uint32_t val,
1e78bcc1 2682 enum device_endian endian)
aab33094 2683{
733f0b02 2684 uint8_t *ptr;
5c8a00ce 2685 MemoryRegion *mr;
149f54b5
PB
2686 hwaddr l = 2;
2687 hwaddr addr1;
733f0b02 2688
5ce5944d 2689 mr = address_space_translate(as, addr, &addr1, &l, true);
5c8a00ce 2690 if (l < 2 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
2691#if defined(TARGET_WORDS_BIGENDIAN)
2692 if (endian == DEVICE_LITTLE_ENDIAN) {
2693 val = bswap16(val);
2694 }
2695#else
2696 if (endian == DEVICE_BIG_ENDIAN) {
2697 val = bswap16(val);
2698 }
2699#endif
5c8a00ce 2700 io_mem_write(mr, addr1, val, 2);
733f0b02 2701 } else {
733f0b02 2702 /* RAM case */
5c8a00ce 2703 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
733f0b02 2704 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2705 switch (endian) {
2706 case DEVICE_LITTLE_ENDIAN:
2707 stw_le_p(ptr, val);
2708 break;
2709 case DEVICE_BIG_ENDIAN:
2710 stw_be_p(ptr, val);
2711 break;
2712 default:
2713 stw_p(ptr, val);
2714 break;
2715 }
51d7a9eb 2716 invalidate_and_set_dirty(addr1, 2);
733f0b02 2717 }
aab33094
FB
2718}
2719
5ce5944d 2720void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2721{
5ce5944d 2722 stw_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2723}
2724
5ce5944d 2725void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2726{
5ce5944d 2727 stw_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2728}
2729
5ce5944d 2730void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2731{
5ce5944d 2732 stw_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2733}
2734
aab33094 2735/* XXX: optimize */
f606604f 2736void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
aab33094
FB
2737{
2738 val = tswap64(val);
f606604f 2739 address_space_rw(as, addr, (void *) &val, 8, 1);
aab33094
FB
2740}
2741
f606604f 2742void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
1e78bcc1
AG
2743{
2744 val = cpu_to_le64(val);
f606604f 2745 address_space_rw(as, addr, (void *) &val, 8, 1);
1e78bcc1
AG
2746}
2747
f606604f 2748void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
1e78bcc1
AG
2749{
2750 val = cpu_to_be64(val);
f606604f 2751 address_space_rw(as, addr, (void *) &val, 8, 1);
1e78bcc1
AG
2752}
2753
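/*
 * Illustrative sketch only (not part of this file): the fixed-endian
 * accessors above give device and target code a defined guest byte order
 * regardless of the host.  The address and value are hypothetical.
 */
#if 0
static void example_fixed_endian_access(void)
{
    stl_le_phys(&address_space_memory, 0x2000, 0x12345678);
    assert(ldl_le_phys(&address_space_memory, 0x2000) == 0x12345678);
}
#endif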
5e2972fd 2754/* virtual memory access for debug (includes writing to ROM) */
f17ec444 2755int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
b448f2f3 2756 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2757{
2758 int l;
a8170e5e 2759 hwaddr phys_addr;
9b3c35e0 2760 target_ulong page;
13eb76e0
FB
2761
2762 while (len > 0) {
2763 page = addr & TARGET_PAGE_MASK;
f17ec444 2764 phys_addr = cpu_get_phys_page_debug(cpu, page);
13eb76e0
FB
2765 /* if no physical page mapped, return an error */
2766 if (phys_addr == -1)
2767 return -1;
2768 l = (page + TARGET_PAGE_SIZE) - addr;
2769 if (l > len)
2770 l = len;
5e2972fd 2771 phys_addr += (addr & ~TARGET_PAGE_MASK);
2e38847b
EI
2772 if (is_write) {
2773 cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
2774 } else {
2775 address_space_rw(cpu->as, phys_addr, buf, l, 0);
2776 }
13eb76e0
FB
2777 len -= l;
2778 buf += l;
2779 addr += l;
2780 }
2781 return 0;
2782}
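/*
 * Illustrative sketch only (not part of this file): a gdb-stub style
 * caller of the debug accessor above.  The CPU pointer and virtual
 * address are hypothetical.
 */
#if 0
static uint32_t example_peek_guest_u32(CPUState *cpu, target_ulong vaddr)
{
    uint8_t buf[4] = { 0 };

    cpu_memory_rw_debug(cpu, vaddr, buf, sizeof(buf), 0);
    return ldl_p(buf);
}
#endif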
a68fe89c 2783#endif
13eb76e0 2784
8e4a424b
BS
2785/*
2786 * A helper function for the _utterly broken_ virtio device model to find out
2787 * if it's running on a big-endian machine. Don't do this at home, kids!
2788 */
98ed8ecf
GK
2789bool target_words_bigendian(void);
2790bool target_words_bigendian(void)
8e4a424b
BS
2791{
2792#if defined(TARGET_WORDS_BIGENDIAN)
2793 return true;
2794#else
2795 return false;
2796#endif
2797}
2798
76f35538 2799#ifndef CONFIG_USER_ONLY
a8170e5e 2800bool cpu_physical_memory_is_io(hwaddr phys_addr)
76f35538 2801{
5c8a00ce 2802 MemoryRegion *mr;
149f54b5 2803 hwaddr l = 1;
76f35538 2804
5c8a00ce
PB
2805 mr = address_space_translate(&address_space_memory,
2806 phys_addr, &phys_addr, &l, false);
76f35538 2807
5c8a00ce
PB
2808 return !(memory_region_is_ram(mr) ||
2809 memory_region_is_romd(mr));
76f35538 2810}
bd2fa51f
MH
2811
2812void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
2813{
2814 RAMBlock *block;
2815
2816 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
2817 func(block->host, block->offset, block->length, opaque);
2818 }
2819}
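/*
 * Illustrative sketch only (not part of this file): a RAMBlockIterFunc
 * matching the (host, offset, length, opaque) arguments passed by
 * qemu_ram_foreach_block() above, here used to sum up guest RAM.
 */
#if 0
static void example_count_ram(void *host_addr, ram_addr_t offset,
                              ram_addr_t length, void *opaque)
{
    *(ram_addr_t *)opaque += length;
}

static ram_addr_t example_total_ram(void)
{
    ram_addr_t total = 0;

    qemu_ram_foreach_block(example_count_ram, &total);
    return total;
}
#endif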
ec3f8c99 2820#endif