migration: move dirty bitmap sync to ram_addr.h
[thirdparty/qemu.git] / exec.c
54936004 1/*
5b6dd868 2 * Virtual page mapping
5fafdf24 3 *
54936004
FB
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
54936004 18 */
67b915a5 19#include "config.h"
777872e5 20#ifndef _WIN32
a98d49b1 21#include <sys/types.h>
d5a8f07c
FB
22#include <sys/mman.h>
23#endif
54936004 24
055403b2 25#include "qemu-common.h"
6180a181 26#include "cpu.h"
b67d9a52 27#include "tcg.h"
b3c7724c 28#include "hw/hw.h"
4485bd26 29#if !defined(CONFIG_USER_ONLY)
47c8ca53 30#include "hw/boards.h"
4485bd26 31#endif
cc9e98cb 32#include "hw/qdev.h"
1de7afc9 33#include "qemu/osdep.h"
9c17d615 34#include "sysemu/kvm.h"
2ff3de68 35#include "sysemu/sysemu.h"
0d09e41a 36#include "hw/xen/xen.h"
1de7afc9
PB
37#include "qemu/timer.h"
38#include "qemu/config-file.h"
75a34036 39#include "qemu/error-report.h"
022c62cb 40#include "exec/memory.h"
9c17d615 41#include "sysemu/dma.h"
022c62cb 42#include "exec/address-spaces.h"
53a5960a
PB
43#if defined(CONFIG_USER_ONLY)
44#include <qemu.h>
432d268c 45#else /* !CONFIG_USER_ONLY */
9c17d615 46#include "sysemu/xen-mapcache.h"
6506e4f9 47#include "trace.h"
53a5960a 48#endif
0d6d3c87 49#include "exec/cpu-all.h"
0dc3f44a 50#include "qemu/rcu_queue.h"
022c62cb 51#include "exec/cputlb.h"
5b6dd868 52#include "translate-all.h"
0cac1b66 53
022c62cb 54#include "exec/memory-internal.h"
220c3ebd 55#include "exec/ram_addr.h"
67d95c15 56
b35ba30f
MT
57#include "qemu/range.h"
58
db7b5426 59//#define DEBUG_SUBPAGE
1196be37 60
e2eef170 61#if !defined(CONFIG_USER_ONLY)
0dc3f44a
MD
62/* ram_list is read under rcu_read_lock()/rcu_read_unlock(). Writes
63 * are protected by the ramlist lock.
64 */
0d53d9fe 65RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
62152b8a
AK
66
67static MemoryRegion *system_memory;
309cb471 68static MemoryRegion *system_io;
62152b8a 69
f6790af6
AK
70AddressSpace address_space_io;
71AddressSpace address_space_memory;
2673a5da 72
0844e007 73MemoryRegion io_mem_rom, io_mem_notdirty;
acc9d80b 74static MemoryRegion io_mem_unassigned;
0e0df1e2 75
7bd4f430
PB
76/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
77#define RAM_PREALLOC (1 << 0)
78
dbcb8981
PB
79/* RAM is mmap-ed with MAP_SHARED */
80#define RAM_SHARED (1 << 1)
81
62be4e3a
MT
82/* Only a portion of RAM (used_length) is actually used, and migrated.
83 * This used_length size can change across reboots.
84 */
85#define RAM_RESIZEABLE (1 << 2)
86
e2eef170 87#endif
9fa3e853 88
bdc44640 89struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
6a00d601
FB
90/* current CPU in the current thread. It is only valid inside
91 cpu_exec() */
4917cf44 92DEFINE_TLS(CPUState *, current_cpu);
2e70f6ef 93/* 0 = Do not count executed instructions.
bf20dc07 94 1 = Precise instruction counting.
2e70f6ef 95 2 = Adaptive rate instruction counting. */
5708fc66 96int use_icount;
6a00d601 97
e2eef170 98#if !defined(CONFIG_USER_ONLY)
4346ae3e 99
1db8abb1
PB
100typedef struct PhysPageEntry PhysPageEntry;
101
102struct PhysPageEntry {
 9736e55b 103 /* How many bits to skip to the next level (in units of L2_SIZE). 0 for a leaf. */
8b795765 104 uint32_t skip : 6;
9736e55b 105 /* index into phys_sections (!skip) or phys_map_nodes (skip) */
8b795765 106 uint32_t ptr : 26;
1db8abb1
PB
107};
108
8b795765
MT
109#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
110
03f49957 111/* Size of the L2 (and L3, etc) page tables. */
57271d63 112#define ADDR_SPACE_BITS 64
03f49957 113
026736ce 114#define P_L2_BITS 9
03f49957
PB
115#define P_L2_SIZE (1 << P_L2_BITS)
116
117#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
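/* Worked example (illustrative, assuming a 4 KiB target page, i.e.
 * TARGET_PAGE_BITS == 12): with ADDR_SPACE_BITS == 64 and P_L2_BITS == 9,
 * P_L2_LEVELS = ((64 - 12 - 1) / 9) + 1 = 6, so a lookup walks at most six
 * 512-entry nodes, each level consuming 9 bits of the page index.
 */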
118
119typedef PhysPageEntry Node[P_L2_SIZE];
0475d94f 120
53cb28cb 121typedef struct PhysPageMap {
79e2b9ae
PB
122 struct rcu_head rcu;
123
53cb28cb
MA
124 unsigned sections_nb;
125 unsigned sections_nb_alloc;
126 unsigned nodes_nb;
127 unsigned nodes_nb_alloc;
128 Node *nodes;
129 MemoryRegionSection *sections;
130} PhysPageMap;
131
1db8abb1 132struct AddressSpaceDispatch {
79e2b9ae
PB
133 struct rcu_head rcu;
134
1db8abb1
PB
135 /* This is a multi-level map on the physical address space.
136 * The bottom level has pointers to MemoryRegionSections.
137 */
138 PhysPageEntry phys_map;
53cb28cb 139 PhysPageMap map;
acc9d80b 140 AddressSpace *as;
1db8abb1
PB
141};
142
90260c6c
JK
143#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
144typedef struct subpage_t {
145 MemoryRegion iomem;
acc9d80b 146 AddressSpace *as;
90260c6c
JK
147 hwaddr base;
148 uint16_t sub_section[TARGET_PAGE_SIZE];
149} subpage_t;
150
b41aac4f
LPF
151#define PHYS_SECTION_UNASSIGNED 0
152#define PHYS_SECTION_NOTDIRTY 1
153#define PHYS_SECTION_ROM 2
154#define PHYS_SECTION_WATCH 3
5312bd8b 155
e2eef170 156static void io_mem_init(void);
62152b8a 157static void memory_map_init(void);
09daed84 158static void tcg_commit(MemoryListener *listener);
e2eef170 159
1ec9b909 160static MemoryRegion io_mem_watch;
6658ffb8 161#endif
fd6ce8f6 162
6d9a1304 163#if !defined(CONFIG_USER_ONLY)
d6f2ea22 164
53cb28cb 165static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
d6f2ea22 166{
53cb28cb
MA
167 if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
168 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
169 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
170 map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
d6f2ea22 171 }
f7bf5461
AK
172}
173
db94604b 174static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
f7bf5461
AK
175{
176 unsigned i;
8b795765 177 uint32_t ret;
db94604b
PB
178 PhysPageEntry e;
179 PhysPageEntry *p;
f7bf5461 180
53cb28cb 181 ret = map->nodes_nb++;
db94604b 182 p = map->nodes[ret];
f7bf5461 183 assert(ret != PHYS_MAP_NODE_NIL);
53cb28cb 184 assert(ret != map->nodes_nb_alloc);
db94604b
PB
185
186 e.skip = leaf ? 0 : 1;
187 e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
03f49957 188 for (i = 0; i < P_L2_SIZE; ++i) {
db94604b 189 memcpy(&p[i], &e, sizeof(e));
d6f2ea22 190 }
f7bf5461 191 return ret;
d6f2ea22
AK
192}
193
53cb28cb
MA
194static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
195 hwaddr *index, hwaddr *nb, uint16_t leaf,
2999097b 196 int level)
f7bf5461
AK
197{
198 PhysPageEntry *p;
03f49957 199 hwaddr step = (hwaddr)1 << (level * P_L2_BITS);
108c49b8 200
9736e55b 201 if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
db94604b 202 lp->ptr = phys_map_node_alloc(map, level == 0);
92e873b9 203 }
db94604b 204 p = map->nodes[lp->ptr];
03f49957 205 lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];
f7bf5461 206
03f49957 207 while (*nb && lp < &p[P_L2_SIZE]) {
07f07b31 208 if ((*index & (step - 1)) == 0 && *nb >= step) {
9736e55b 209 lp->skip = 0;
c19e8800 210 lp->ptr = leaf;
07f07b31
AK
211 *index += step;
212 *nb -= step;
2999097b 213 } else {
53cb28cb 214 phys_page_set_level(map, lp, index, nb, leaf, level - 1);
2999097b
AK
215 }
216 ++lp;
f7bf5461
AK
217 }
218}
219
ac1970fb 220static void phys_page_set(AddressSpaceDispatch *d,
a8170e5e 221 hwaddr index, hwaddr nb,
2999097b 222 uint16_t leaf)
f7bf5461 223{
2999097b 224 /* Wildly overreserve - it doesn't matter much. */
53cb28cb 225 phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);
5cd2c5b6 226
53cb28cb 227 phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
92e873b9
FB
228}
229
b35ba30f
MT
 230/* Compact a non-leaf page entry. Simply detect that the entry has a single child,
231 * and update our entry so we can skip it and go directly to the destination.
232 */
233static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
234{
235 unsigned valid_ptr = P_L2_SIZE;
236 int valid = 0;
237 PhysPageEntry *p;
238 int i;
239
240 if (lp->ptr == PHYS_MAP_NODE_NIL) {
241 return;
242 }
243
244 p = nodes[lp->ptr];
245 for (i = 0; i < P_L2_SIZE; i++) {
246 if (p[i].ptr == PHYS_MAP_NODE_NIL) {
247 continue;
248 }
249
250 valid_ptr = i;
251 valid++;
252 if (p[i].skip) {
253 phys_page_compact(&p[i], nodes, compacted);
254 }
255 }
256
257 /* We can only compress if there's only one child. */
258 if (valid != 1) {
259 return;
260 }
261
262 assert(valid_ptr < P_L2_SIZE);
263
264 /* Don't compress if it won't fit in the # of bits we have. */
265 if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
266 return;
267 }
268
269 lp->ptr = p[valid_ptr].ptr;
270 if (!p[valid_ptr].skip) {
271 /* If our only child is a leaf, make this a leaf. */
272 /* By design, we should have made this node a leaf to begin with so we
273 * should never reach here.
274 * But since it's so simple to handle this, let's do it just in case we
275 * change this rule.
276 */
277 lp->skip = 0;
278 } else {
279 lp->skip += p[valid_ptr].skip;
280 }
281}
282
283static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
284{
285 DECLARE_BITMAP(compacted, nodes_nb);
286
287 if (d->phys_map.skip) {
53cb28cb 288 phys_page_compact(&d->phys_map, d->map.nodes, compacted);
b35ba30f
MT
289 }
290}
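/* Worked example of the compaction above (illustrative): after the recursive
 * pass, an entry with skip == 1 whose only valid child also has skip == 1
 * collapses into a single entry with skip == 2 pointing at the grandchild;
 * phys_page_find() below simply subtracts that skip from its level counter,
 * so the intermediate node is never visited again.
 */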
291
97115a8d 292static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
9affd6fc 293 Node *nodes, MemoryRegionSection *sections)
92e873b9 294{
31ab2b4a 295 PhysPageEntry *p;
97115a8d 296 hwaddr index = addr >> TARGET_PAGE_BITS;
31ab2b4a 297 int i;
f1f6e3b8 298
9736e55b 299 for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
c19e8800 300 if (lp.ptr == PHYS_MAP_NODE_NIL) {
9affd6fc 301 return &sections[PHYS_SECTION_UNASSIGNED];
31ab2b4a 302 }
9affd6fc 303 p = nodes[lp.ptr];
03f49957 304 lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
5312bd8b 305 }
b35ba30f
MT
306
307 if (sections[lp.ptr].size.hi ||
308 range_covers_byte(sections[lp.ptr].offset_within_address_space,
309 sections[lp.ptr].size.lo, addr)) {
310 return &sections[lp.ptr];
311 } else {
312 return &sections[PHYS_SECTION_UNASSIGNED];
313 }
f3705d53
AK
314}
315
e5548617
BS
316bool memory_region_is_unassigned(MemoryRegion *mr)
317{
2a8e7499 318 return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
5b6dd868 319 && mr != &io_mem_watch;
fd6ce8f6 320}
149f54b5 321
79e2b9ae 322/* Called from RCU critical section */
c7086b4a 323static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
90260c6c
JK
324 hwaddr addr,
325 bool resolve_subpage)
9f029603 326{
90260c6c
JK
327 MemoryRegionSection *section;
328 subpage_t *subpage;
329
53cb28cb 330 section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
90260c6c
JK
331 if (resolve_subpage && section->mr->subpage) {
332 subpage = container_of(section->mr, subpage_t, iomem);
53cb28cb 333 section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
90260c6c
JK
334 }
335 return section;
9f029603
JK
336}
337
79e2b9ae 338/* Called from RCU critical section */
90260c6c 339static MemoryRegionSection *
c7086b4a 340address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
90260c6c 341 hwaddr *plen, bool resolve_subpage)
149f54b5
PB
342{
343 MemoryRegionSection *section;
a87f3954 344 Int128 diff;
149f54b5 345
c7086b4a 346 section = address_space_lookup_region(d, addr, resolve_subpage);
149f54b5
PB
347 /* Compute offset within MemoryRegionSection */
348 addr -= section->offset_within_address_space;
349
350 /* Compute offset within MemoryRegion */
351 *xlat = addr + section->offset_within_region;
352
353 diff = int128_sub(section->mr->size, int128_make64(addr));
3752a036 354 *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
149f54b5
PB
355 return section;
356}
90260c6c 357
a87f3954
PB
358static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
359{
360 if (memory_region_is_ram(mr)) {
361 return !(is_write && mr->readonly);
362 }
363 if (memory_region_is_romd(mr)) {
364 return !is_write;
365 }
366
367 return false;
368}
369
41063e1e 370/* Called from RCU critical section */
5c8a00ce
PB
371MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
372 hwaddr *xlat, hwaddr *plen,
373 bool is_write)
90260c6c 374{
30951157
AK
375 IOMMUTLBEntry iotlb;
376 MemoryRegionSection *section;
377 MemoryRegion *mr;
30951157
AK
378
379 for (;;) {
79e2b9ae
PB
380 AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
381 section = address_space_translate_internal(d, addr, &addr, plen, true);
30951157
AK
382 mr = section->mr;
383
384 if (!mr->iommu_ops) {
385 break;
386 }
387
8d7b8cb9 388 iotlb = mr->iommu_ops->translate(mr, addr, is_write);
30951157
AK
389 addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
390 | (addr & iotlb.addr_mask));
23820dbf 391 *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
30951157
AK
392 if (!(iotlb.perm & (1 << is_write))) {
393 mr = &io_mem_unassigned;
394 break;
395 }
396
397 as = iotlb.target_as;
398 }
399
fe680d0d 400 if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
a87f3954 401 hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
23820dbf 402 *plen = MIN(page, *plen);
a87f3954
PB
403 }
404
30951157
AK
405 *xlat = addr;
406 return mr;
90260c6c
JK
407}
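/* Hypothetical caller sketch (not from the original source) showing the RCU
 * discipline required by the comment above:
 *
 *     rcu_read_lock();
 *     mr = address_space_translate(as, addr, &xlat, &len, is_write);
 *     if (memory_access_is_direct(mr, is_write)) {
 *         // access RAM directly, e.g. via
 *         // qemu_get_ram_ptr(memory_region_get_ram_addr(mr) + xlat)
 *     } else {
 *         // dispatch to the MemoryRegion's read/write callbacks
 *     }
 *     rcu_read_unlock();
 *
 * The returned MemoryRegion is only guaranteed to stay alive while the RCU
 * read-side critical section is held.
 */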
408
79e2b9ae 409/* Called from RCU critical section */
90260c6c 410MemoryRegionSection *
9d82b5a7
PB
411address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
412 hwaddr *xlat, hwaddr *plen)
90260c6c 413{
30951157 414 MemoryRegionSection *section;
9d82b5a7
PB
415 section = address_space_translate_internal(cpu->memory_dispatch,
416 addr, xlat, plen, false);
30951157
AK
417
418 assert(!section->mr->iommu_ops);
419 return section;
90260c6c 420}
5b6dd868 421#endif
fd6ce8f6 422
b170fce3 423#if !defined(CONFIG_USER_ONLY)
5b6dd868
BS
424
425static int cpu_common_post_load(void *opaque, int version_id)
fd6ce8f6 426{
259186a7 427 CPUState *cpu = opaque;
a513fe19 428
5b6dd868
BS
429 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
430 version_id is increased. */
259186a7 431 cpu->interrupt_request &= ~0x01;
c01a71c1 432 tlb_flush(cpu, 1);
5b6dd868
BS
433
434 return 0;
a513fe19 435}
7501267e 436
6c3bff0e
PD
437static int cpu_common_pre_load(void *opaque)
438{
439 CPUState *cpu = opaque;
440
adee6424 441 cpu->exception_index = -1;
6c3bff0e
PD
442
443 return 0;
444}
445
446static bool cpu_common_exception_index_needed(void *opaque)
447{
448 CPUState *cpu = opaque;
449
adee6424 450 return tcg_enabled() && cpu->exception_index != -1;
6c3bff0e
PD
451}
452
453static const VMStateDescription vmstate_cpu_common_exception_index = {
454 .name = "cpu_common/exception_index",
455 .version_id = 1,
456 .minimum_version_id = 1,
457 .fields = (VMStateField[]) {
458 VMSTATE_INT32(exception_index, CPUState),
459 VMSTATE_END_OF_LIST()
460 }
461};
462
1a1562f5 463const VMStateDescription vmstate_cpu_common = {
5b6dd868
BS
464 .name = "cpu_common",
465 .version_id = 1,
466 .minimum_version_id = 1,
6c3bff0e 467 .pre_load = cpu_common_pre_load,
5b6dd868 468 .post_load = cpu_common_post_load,
35d08458 469 .fields = (VMStateField[]) {
259186a7
AF
470 VMSTATE_UINT32(halted, CPUState),
471 VMSTATE_UINT32(interrupt_request, CPUState),
5b6dd868 472 VMSTATE_END_OF_LIST()
6c3bff0e
PD
473 },
474 .subsections = (VMStateSubsection[]) {
475 {
476 .vmsd = &vmstate_cpu_common_exception_index,
477 .needed = cpu_common_exception_index_needed,
478 } , {
479 /* empty */
480 }
5b6dd868
BS
481 }
482};
1a1562f5 483
5b6dd868 484#endif
ea041c0e 485
38d8f5c8 486CPUState *qemu_get_cpu(int index)
ea041c0e 487{
bdc44640 488 CPUState *cpu;
ea041c0e 489
bdc44640 490 CPU_FOREACH(cpu) {
55e5c285 491 if (cpu->cpu_index == index) {
bdc44640 492 return cpu;
55e5c285 493 }
ea041c0e 494 }
5b6dd868 495
bdc44640 496 return NULL;
ea041c0e
FB
497}
498
09daed84
EI
499#if !defined(CONFIG_USER_ONLY)
500void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
501{
502 /* We only support one address space per cpu at the moment. */
503 assert(cpu->as == as);
504
505 if (cpu->tcg_as_listener) {
506 memory_listener_unregister(cpu->tcg_as_listener);
507 } else {
508 cpu->tcg_as_listener = g_new0(MemoryListener, 1);
509 }
510 cpu->tcg_as_listener->commit = tcg_commit;
511 memory_listener_register(cpu->tcg_as_listener, as);
512}
513#endif
514
5b6dd868 515void cpu_exec_init(CPUArchState *env)
ea041c0e 516{
5b6dd868 517 CPUState *cpu = ENV_GET_CPU(env);
b170fce3 518 CPUClass *cc = CPU_GET_CLASS(cpu);
bdc44640 519 CPUState *some_cpu;
5b6dd868
BS
520 int cpu_index;
521
522#if defined(CONFIG_USER_ONLY)
523 cpu_list_lock();
524#endif
5b6dd868 525 cpu_index = 0;
bdc44640 526 CPU_FOREACH(some_cpu) {
5b6dd868
BS
527 cpu_index++;
528 }
55e5c285 529 cpu->cpu_index = cpu_index;
1b1ed8dc 530 cpu->numa_node = 0;
f0c3c505 531 QTAILQ_INIT(&cpu->breakpoints);
ff4700b0 532 QTAILQ_INIT(&cpu->watchpoints);
5b6dd868 533#ifndef CONFIG_USER_ONLY
09daed84 534 cpu->as = &address_space_memory;
5b6dd868 535 cpu->thread_id = qemu_get_thread_id();
cba70549 536 cpu_reload_memory_map(cpu);
5b6dd868 537#endif
bdc44640 538 QTAILQ_INSERT_TAIL(&cpus, cpu, node);
5b6dd868
BS
539#if defined(CONFIG_USER_ONLY)
540 cpu_list_unlock();
541#endif
e0d47944
AF
542 if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
543 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
544 }
5b6dd868 545#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
5b6dd868
BS
546 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
547 cpu_save, cpu_load, env);
b170fce3 548 assert(cc->vmsd == NULL);
e0d47944 549 assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
5b6dd868 550#endif
b170fce3
AF
551 if (cc->vmsd != NULL) {
552 vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
553 }
ea041c0e
FB
554}
555
94df27fd 556#if defined(CONFIG_USER_ONLY)
00b941e5 557static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
94df27fd
PB
558{
559 tb_invalidate_phys_page_range(pc, pc + 1, 0);
560}
561#else
00b941e5 562static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
1e7855a5 563{
e8262a1b
MF
564 hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
565 if (phys != -1) {
09daed84 566 tb_invalidate_phys_addr(cpu->as,
29d8ec7b 567 phys | (pc & ~TARGET_PAGE_MASK));
e8262a1b 568 }
1e7855a5 569}
c27004ec 570#endif
d720b93d 571
c527ee8f 572#if defined(CONFIG_USER_ONLY)
75a34036 573void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
c527ee8f
PB
574
575{
576}
577
3ee887e8
PM
578int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
579 int flags)
580{
581 return -ENOSYS;
582}
583
584void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
585{
586}
587
75a34036 588int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
c527ee8f
PB
589 int flags, CPUWatchpoint **watchpoint)
590{
591 return -ENOSYS;
592}
593#else
6658ffb8 594/* Add a watchpoint. */
75a34036 595int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
a1d1bb31 596 int flags, CPUWatchpoint **watchpoint)
6658ffb8 597{
c0ce998e 598 CPUWatchpoint *wp;
6658ffb8 599
05068c0d 600 /* forbid ranges which are empty or run off the end of the address space */
07e2863d 601 if (len == 0 || (addr + len - 1) < addr) {
75a34036
AF
602 error_report("tried to set invalid watchpoint at %"
603 VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
b4051334
AL
604 return -EINVAL;
605 }
7267c094 606 wp = g_malloc(sizeof(*wp));
a1d1bb31
AL
607
608 wp->vaddr = addr;
05068c0d 609 wp->len = len;
a1d1bb31
AL
610 wp->flags = flags;
611
2dc9f411 612 /* keep all GDB-injected watchpoints in front */
ff4700b0
AF
613 if (flags & BP_GDB) {
614 QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
615 } else {
616 QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
617 }
6658ffb8 618
31b030d4 619 tlb_flush_page(cpu, addr);
a1d1bb31
AL
620
621 if (watchpoint)
622 *watchpoint = wp;
623 return 0;
6658ffb8
PB
624}
625
a1d1bb31 626/* Remove a specific watchpoint. */
75a34036 627int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
a1d1bb31 628 int flags)
6658ffb8 629{
a1d1bb31 630 CPUWatchpoint *wp;
6658ffb8 631
ff4700b0 632 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
05068c0d 633 if (addr == wp->vaddr && len == wp->len
6e140f28 634 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
75a34036 635 cpu_watchpoint_remove_by_ref(cpu, wp);
6658ffb8
PB
636 return 0;
637 }
638 }
a1d1bb31 639 return -ENOENT;
6658ffb8
PB
640}
641
a1d1bb31 642/* Remove a specific watchpoint by reference. */
75a34036 643void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
a1d1bb31 644{
ff4700b0 645 QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);
7d03f82f 646
31b030d4 647 tlb_flush_page(cpu, watchpoint->vaddr);
a1d1bb31 648
7267c094 649 g_free(watchpoint);
a1d1bb31
AL
650}
651
652/* Remove all matching watchpoints. */
75a34036 653void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
a1d1bb31 654{
c0ce998e 655 CPUWatchpoint *wp, *next;
a1d1bb31 656
ff4700b0 657 QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
75a34036
AF
658 if (wp->flags & mask) {
659 cpu_watchpoint_remove_by_ref(cpu, wp);
660 }
c0ce998e 661 }
7d03f82f 662}
05068c0d
PM
663
664/* Return true if this watchpoint address matches the specified
665 * access (ie the address range covered by the watchpoint overlaps
666 * partially or completely with the address range covered by the
667 * access).
668 */
669static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
670 vaddr addr,
671 vaddr len)
672{
673 /* We know the lengths are non-zero, but a little caution is
674 * required to avoid errors in the case where the range ends
675 * exactly at the top of the address space and so addr + len
676 * wraps round to zero.
677 */
678 vaddr wpend = wp->vaddr + wp->len - 1;
679 vaddr addrend = addr + len - 1;
680
681 return !(addr > wpend || wp->vaddr > addrend);
682}
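/* Example (illustrative): a watchpoint at vaddr 0x1000 with len 4 covers
 * 0x1000..0x1003, so wpend == 0x1003; an access at addr 0x1002 with len 2
 * gives addrend == 0x1003, neither "addr > wpend" nor "wp->vaddr > addrend"
 * holds, and the ranges are reported as overlapping.
 */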
683
c527ee8f 684#endif
7d03f82f 685
a1d1bb31 686/* Add a breakpoint. */
b3310ab3 687int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
a1d1bb31 688 CPUBreakpoint **breakpoint)
4c3a88a2 689{
c0ce998e 690 CPUBreakpoint *bp;
3b46e624 691
7267c094 692 bp = g_malloc(sizeof(*bp));
4c3a88a2 693
a1d1bb31
AL
694 bp->pc = pc;
695 bp->flags = flags;
696
2dc9f411 697 /* keep all GDB-injected breakpoints in front */
00b941e5 698 if (flags & BP_GDB) {
f0c3c505 699 QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
00b941e5 700 } else {
f0c3c505 701 QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
00b941e5 702 }
3b46e624 703
f0c3c505 704 breakpoint_invalidate(cpu, pc);
a1d1bb31 705
00b941e5 706 if (breakpoint) {
a1d1bb31 707 *breakpoint = bp;
00b941e5 708 }
4c3a88a2 709 return 0;
4c3a88a2
FB
710}
711
a1d1bb31 712/* Remove a specific breakpoint. */
b3310ab3 713int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
a1d1bb31 714{
a1d1bb31
AL
715 CPUBreakpoint *bp;
716
f0c3c505 717 QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
a1d1bb31 718 if (bp->pc == pc && bp->flags == flags) {
b3310ab3 719 cpu_breakpoint_remove_by_ref(cpu, bp);
a1d1bb31
AL
720 return 0;
721 }
7d03f82f 722 }
a1d1bb31 723 return -ENOENT;
7d03f82f
EI
724}
725
a1d1bb31 726/* Remove a specific breakpoint by reference. */
b3310ab3 727void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
4c3a88a2 728{
f0c3c505
AF
729 QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);
730
731 breakpoint_invalidate(cpu, breakpoint->pc);
a1d1bb31 732
7267c094 733 g_free(breakpoint);
a1d1bb31
AL
734}
735
736/* Remove all matching breakpoints. */
b3310ab3 737void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
a1d1bb31 738{
c0ce998e 739 CPUBreakpoint *bp, *next;
a1d1bb31 740
f0c3c505 741 QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
b3310ab3
AF
742 if (bp->flags & mask) {
743 cpu_breakpoint_remove_by_ref(cpu, bp);
744 }
c0ce998e 745 }
4c3a88a2
FB
746}
747
c33a346e
FB
748/* enable or disable single step mode. EXCP_DEBUG is returned by the
749 CPU loop after each instruction */
3825b28f 750void cpu_single_step(CPUState *cpu, int enabled)
c33a346e 751{
ed2803da
AF
752 if (cpu->singlestep_enabled != enabled) {
753 cpu->singlestep_enabled = enabled;
754 if (kvm_enabled()) {
38e478ec 755 kvm_update_guest_debug(cpu, 0);
ed2803da 756 } else {
ccbb4d44 757 /* must flush all the translated code to avoid inconsistencies */
e22a25c9 758 /* XXX: only flush what is necessary */
38e478ec 759 CPUArchState *env = cpu->env_ptr;
e22a25c9
AL
760 tb_flush(env);
761 }
c33a346e 762 }
c33a346e
FB
763}
764
a47dddd7 765void cpu_abort(CPUState *cpu, const char *fmt, ...)
7501267e
FB
766{
767 va_list ap;
493ae1f0 768 va_list ap2;
7501267e
FB
769
770 va_start(ap, fmt);
493ae1f0 771 va_copy(ap2, ap);
7501267e
FB
772 fprintf(stderr, "qemu: fatal: ");
773 vfprintf(stderr, fmt, ap);
774 fprintf(stderr, "\n");
878096ee 775 cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
93fcfe39
AL
776 if (qemu_log_enabled()) {
777 qemu_log("qemu: fatal: ");
778 qemu_log_vprintf(fmt, ap2);
779 qemu_log("\n");
a0762859 780 log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
31b1a7b4 781 qemu_log_flush();
93fcfe39 782 qemu_log_close();
924edcae 783 }
493ae1f0 784 va_end(ap2);
f9373291 785 va_end(ap);
fd052bf6
RV
786#if defined(CONFIG_USER_ONLY)
787 {
788 struct sigaction act;
789 sigfillset(&act.sa_mask);
790 act.sa_handler = SIG_DFL;
791 sigaction(SIGABRT, &act, NULL);
792 }
793#endif
7501267e
FB
794 abort();
795}
796
0124311e 797#if !defined(CONFIG_USER_ONLY)
0dc3f44a 798/* Called from RCU critical section */
041603fe
PB
799static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
800{
801 RAMBlock *block;
802
43771539 803 block = atomic_rcu_read(&ram_list.mru_block);
9b8424d5 804 if (block && addr - block->offset < block->max_length) {
041603fe
PB
805 goto found;
806 }
0dc3f44a 807 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
9b8424d5 808 if (addr - block->offset < block->max_length) {
041603fe
PB
809 goto found;
810 }
811 }
812
813 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
814 abort();
815
816found:
43771539
PB
817 /* It is safe to write mru_block outside the iothread lock. This
818 * is what happens:
819 *
820 * mru_block = xxx
821 * rcu_read_unlock()
822 * xxx removed from list
823 * rcu_read_lock()
824 * read mru_block
825 * mru_block = NULL;
826 * call_rcu(reclaim_ramblock, xxx);
827 * rcu_read_unlock()
828 *
829 * atomic_rcu_set is not needed here. The block was already published
830 * when it was placed into the list. Here we're just making an extra
831 * copy of the pointer.
832 */
041603fe
PB
833 ram_list.mru_block = block;
834 return block;
835}
836
a2f4d5be 837static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
d24981d3 838{
041603fe 839 ram_addr_t start1;
a2f4d5be
JQ
840 RAMBlock *block;
841 ram_addr_t end;
842
843 end = TARGET_PAGE_ALIGN(start + length);
844 start &= TARGET_PAGE_MASK;
d24981d3 845
0dc3f44a 846 rcu_read_lock();
041603fe
PB
847 block = qemu_get_ram_block(start);
848 assert(block == qemu_get_ram_block(end - 1));
1240be24 849 start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
041603fe 850 cpu_tlb_reset_dirty_all(start1, length);
0dc3f44a 851 rcu_read_unlock();
d24981d3
JQ
852}
853
5579c7f3 854/* Note: start and end must be within the same ram block. */
a2f4d5be 855void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
52159192 856 unsigned client)
1ccde1cb 857{
1ccde1cb
FB
858 if (length == 0)
859 return;
c8d6f66a 860 cpu_physical_memory_clear_dirty_range_type(start, length, client);
f23db169 861
d24981d3 862 if (tcg_enabled()) {
a2f4d5be 863 tlb_reset_dirty_range_all(start, length);
5579c7f3 864 }
1ccde1cb
FB
865}
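/* Illustrative note: "client" selects one of the DIRTY_MEMORY_* bitmaps
 * (VGA, CODE, MIGRATION), so e.g. migration can clear only its own bits for
 * a range it has just synced while the VGA dirty bits stay untouched; the
 * TLB reset makes subsequent TCG writes fault into the notdirty handler and
 * mark the pages dirty again.
 */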
866
79e2b9ae 867/* Called from RCU critical section */
bb0e627a 868hwaddr memory_region_section_get_iotlb(CPUState *cpu,
149f54b5
PB
869 MemoryRegionSection *section,
870 target_ulong vaddr,
871 hwaddr paddr, hwaddr xlat,
872 int prot,
873 target_ulong *address)
e5548617 874{
a8170e5e 875 hwaddr iotlb;
e5548617
BS
876 CPUWatchpoint *wp;
877
cc5bea60 878 if (memory_region_is_ram(section->mr)) {
e5548617
BS
879 /* Normal RAM. */
880 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
149f54b5 881 + xlat;
e5548617 882 if (!section->readonly) {
b41aac4f 883 iotlb |= PHYS_SECTION_NOTDIRTY;
e5548617 884 } else {
b41aac4f 885 iotlb |= PHYS_SECTION_ROM;
e5548617
BS
886 }
887 } else {
1b3fb98f 888 iotlb = section - section->address_space->dispatch->map.sections;
149f54b5 889 iotlb += xlat;
e5548617
BS
890 }
891
892 /* Make accesses to pages with watchpoints go via the
893 watchpoint trap routines. */
ff4700b0 894 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
05068c0d 895 if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
e5548617
BS
896 /* Avoid trapping reads of pages with a write breakpoint. */
897 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
b41aac4f 898 iotlb = PHYS_SECTION_WATCH + paddr;
e5548617
BS
899 *address |= TLB_MMIO;
900 break;
901 }
902 }
903 }
904
905 return iotlb;
906}
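/* Illustrative summary of the encoding above: RAM pages encode their
 * ram_addr_t ORed with a small pseudo-section number (PHYS_SECTION_NOTDIRTY
 * or PHYS_SECTION_ROM), which is why phys_section_add() below asserts that
 * section numbers stay below TARGET_PAGE_SIZE; MMIO pages encode the
 * section's index in the dispatch map plus xlat.
 */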
9fa3e853
FB
907#endif /* defined(CONFIG_USER_ONLY) */
908
e2eef170 909#if !defined(CONFIG_USER_ONLY)
8da3ff18 910
c227f099 911static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 912 uint16_t section);
acc9d80b 913static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
54688b1e 914
a2b257d6
IM
915static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
916 qemu_anon_ram_alloc;
91138037
MA
917
918/*
 919 * Set a custom physical guest memory allocator.
920 * Accelerators with unusual needs may need this. Hopefully, we can
921 * get rid of it eventually.
922 */
a2b257d6 923void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
91138037
MA
924{
925 phys_mem_alloc = alloc;
926}
927
53cb28cb
MA
928static uint16_t phys_section_add(PhysPageMap *map,
929 MemoryRegionSection *section)
5312bd8b 930{
68f3f65b
PB
931 /* The physical section number is ORed with a page-aligned
932 * pointer to produce the iotlb entries. Thus it should
933 * never overflow into the page-aligned value.
934 */
53cb28cb 935 assert(map->sections_nb < TARGET_PAGE_SIZE);
68f3f65b 936
53cb28cb
MA
937 if (map->sections_nb == map->sections_nb_alloc) {
938 map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
939 map->sections = g_renew(MemoryRegionSection, map->sections,
940 map->sections_nb_alloc);
5312bd8b 941 }
53cb28cb 942 map->sections[map->sections_nb] = *section;
dfde4e6e 943 memory_region_ref(section->mr);
53cb28cb 944 return map->sections_nb++;
5312bd8b
AK
945}
946
058bc4b5
PB
947static void phys_section_destroy(MemoryRegion *mr)
948{
dfde4e6e
PB
949 memory_region_unref(mr);
950
058bc4b5
PB
951 if (mr->subpage) {
952 subpage_t *subpage = container_of(mr, subpage_t, iomem);
b4fefef9 953 object_unref(OBJECT(&subpage->iomem));
058bc4b5
PB
954 g_free(subpage);
955 }
956}
957
6092666e 958static void phys_sections_free(PhysPageMap *map)
5312bd8b 959{
9affd6fc
PB
960 while (map->sections_nb > 0) {
961 MemoryRegionSection *section = &map->sections[--map->sections_nb];
058bc4b5
PB
962 phys_section_destroy(section->mr);
963 }
9affd6fc
PB
964 g_free(map->sections);
965 g_free(map->nodes);
5312bd8b
AK
966}
967
ac1970fb 968static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
0f0cb164
AK
969{
970 subpage_t *subpage;
a8170e5e 971 hwaddr base = section->offset_within_address_space
0f0cb164 972 & TARGET_PAGE_MASK;
97115a8d 973 MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
53cb28cb 974 d->map.nodes, d->map.sections);
0f0cb164
AK
975 MemoryRegionSection subsection = {
976 .offset_within_address_space = base,
052e87b0 977 .size = int128_make64(TARGET_PAGE_SIZE),
0f0cb164 978 };
a8170e5e 979 hwaddr start, end;
0f0cb164 980
f3705d53 981 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
0f0cb164 982
f3705d53 983 if (!(existing->mr->subpage)) {
acc9d80b 984 subpage = subpage_init(d->as, base);
3be91e86 985 subsection.address_space = d->as;
0f0cb164 986 subsection.mr = &subpage->iomem;
ac1970fb 987 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
53cb28cb 988 phys_section_add(&d->map, &subsection));
0f0cb164 989 } else {
f3705d53 990 subpage = container_of(existing->mr, subpage_t, iomem);
0f0cb164
AK
991 }
992 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
052e87b0 993 end = start + int128_get64(section->size) - 1;
53cb28cb
MA
994 subpage_register(subpage, start, end,
995 phys_section_add(&d->map, section));
0f0cb164
AK
996}
997
998
052e87b0
PB
999static void register_multipage(AddressSpaceDispatch *d,
1000 MemoryRegionSection *section)
33417e70 1001{
a8170e5e 1002 hwaddr start_addr = section->offset_within_address_space;
53cb28cb 1003 uint16_t section_index = phys_section_add(&d->map, section);
052e87b0
PB
1004 uint64_t num_pages = int128_get64(int128_rshift(section->size,
1005 TARGET_PAGE_BITS));
dd81124b 1006
733d5ef5
PB
1007 assert(num_pages);
1008 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
33417e70
FB
1009}
1010
ac1970fb 1011static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
0f0cb164 1012{
89ae337a 1013 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
00752703 1014 AddressSpaceDispatch *d = as->next_dispatch;
99b9cc06 1015 MemoryRegionSection now = *section, remain = *section;
052e87b0 1016 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
0f0cb164 1017
733d5ef5
PB
1018 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1019 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1020 - now.offset_within_address_space;
1021
052e87b0 1022 now.size = int128_min(int128_make64(left), now.size);
ac1970fb 1023 register_subpage(d, &now);
733d5ef5 1024 } else {
052e87b0 1025 now.size = int128_zero();
733d5ef5 1026 }
052e87b0
PB
1027 while (int128_ne(remain.size, now.size)) {
1028 remain.size = int128_sub(remain.size, now.size);
1029 remain.offset_within_address_space += int128_get64(now.size);
1030 remain.offset_within_region += int128_get64(now.size);
69b67646 1031 now = remain;
052e87b0 1032 if (int128_lt(remain.size, page_size)) {
733d5ef5 1033 register_subpage(d, &now);
88266249 1034 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
052e87b0 1035 now.size = page_size;
ac1970fb 1036 register_subpage(d, &now);
69b67646 1037 } else {
052e87b0 1038 now.size = int128_and(now.size, int128_neg(page_size));
ac1970fb 1039 register_multipage(d, &now);
69b67646 1040 }
0f0cb164
AK
1041 }
1042}
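/* Worked example of the splitting above (illustrative, assuming a 4 KiB
 * target page): a section at address 0x1800 with size 0x3000 is registered
 * as a subpage for [0x1800, 0x2000), a multipage mapping for the two full
 * pages [0x2000, 0x4000), and a trailing subpage for [0x4000, 0x4800).
 */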
1043
62a2744c
SY
1044void qemu_flush_coalesced_mmio_buffer(void)
1045{
1046 if (kvm_enabled())
1047 kvm_flush_coalesced_mmio_buffer();
1048}
1049
b2a8658e
UD
1050void qemu_mutex_lock_ramlist(void)
1051{
1052 qemu_mutex_lock(&ram_list.mutex);
1053}
1054
1055void qemu_mutex_unlock_ramlist(void)
1056{
1057 qemu_mutex_unlock(&ram_list.mutex);
1058}
1059
e1e84ba0 1060#ifdef __linux__
c902760f
MT
1061
1062#include <sys/vfs.h>
1063
1064#define HUGETLBFS_MAGIC 0x958458f6
1065
fc7a5800 1066static long gethugepagesize(const char *path, Error **errp)
c902760f
MT
1067{
1068 struct statfs fs;
1069 int ret;
1070
1071 do {
9742bf26 1072 ret = statfs(path, &fs);
c902760f
MT
1073 } while (ret != 0 && errno == EINTR);
1074
1075 if (ret != 0) {
fc7a5800
HT
1076 error_setg_errno(errp, errno, "failed to get page size of file %s",
1077 path);
9742bf26 1078 return 0;
c902760f
MT
1079 }
1080
1081 if (fs.f_type != HUGETLBFS_MAGIC)
9742bf26 1082 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
c902760f
MT
1083
1084 return fs.f_bsize;
1085}
1086
04b16653
AW
1087static void *file_ram_alloc(RAMBlock *block,
1088 ram_addr_t memory,
7f56e740
PB
1089 const char *path,
1090 Error **errp)
c902760f
MT
1091{
1092 char *filename;
8ca761f6
PF
1093 char *sanitized_name;
1094 char *c;
557529dd 1095 void *area = NULL;
c902760f 1096 int fd;
557529dd 1097 uint64_t hpagesize;
fc7a5800 1098 Error *local_err = NULL;
c902760f 1099
fc7a5800
HT
1100 hpagesize = gethugepagesize(path, &local_err);
1101 if (local_err) {
1102 error_propagate(errp, local_err);
f9a49dfa 1103 goto error;
c902760f 1104 }
a2b257d6 1105 block->mr->align = hpagesize;
c902760f
MT
1106
1107 if (memory < hpagesize) {
557529dd
HT
1108 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1109 "or larger than huge page size 0x%" PRIx64,
1110 memory, hpagesize);
1111 goto error;
c902760f
MT
1112 }
1113
1114 if (kvm_enabled() && !kvm_has_sync_mmu()) {
7f56e740
PB
1115 error_setg(errp,
1116 "host lacks kvm mmu notifiers, -mem-path unsupported");
f9a49dfa 1117 goto error;
c902760f
MT
1118 }
1119
8ca761f6 1120 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
83234bf2 1121 sanitized_name = g_strdup(memory_region_name(block->mr));
8ca761f6
PF
1122 for (c = sanitized_name; *c != '\0'; c++) {
1123 if (*c == '/')
1124 *c = '_';
1125 }
1126
1127 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1128 sanitized_name);
1129 g_free(sanitized_name);
c902760f
MT
1130
1131 fd = mkstemp(filename);
1132 if (fd < 0) {
7f56e740
PB
1133 error_setg_errno(errp, errno,
1134 "unable to create backing store for hugepages");
e4ada482 1135 g_free(filename);
f9a49dfa 1136 goto error;
c902760f
MT
1137 }
1138 unlink(filename);
e4ada482 1139 g_free(filename);
c902760f
MT
1140
1141 memory = (memory+hpagesize-1) & ~(hpagesize-1);
1142
1143 /*
1144 * ftruncate is not supported by hugetlbfs in older
1145 * hosts, so don't bother bailing out on errors.
1146 * If anything goes wrong with it under other filesystems,
1147 * mmap will fail.
1148 */
7f56e740 1149 if (ftruncate(fd, memory)) {
9742bf26 1150 perror("ftruncate");
7f56e740 1151 }
c902760f 1152
dbcb8981
PB
1153 area = mmap(0, memory, PROT_READ | PROT_WRITE,
1154 (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE),
1155 fd, 0);
c902760f 1156 if (area == MAP_FAILED) {
7f56e740
PB
1157 error_setg_errno(errp, errno,
1158 "unable to map backing store for hugepages");
9742bf26 1159 close(fd);
f9a49dfa 1160 goto error;
c902760f 1161 }
ef36fa14
MT
1162
1163 if (mem_prealloc) {
38183310 1164 os_mem_prealloc(fd, area, memory);
ef36fa14
MT
1165 }
1166
04b16653 1167 block->fd = fd;
c902760f 1168 return area;
f9a49dfa
MT
1169
1170error:
1171 if (mem_prealloc) {
81b07353 1172 error_report("%s", error_get_pretty(*errp));
f9a49dfa
MT
1173 exit(1);
1174 }
1175 return NULL;
c902760f
MT
1176}
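/* Example of the size rounding above (illustrative): with 2 MiB huge pages,
 * a request of 1 GiB + 4 KiB is rounded up to 1 GiB + 2 MiB before the
 * ftruncate()/mmap(), since the hugetlbfs backing file must cover a whole
 * number of huge pages.
 */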
1177#endif
1178
0dc3f44a 1179/* Called with the ramlist lock held. */
d17b5288 1180static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653
AW
1181{
1182 RAMBlock *block, *next_block;
3e837b2c 1183 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
04b16653 1184
49cd9ac6
SH
1185 assert(size != 0); /* it would hand out same offset multiple times */
1186
0dc3f44a 1187 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
04b16653 1188 return 0;
0d53d9fe 1189 }
04b16653 1190
0dc3f44a 1191 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
f15fbc4b 1192 ram_addr_t end, next = RAM_ADDR_MAX;
04b16653 1193
62be4e3a 1194 end = block->offset + block->max_length;
04b16653 1195
0dc3f44a 1196 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
04b16653
AW
1197 if (next_block->offset >= end) {
1198 next = MIN(next, next_block->offset);
1199 }
1200 }
1201 if (next - end >= size && next - end < mingap) {
3e837b2c 1202 offset = end;
04b16653
AW
1203 mingap = next - end;
1204 }
1205 }
3e837b2c
AW
1206
1207 if (offset == RAM_ADDR_MAX) {
1208 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1209 (uint64_t)size);
1210 abort();
1211 }
1212
04b16653
AW
1213 return offset;
1214}
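/* Example of the gap search above (illustrative): with blocks already at
 * [0x0, 0x8000000) and [0x20000000, 0x28000000), a request for 0x4000000
 * bytes returns offset 0x8000000, the start of the smallest gap that can
 * still hold the new block.
 */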
1215
652d7ec2 1216ram_addr_t last_ram_offset(void)
d17b5288
AW
1217{
1218 RAMBlock *block;
1219 ram_addr_t last = 0;
1220
0dc3f44a
MD
1221 rcu_read_lock();
1222 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
62be4e3a 1223 last = MAX(last, block->offset + block->max_length);
0d53d9fe 1224 }
0dc3f44a 1225 rcu_read_unlock();
d17b5288
AW
1226 return last;
1227}
1228
ddb97f1d
JB
1229static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1230{
1231 int ret;
ddb97f1d
JB
1232
1233 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
47c8ca53 1234 if (!machine_dump_guest_core(current_machine)) {
ddb97f1d
JB
1235 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1236 if (ret) {
1237 perror("qemu_madvise");
1238 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1239 "but dump_guest_core=off specified\n");
1240 }
1241 }
1242}
1243
0dc3f44a
MD
1244/* Called within an RCU critical section, or while the ramlist lock
1245 * is held.
1246 */
20cfe881 1247static RAMBlock *find_ram_block(ram_addr_t addr)
84b89d78 1248{
20cfe881 1249 RAMBlock *block;
84b89d78 1250
0dc3f44a 1251 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
c5705a77 1252 if (block->offset == addr) {
20cfe881 1253 return block;
c5705a77
AK
1254 }
1255 }
20cfe881
HT
1256
1257 return NULL;
1258}
1259
ae3a7047 1260/* Called with iothread lock held. */
20cfe881
HT
1261void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1262{
ae3a7047 1263 RAMBlock *new_block, *block;
20cfe881 1264
0dc3f44a 1265 rcu_read_lock();
ae3a7047 1266 new_block = find_ram_block(addr);
c5705a77
AK
1267 assert(new_block);
1268 assert(!new_block->idstr[0]);
84b89d78 1269
09e5ab63
AL
1270 if (dev) {
1271 char *id = qdev_get_dev_path(dev);
84b89d78
CM
1272 if (id) {
1273 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
7267c094 1274 g_free(id);
84b89d78
CM
1275 }
1276 }
1277 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1278
0dc3f44a 1279 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
c5705a77 1280 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
84b89d78
CM
1281 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1282 new_block->idstr);
1283 abort();
1284 }
1285 }
0dc3f44a 1286 rcu_read_unlock();
c5705a77
AK
1287}
1288
ae3a7047 1289/* Called with iothread lock held. */
20cfe881
HT
1290void qemu_ram_unset_idstr(ram_addr_t addr)
1291{
ae3a7047 1292 RAMBlock *block;
20cfe881 1293
ae3a7047
MD
1294 /* FIXME: arch_init.c assumes that this is not called throughout
1295 * migration. Ignore the problem since hot-unplug during migration
1296 * does not work anyway.
1297 */
1298
0dc3f44a 1299 rcu_read_lock();
ae3a7047 1300 block = find_ram_block(addr);
20cfe881
HT
1301 if (block) {
1302 memset(block->idstr, 0, sizeof(block->idstr));
1303 }
0dc3f44a 1304 rcu_read_unlock();
20cfe881
HT
1305}
1306
8490fc78
LC
1307static int memory_try_enable_merging(void *addr, size_t len)
1308{
75cc7f01 1309 if (!machine_mem_merge(current_machine)) {
8490fc78
LC
1310 /* disabled by the user */
1311 return 0;
1312 }
1313
1314 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1315}
1316
62be4e3a
MT
1317/* Only legal before guest might have detected the memory size: e.g. on
1318 * incoming migration, or right after reset.
1319 *
 1320 * As the memory core doesn't know how memory is accessed, it is up to the
1321 * resize callback to update device state and/or add assertions to detect
1322 * misuse, if necessary.
1323 */
1324int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
1325{
1326 RAMBlock *block = find_ram_block(base);
1327
1328 assert(block);
1329
129ddaf3
MT
1330 newsize = TARGET_PAGE_ALIGN(newsize);
1331
62be4e3a
MT
1332 if (block->used_length == newsize) {
1333 return 0;
1334 }
1335
1336 if (!(block->flags & RAM_RESIZEABLE)) {
1337 error_setg_errno(errp, EINVAL,
1338 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1339 " in != 0x" RAM_ADDR_FMT, block->idstr,
1340 newsize, block->used_length);
1341 return -EINVAL;
1342 }
1343
1344 if (block->max_length < newsize) {
1345 error_setg_errno(errp, EINVAL,
1346 "Length too large: %s: 0x" RAM_ADDR_FMT
1347 " > 0x" RAM_ADDR_FMT, block->idstr,
1348 newsize, block->max_length);
1349 return -EINVAL;
1350 }
1351
1352 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1353 block->used_length = newsize;
58d2707e
PB
1354 cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
1355 DIRTY_CLIENTS_ALL);
62be4e3a
MT
1356 memory_region_set_size(block->mr, newsize);
1357 if (block->resized) {
1358 block->resized(block->idstr, newsize, block->host);
1359 }
1360 return 0;
1361}
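/* Hypothetical usage sketch (names illustrative, not from the original
 * source): a block intended to grow is created with
 *
 *     base = qemu_ram_alloc_resizeable(initial_size, max_size, resized_cb,
 *                                      mr, &err);
 *
 * and an incoming migration stream carrying a different used_length may then
 * call
 *
 *     qemu_ram_resize(base, newsize, &err);
 *
 * which succeeds only for RAM_RESIZEABLE blocks and for sizes up to
 * max_length.
 */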
1362
ef701d7b 1363static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
c5705a77 1364{
e1c57ab8 1365 RAMBlock *block;
0d53d9fe 1366 RAMBlock *last_block = NULL;
2152f5ca
JQ
1367 ram_addr_t old_ram_size, new_ram_size;
1368
1369 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
c5705a77 1370
b2a8658e 1371 qemu_mutex_lock_ramlist();
9b8424d5 1372 new_block->offset = find_ram_offset(new_block->max_length);
e1c57ab8
PB
1373
1374 if (!new_block->host) {
1375 if (xen_enabled()) {
9b8424d5
MT
1376 xen_ram_alloc(new_block->offset, new_block->max_length,
1377 new_block->mr);
e1c57ab8 1378 } else {
9b8424d5 1379 new_block->host = phys_mem_alloc(new_block->max_length,
a2b257d6 1380 &new_block->mr->align);
39228250 1381 if (!new_block->host) {
ef701d7b
HT
1382 error_setg_errno(errp, errno,
1383 "cannot set up guest memory '%s'",
1384 memory_region_name(new_block->mr));
1385 qemu_mutex_unlock_ramlist();
1386 return -1;
39228250 1387 }
9b8424d5 1388 memory_try_enable_merging(new_block->host, new_block->max_length);
6977dfe6 1389 }
c902760f 1390 }
94a6b54f 1391
0d53d9fe
MD
1392 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1393 * QLIST (which has an RCU-friendly variant) does not have insertion at
1394 * tail, so save the last element in last_block.
1395 */
0dc3f44a 1396 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
0d53d9fe 1397 last_block = block;
9b8424d5 1398 if (block->max_length < new_block->max_length) {
abb26d63
PB
1399 break;
1400 }
1401 }
1402 if (block) {
0dc3f44a 1403 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
0d53d9fe 1404 } else if (last_block) {
0dc3f44a 1405 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
0d53d9fe 1406 } else { /* list is empty */
0dc3f44a 1407 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
abb26d63 1408 }
0d6d3c87 1409 ram_list.mru_block = NULL;
94a6b54f 1410
0dc3f44a
MD
1411 /* Write list before version */
1412 smp_wmb();
f798b07f 1413 ram_list.version++;
b2a8658e 1414 qemu_mutex_unlock_ramlist();
f798b07f 1415
2152f5ca
JQ
1416 new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1417
1418 if (new_ram_size > old_ram_size) {
1ab4c8ce 1419 int i;
ae3a7047
MD
1420
1421 /* ram_list.dirty_memory[] is protected by the iothread lock. */
1ab4c8ce
JQ
1422 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1423 ram_list.dirty_memory[i] =
1424 bitmap_zero_extend(ram_list.dirty_memory[i],
1425 old_ram_size, new_ram_size);
1426 }
2152f5ca 1427 }
9b8424d5 1428 cpu_physical_memory_set_dirty_range(new_block->offset,
58d2707e
PB
1429 new_block->used_length,
1430 DIRTY_CLIENTS_ALL);
94a6b54f 1431
a904c911
PB
1432 if (new_block->host) {
1433 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1434 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1435 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1436 if (kvm_enabled()) {
1437 kvm_setup_guest_memory(new_block->host, new_block->max_length);
1438 }
e1c57ab8 1439 }
6f0437e8 1440
94a6b54f
PB
1441 return new_block->offset;
1442}
e9a1ab19 1443
0b183fc8 1444#ifdef __linux__
e1c57ab8 1445ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
dbcb8981 1446 bool share, const char *mem_path,
7f56e740 1447 Error **errp)
e1c57ab8
PB
1448{
1449 RAMBlock *new_block;
ef701d7b
HT
1450 ram_addr_t addr;
1451 Error *local_err = NULL;
e1c57ab8
PB
1452
1453 if (xen_enabled()) {
7f56e740
PB
1454 error_setg(errp, "-mem-path not supported with Xen");
1455 return -1;
e1c57ab8
PB
1456 }
1457
1458 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1459 /*
1460 * file_ram_alloc() needs to allocate just like
1461 * phys_mem_alloc, but we haven't bothered to provide
1462 * a hook there.
1463 */
7f56e740
PB
1464 error_setg(errp,
1465 "-mem-path not supported with this accelerator");
1466 return -1;
e1c57ab8
PB
1467 }
1468
1469 size = TARGET_PAGE_ALIGN(size);
1470 new_block = g_malloc0(sizeof(*new_block));
1471 new_block->mr = mr;
9b8424d5
MT
1472 new_block->used_length = size;
1473 new_block->max_length = size;
dbcb8981 1474 new_block->flags = share ? RAM_SHARED : 0;
7f56e740
PB
1475 new_block->host = file_ram_alloc(new_block, size,
1476 mem_path, errp);
1477 if (!new_block->host) {
1478 g_free(new_block);
1479 return -1;
1480 }
1481
ef701d7b
HT
1482 addr = ram_block_add(new_block, &local_err);
1483 if (local_err) {
1484 g_free(new_block);
1485 error_propagate(errp, local_err);
1486 return -1;
1487 }
1488 return addr;
e1c57ab8 1489}
0b183fc8 1490#endif
e1c57ab8 1491
62be4e3a
MT
1492static
1493ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1494 void (*resized)(const char*,
1495 uint64_t length,
1496 void *host),
1497 void *host, bool resizeable,
ef701d7b 1498 MemoryRegion *mr, Error **errp)
e1c57ab8
PB
1499{
1500 RAMBlock *new_block;
ef701d7b
HT
1501 ram_addr_t addr;
1502 Error *local_err = NULL;
e1c57ab8
PB
1503
1504 size = TARGET_PAGE_ALIGN(size);
62be4e3a 1505 max_size = TARGET_PAGE_ALIGN(max_size);
e1c57ab8
PB
1506 new_block = g_malloc0(sizeof(*new_block));
1507 new_block->mr = mr;
62be4e3a 1508 new_block->resized = resized;
9b8424d5
MT
1509 new_block->used_length = size;
1510 new_block->max_length = max_size;
62be4e3a 1511 assert(max_size >= size);
e1c57ab8
PB
1512 new_block->fd = -1;
1513 new_block->host = host;
1514 if (host) {
7bd4f430 1515 new_block->flags |= RAM_PREALLOC;
e1c57ab8 1516 }
62be4e3a
MT
1517 if (resizeable) {
1518 new_block->flags |= RAM_RESIZEABLE;
1519 }
ef701d7b
HT
1520 addr = ram_block_add(new_block, &local_err);
1521 if (local_err) {
1522 g_free(new_block);
1523 error_propagate(errp, local_err);
1524 return -1;
1525 }
1526 return addr;
e1c57ab8
PB
1527}
1528
62be4e3a
MT
1529ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1530 MemoryRegion *mr, Error **errp)
1531{
1532 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1533}
1534
ef701d7b 1535ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
6977dfe6 1536{
62be4e3a
MT
1537 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1538}
1539
1540ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
1541 void (*resized)(const char*,
1542 uint64_t length,
1543 void *host),
1544 MemoryRegion *mr, Error **errp)
1545{
1546 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
6977dfe6
YT
1547}
1548
1f2e98b6
AW
1549void qemu_ram_free_from_ptr(ram_addr_t addr)
1550{
1551 RAMBlock *block;
1552
b2a8658e 1553 qemu_mutex_lock_ramlist();
0dc3f44a 1554 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1f2e98b6 1555 if (addr == block->offset) {
0dc3f44a 1556 QLIST_REMOVE_RCU(block, next);
0d6d3c87 1557 ram_list.mru_block = NULL;
0dc3f44a
MD
1558 /* Write list before version */
1559 smp_wmb();
f798b07f 1560 ram_list.version++;
43771539 1561 g_free_rcu(block, rcu);
b2a8658e 1562 break;
1f2e98b6
AW
1563 }
1564 }
b2a8658e 1565 qemu_mutex_unlock_ramlist();
1f2e98b6
AW
1566}
1567
43771539
PB
1568static void reclaim_ramblock(RAMBlock *block)
1569{
1570 if (block->flags & RAM_PREALLOC) {
1571 ;
1572 } else if (xen_enabled()) {
1573 xen_invalidate_map_cache_entry(block->host);
1574#ifndef _WIN32
1575 } else if (block->fd >= 0) {
1576 munmap(block->host, block->max_length);
1577 close(block->fd);
1578#endif
1579 } else {
1580 qemu_anon_ram_free(block->host, block->max_length);
1581 }
1582 g_free(block);
1583}
1584
c227f099 1585void qemu_ram_free(ram_addr_t addr)
e9a1ab19 1586{
04b16653
AW
1587 RAMBlock *block;
1588
b2a8658e 1589 qemu_mutex_lock_ramlist();
0dc3f44a 1590 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
04b16653 1591 if (addr == block->offset) {
0dc3f44a 1592 QLIST_REMOVE_RCU(block, next);
0d6d3c87 1593 ram_list.mru_block = NULL;
0dc3f44a
MD
1594 /* Write list before version */
1595 smp_wmb();
f798b07f 1596 ram_list.version++;
43771539 1597 call_rcu(block, reclaim_ramblock, rcu);
b2a8658e 1598 break;
04b16653
AW
1599 }
1600 }
b2a8658e 1601 qemu_mutex_unlock_ramlist();
e9a1ab19
FB
1602}
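/* Note on the reclamation above (illustrative): QLIST_REMOVE_RCU() only
 * unlinks the block, while the munmap()/free in reclaim_ramblock() is
 * deferred by call_rcu() until every reader that may still reach the block
 * via QLIST_FOREACH_RCU() has left its RCU critical section.
 */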
1603
cd19cfa2
HY
1604#ifndef _WIN32
1605void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1606{
1607 RAMBlock *block;
1608 ram_addr_t offset;
1609 int flags;
1610 void *area, *vaddr;
1611
0dc3f44a 1612 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
cd19cfa2 1613 offset = addr - block->offset;
9b8424d5 1614 if (offset < block->max_length) {
1240be24 1615 vaddr = ramblock_ptr(block, offset);
7bd4f430 1616 if (block->flags & RAM_PREALLOC) {
cd19cfa2 1617 ;
dfeaf2ab
MA
1618 } else if (xen_enabled()) {
1619 abort();
cd19cfa2
HY
1620 } else {
1621 flags = MAP_FIXED;
3435f395 1622 if (block->fd >= 0) {
dbcb8981
PB
1623 flags |= (block->flags & RAM_SHARED ?
1624 MAP_SHARED : MAP_PRIVATE);
3435f395
MA
1625 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1626 flags, block->fd, offset);
cd19cfa2 1627 } else {
2eb9fbaa
MA
1628 /*
1629 * Remap needs to match alloc. Accelerators that
1630 * set phys_mem_alloc never remap. If they did,
1631 * we'd need a remap hook here.
1632 */
1633 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1634
cd19cfa2
HY
1635 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1636 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1637 flags, -1, 0);
cd19cfa2
HY
1638 }
1639 if (area != vaddr) {
f15fbc4b
AP
1640 fprintf(stderr, "Could not remap addr: "
1641 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
cd19cfa2
HY
1642 length, addr);
1643 exit(1);
1644 }
8490fc78 1645 memory_try_enable_merging(vaddr, length);
ddb97f1d 1646 qemu_ram_setup_dump(vaddr, length);
cd19cfa2 1647 }
cd19cfa2
HY
1648 }
1649 }
1650}
1651#endif /* !_WIN32 */
1652
a35ba7be
PB
1653int qemu_get_ram_fd(ram_addr_t addr)
1654{
ae3a7047
MD
1655 RAMBlock *block;
1656 int fd;
a35ba7be 1657
0dc3f44a 1658 rcu_read_lock();
ae3a7047
MD
1659 block = qemu_get_ram_block(addr);
1660 fd = block->fd;
0dc3f44a 1661 rcu_read_unlock();
ae3a7047 1662 return fd;
a35ba7be
PB
1663}
1664
3fd74b84
DM
1665void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
1666{
ae3a7047
MD
1667 RAMBlock *block;
1668 void *ptr;
3fd74b84 1669
0dc3f44a 1670 rcu_read_lock();
ae3a7047
MD
1671 block = qemu_get_ram_block(addr);
1672 ptr = ramblock_ptr(block, 0);
0dc3f44a 1673 rcu_read_unlock();
ae3a7047 1674 return ptr;
3fd74b84
DM
1675}
1676
1b5ec234 1677/* Return a host pointer to ram allocated with qemu_ram_alloc.
ae3a7047
MD
1678 * This should not be used for general purpose DMA. Use address_space_map
1679 * or address_space_rw instead. For local memory (e.g. video ram) that the
1680 * device owns, use memory_region_get_ram_ptr.
0dc3f44a
MD
1681 *
1682 * By the time this function returns, the returned pointer is not protected
1683 * by RCU anymore. If the caller is not within an RCU critical section and
1684 * does not hold the iothread lock, it must have other means of protecting the
1685 * pointer, such as a reference to the region that includes the incoming
1686 * ram_addr_t.
1b5ec234
PB
1687 */
1688void *qemu_get_ram_ptr(ram_addr_t addr)
1689{
ae3a7047
MD
1690 RAMBlock *block;
1691 void *ptr;
1b5ec234 1692
0dc3f44a 1693 rcu_read_lock();
ae3a7047
MD
1694 block = qemu_get_ram_block(addr);
1695
1696 if (xen_enabled() && block->host == NULL) {
0d6d3c87
PB
1697 /* We need to check if the requested address is in the RAM
1698 * because we don't want to map the entire memory in QEMU.
1699 * In that case just map until the end of the page.
1700 */
1701 if (block->offset == 0) {
ae3a7047 1702 ptr = xen_map_cache(addr, 0, 0);
0dc3f44a 1703 goto unlock;
0d6d3c87 1704 }
ae3a7047
MD
1705
1706 block->host = xen_map_cache(block->offset, block->max_length, 1);
0d6d3c87 1707 }
ae3a7047
MD
1708 ptr = ramblock_ptr(block, addr - block->offset);
1709
0dc3f44a
MD
1710unlock:
1711 rcu_read_unlock();
ae3a7047 1712 return ptr;
dc828ca1
PB
1713}
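/* Hypothetical caller sketch for the RCU rule described above (illustrative,
 * not from the original source):
 *
 *     rcu_read_lock();
 *     void *p = qemu_get_ram_ptr(ram_addr);
 *     memcpy(p, buf, len);        // block cannot be reclaimed while locked
 *     rcu_read_unlock();
 *
 * A caller that keeps the pointer after rcu_read_unlock() must instead hold
 * a reference on the MemoryRegion that owns the block.
 */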
1714
38bee5dc 1715/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
ae3a7047 1716 * but takes a size argument.
0dc3f44a
MD
1717 *
1718 * By the time this function returns, the returned pointer is not protected
1719 * by RCU anymore. If the caller is not within an RCU critical section and
1720 * does not hold the iothread lock, it must have other means of protecting the
1721 * pointer, such as a reference to the region that includes the incoming
1722 * ram_addr_t.
ae3a7047 1723 */
cb85f7ab 1724static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
38bee5dc 1725{
ae3a7047 1726 void *ptr;
8ab934f9
SS
1727 if (*size == 0) {
1728 return NULL;
1729 }
868bb33f 1730 if (xen_enabled()) {
e41d7c69 1731 return xen_map_cache(addr, *size, 1);
868bb33f 1732 } else {
38bee5dc 1733 RAMBlock *block;
0dc3f44a
MD
1734 rcu_read_lock();
1735 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
9b8424d5
MT
1736 if (addr - block->offset < block->max_length) {
1737 if (addr - block->offset + *size > block->max_length)
1738 *size = block->max_length - addr + block->offset;
ae3a7047 1739 ptr = ramblock_ptr(block, addr - block->offset);
0dc3f44a 1740 rcu_read_unlock();
ae3a7047 1741 return ptr;
38bee5dc
SS
1742 }
1743 }
1744
1745 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1746 abort();
38bee5dc
SS
1747 }
1748}
1749
7443b437 1750/* Some of the softmmu routines need to translate from a host pointer
ae3a7047
MD
1751 * (typically a TLB entry) back to a ram offset.
1752 *
1753 * By the time this function returns, the returned pointer is not protected
1754 * by RCU anymore. If the caller is not within an RCU critical section and
1755 * does not hold the iothread lock, it must have other means of protecting the
1756 * pointer, such as a reference to the region that includes the incoming
1757 * ram_addr_t.
1758 */
1b5ec234 1759MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 1760{
94a6b54f
PB
1761 RAMBlock *block;
1762 uint8_t *host = ptr;
ae3a7047 1763 MemoryRegion *mr;
94a6b54f 1764
868bb33f 1765 if (xen_enabled()) {
0dc3f44a 1766 rcu_read_lock();
e41d7c69 1767 *ram_addr = xen_ram_addr_from_mapcache(ptr);
ae3a7047 1768 mr = qemu_get_ram_block(*ram_addr)->mr;
0dc3f44a 1769 rcu_read_unlock();
ae3a7047 1770 return mr;
712c2b41
SS
1771 }
1772
0dc3f44a
MD
1773 rcu_read_lock();
1774 block = atomic_rcu_read(&ram_list.mru_block);
9b8424d5 1775 if (block && block->host && host - block->host < block->max_length) {
23887b79
PB
1776 goto found;
1777 }
1778
0dc3f44a 1779 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
432d268c
JN
1780 /* This case happens when the block is not mapped. */
1781 if (block->host == NULL) {
1782 continue;
1783 }
9b8424d5 1784 if (host - block->host < block->max_length) {
23887b79 1785 goto found;
f471a17e 1786 }
94a6b54f 1787 }
432d268c 1788
0dc3f44a 1789 rcu_read_unlock();
1b5ec234 1790 return NULL;
23887b79
PB
1791
1792found:
1793 *ram_addr = block->offset + (host - block->host);
ae3a7047 1794 mr = block->mr;
0dc3f44a 1795 rcu_read_unlock();
ae3a7047 1796 return mr;
e890261f 1797}
f471a17e 1798
a8170e5e 1799static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
0e0df1e2 1800 uint64_t val, unsigned size)
9fa3e853 1801{
52159192 1802 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
0e0df1e2 1803 tb_invalidate_phys_page_fast(ram_addr, size);
3a7d929e 1804 }
0e0df1e2
AK
1805 switch (size) {
1806 case 1:
1807 stb_p(qemu_get_ram_ptr(ram_addr), val);
1808 break;
1809 case 2:
1810 stw_p(qemu_get_ram_ptr(ram_addr), val);
1811 break;
1812 case 4:
1813 stl_p(qemu_get_ram_ptr(ram_addr), val);
1814 break;
1815 default:
1816 abort();
3a7d929e 1817 }
58d2707e
PB
1818 /* Set both VGA and migration bits for simplicity and to remove
1819 * the notdirty callback faster.
1820 */
1821 cpu_physical_memory_set_dirty_range(ram_addr, size,
1822 DIRTY_CLIENTS_NOCODE);
f23db169
FB
1823 /* we remove the notdirty callback only if the code has been
1824 flushed */
a2cd8c85 1825 if (!cpu_physical_memory_is_clean(ram_addr)) {
4917cf44 1826 CPUArchState *env = current_cpu->env_ptr;
93afeade 1827 tlb_set_dirty(env, current_cpu->mem_io_vaddr);
4917cf44 1828 }
9fa3e853
FB
1829}
1830
b018ddf6
PB
1831static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1832 unsigned size, bool is_write)
1833{
1834 return is_write;
1835}
1836
0e0df1e2 1837static const MemoryRegionOps notdirty_mem_ops = {
0e0df1e2 1838 .write = notdirty_mem_write,
b018ddf6 1839 .valid.accepts = notdirty_mem_accepts,
0e0df1e2 1840 .endianness = DEVICE_NATIVE_ENDIAN,
1ccde1cb
FB
1841};
1842
0f459d16 1843/* Generate a debug exception if a watchpoint has been hit. */
66b9b43c 1844static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
0f459d16 1845{
93afeade
AF
1846 CPUState *cpu = current_cpu;
1847 CPUArchState *env = cpu->env_ptr;
06d55cc1 1848 target_ulong pc, cs_base;
0f459d16 1849 target_ulong vaddr;
a1d1bb31 1850 CPUWatchpoint *wp;
06d55cc1 1851 int cpu_flags;
0f459d16 1852
ff4700b0 1853 if (cpu->watchpoint_hit) {
06d55cc1
AL
1854 /* We re-entered the check after replacing the TB. Now raise
1855 * the debug interrupt so that it will trigger after the
1856 * current instruction. */
93afeade 1857 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
06d55cc1
AL
1858 return;
1859 }
93afeade 1860 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
ff4700b0 1861 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
05068c0d
PM
1862 if (cpu_watchpoint_address_matches(wp, vaddr, len)
1863 && (wp->flags & flags)) {
08225676
PM
1864 if (flags == BP_MEM_READ) {
1865 wp->flags |= BP_WATCHPOINT_HIT_READ;
1866 } else {
1867 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
1868 }
1869 wp->hitaddr = vaddr;
66b9b43c 1870 wp->hitattrs = attrs;
ff4700b0
AF
1871 if (!cpu->watchpoint_hit) {
1872 cpu->watchpoint_hit = wp;
239c51a5 1873 tb_check_watchpoint(cpu);
6e140f28 1874 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
27103424 1875 cpu->exception_index = EXCP_DEBUG;
5638d180 1876 cpu_loop_exit(cpu);
6e140f28
AL
1877 } else {
1878 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
648f034c 1879 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
0ea8cb88 1880 cpu_resume_from_signal(cpu, NULL);
6e140f28 1881 }
06d55cc1 1882 }
6e140f28
AL
1883 } else {
1884 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
1885 }
1886 }
1887}
1888
6658ffb8
PB
1889/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1890 so these check for a hit then pass through to the normal out-of-line
1891 phys routines. */
66b9b43c
PM
1892static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
1893 unsigned size, MemTxAttrs attrs)
6658ffb8 1894{
66b9b43c
PM
1895 MemTxResult res;
1896 uint64_t data;
1897
1898 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
1ec9b909 1899 switch (size) {
66b9b43c
PM
1900 case 1:
1901 data = address_space_ldub(&address_space_memory, addr, attrs, &res);
1902 break;
1903 case 2:
1904 data = address_space_lduw(&address_space_memory, addr, attrs, &res);
1905 break;
1906 case 4:
1907 data = address_space_ldl(&address_space_memory, addr, attrs, &res);
1908 break;
1ec9b909
AK
1909 default: abort();
1910 }
66b9b43c
PM
1911 *pdata = data;
1912 return res;
6658ffb8
PB
1913}
1914
66b9b43c
PM
1915static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
1916 uint64_t val, unsigned size,
1917 MemTxAttrs attrs)
6658ffb8 1918{
66b9b43c
PM
1919 MemTxResult res;
1920
1921 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
1ec9b909 1922 switch (size) {
67364150 1923 case 1:
66b9b43c 1924 address_space_stb(&address_space_memory, addr, val, attrs, &res);
67364150
MF
1925 break;
1926 case 2:
66b9b43c 1927 address_space_stw(&address_space_memory, addr, val, attrs, &res);
67364150
MF
1928 break;
1929 case 4:
66b9b43c 1930 address_space_stl(&address_space_memory, addr, val, attrs, &res);
67364150 1931 break;
1ec9b909
AK
1932 default: abort();
1933 }
66b9b43c 1934 return res;
6658ffb8
PB
1935}
1936
1ec9b909 1937static const MemoryRegionOps watch_mem_ops = {
66b9b43c
PM
1938 .read_with_attrs = watch_mem_read,
1939 .write_with_attrs = watch_mem_write,
1ec9b909 1940 .endianness = DEVICE_NATIVE_ENDIAN,
6658ffb8 1941};
6658ffb8 1942
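/* Illustrative sketch: a debug front end arms a 4-byte write watchpoint so
 * that guest stores to "watch_vaddr" are routed through the watch_mem_ops
 * handlers above.  The helper name and "watch_vaddr" are hypothetical;
 * cpu_watchpoint_insert() and the BP_* flags are existing QEMU APIs assumed
 * here.
 */
static void my_arm_write_watchpoint(CPUState *cpu, vaddr watch_vaddr)
{
    if (cpu_watchpoint_insert(cpu, watch_vaddr, 4,
                              BP_MEM_WRITE | BP_GDB, NULL) < 0) {
        /* e.g. length/alignment rejected by the target */
    }
}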
f25a49e0
PM
1943static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
1944 unsigned len, MemTxAttrs attrs)
db7b5426 1945{
acc9d80b 1946 subpage_t *subpage = opaque;
ff6cff75 1947 uint8_t buf[8];
5c9eb028 1948 MemTxResult res;
791af8c8 1949
db7b5426 1950#if defined(DEBUG_SUBPAGE)
016e9d62 1951 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
acc9d80b 1952 subpage, len, addr);
db7b5426 1953#endif
5c9eb028
PM
1954 res = address_space_read(subpage->as, addr + subpage->base,
1955 attrs, buf, len);
1956 if (res) {
1957 return res;
f25a49e0 1958 }
acc9d80b
JK
1959 switch (len) {
1960 case 1:
f25a49e0
PM
1961 *data = ldub_p(buf);
1962 return MEMTX_OK;
acc9d80b 1963 case 2:
f25a49e0
PM
1964 *data = lduw_p(buf);
1965 return MEMTX_OK;
acc9d80b 1966 case 4:
f25a49e0
PM
1967 *data = ldl_p(buf);
1968 return MEMTX_OK;
ff6cff75 1969 case 8:
f25a49e0
PM
1970 *data = ldq_p(buf);
1971 return MEMTX_OK;
acc9d80b
JK
1972 default:
1973 abort();
1974 }
db7b5426
BS
1975}
1976
f25a49e0
PM
1977static MemTxResult subpage_write(void *opaque, hwaddr addr,
1978 uint64_t value, unsigned len, MemTxAttrs attrs)
db7b5426 1979{
acc9d80b 1980 subpage_t *subpage = opaque;
ff6cff75 1981 uint8_t buf[8];
acc9d80b 1982
db7b5426 1983#if defined(DEBUG_SUBPAGE)
016e9d62 1984 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
acc9d80b
JK
1985 " value %"PRIx64"\n",
1986 __func__, subpage, len, addr, value);
db7b5426 1987#endif
acc9d80b
JK
1988 switch (len) {
1989 case 1:
1990 stb_p(buf, value);
1991 break;
1992 case 2:
1993 stw_p(buf, value);
1994 break;
1995 case 4:
1996 stl_p(buf, value);
1997 break;
ff6cff75
PB
1998 case 8:
1999 stq_p(buf, value);
2000 break;
acc9d80b
JK
2001 default:
2002 abort();
2003 }
5c9eb028
PM
2004 return address_space_write(subpage->as, addr + subpage->base,
2005 attrs, buf, len);
db7b5426
BS
2006}
2007
c353e4cc 2008static bool subpage_accepts(void *opaque, hwaddr addr,
016e9d62 2009 unsigned len, bool is_write)
c353e4cc 2010{
acc9d80b 2011 subpage_t *subpage = opaque;
c353e4cc 2012#if defined(DEBUG_SUBPAGE)
016e9d62 2013 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
acc9d80b 2014 __func__, subpage, is_write ? 'w' : 'r', len, addr);
c353e4cc
PB
2015#endif
2016
acc9d80b 2017 return address_space_access_valid(subpage->as, addr + subpage->base,
016e9d62 2018 len, is_write);
c353e4cc
PB
2019}
2020
70c68e44 2021static const MemoryRegionOps subpage_ops = {
f25a49e0
PM
2022 .read_with_attrs = subpage_read,
2023 .write_with_attrs = subpage_write,
ff6cff75
PB
2024 .impl.min_access_size = 1,
2025 .impl.max_access_size = 8,
2026 .valid.min_access_size = 1,
2027 .valid.max_access_size = 8,
c353e4cc 2028 .valid.accepts = subpage_accepts,
70c68e44 2029 .endianness = DEVICE_NATIVE_ENDIAN,
db7b5426
BS
2030};
2031
c227f099 2032static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 2033 uint16_t section)
db7b5426
BS
2034{
2035 int idx, eidx;
2036
2037 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2038 return -1;
2039 idx = SUBPAGE_IDX(start);
2040 eidx = SUBPAGE_IDX(end);
2041#if defined(DEBUG_SUBPAGE)
016e9d62
AK
2042 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2043 __func__, mmio, start, end, idx, eidx, section);
db7b5426 2044#endif
db7b5426 2045 for (; idx <= eidx; idx++) {
5312bd8b 2046 mmio->sub_section[idx] = section;
db7b5426
BS
2047 }
2048
2049 return 0;
2050}
2051
acc9d80b 2052static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
db7b5426 2053{
c227f099 2054 subpage_t *mmio;
db7b5426 2055
7267c094 2056 mmio = g_malloc0(sizeof(subpage_t));
1eec614b 2057
acc9d80b 2058 mmio->as = as;
1eec614b 2059 mmio->base = base;
2c9b15ca 2060 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
b4fefef9 2061 NULL, TARGET_PAGE_SIZE);
b3b00c78 2062 mmio->iomem.subpage = true;
db7b5426 2063#if defined(DEBUG_SUBPAGE)
016e9d62
AK
2064 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2065 mmio, base, TARGET_PAGE_SIZE);
db7b5426 2066#endif
b41aac4f 2067 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
db7b5426
BS
2068
2069 return mmio;
2070}
2071
a656e22f
PC
2072static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2073 MemoryRegion *mr)
5312bd8b 2074{
a656e22f 2075 assert(as);
5312bd8b 2076 MemoryRegionSection section = {
a656e22f 2077 .address_space = as,
5312bd8b
AK
2078 .mr = mr,
2079 .offset_within_address_space = 0,
2080 .offset_within_region = 0,
052e87b0 2081 .size = int128_2_64(),
5312bd8b
AK
2082 };
2083
53cb28cb 2084 return phys_section_add(map, &section);
5312bd8b
AK
2085}
2086
9d82b5a7 2087MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index)
aa102231 2088{
79e2b9ae
PB
2089 AddressSpaceDispatch *d = atomic_rcu_read(&cpu->memory_dispatch);
2090 MemoryRegionSection *sections = d->map.sections;
9d82b5a7
PB
2091
2092 return sections[index & ~TARGET_PAGE_MASK].mr;
aa102231
AK
2093}
2094
e9179ce1
AK
2095static void io_mem_init(void)
2096{
1f6245e5 2097 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
2c9b15ca 2098 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
1f6245e5 2099 NULL, UINT64_MAX);
2c9b15ca 2100 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
1f6245e5 2101 NULL, UINT64_MAX);
2c9b15ca 2102 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
1f6245e5 2103 NULL, UINT64_MAX);
e9179ce1
AK
2104}
2105
ac1970fb 2106static void mem_begin(MemoryListener *listener)
00752703
PB
2107{
2108 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
53cb28cb
MA
2109 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2110 uint16_t n;
2111
a656e22f 2112 n = dummy_section(&d->map, as, &io_mem_unassigned);
53cb28cb 2113 assert(n == PHYS_SECTION_UNASSIGNED);
a656e22f 2114 n = dummy_section(&d->map, as, &io_mem_notdirty);
53cb28cb 2115 assert(n == PHYS_SECTION_NOTDIRTY);
a656e22f 2116 n = dummy_section(&d->map, as, &io_mem_rom);
53cb28cb 2117 assert(n == PHYS_SECTION_ROM);
a656e22f 2118 n = dummy_section(&d->map, as, &io_mem_watch);
53cb28cb 2119 assert(n == PHYS_SECTION_WATCH);
00752703 2120
9736e55b 2121 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
00752703
PB
2122 d->as = as;
2123 as->next_dispatch = d;
2124}
2125
79e2b9ae
PB
2126static void address_space_dispatch_free(AddressSpaceDispatch *d)
2127{
2128 phys_sections_free(&d->map);
2129 g_free(d);
2130}
2131
00752703 2132static void mem_commit(MemoryListener *listener)
ac1970fb 2133{
89ae337a 2134 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
0475d94f
PB
2135 AddressSpaceDispatch *cur = as->dispatch;
2136 AddressSpaceDispatch *next = as->next_dispatch;
2137
53cb28cb 2138 phys_page_compact_all(next, next->map.nodes_nb);
b35ba30f 2139
79e2b9ae 2140 atomic_rcu_set(&as->dispatch, next);
53cb28cb 2141 if (cur) {
79e2b9ae 2142 call_rcu(cur, address_space_dispatch_free, rcu);
53cb28cb 2143 }
9affd6fc
PB
2144}
2145
1d71148e 2146static void tcg_commit(MemoryListener *listener)
50c1e149 2147{
182735ef 2148 CPUState *cpu;
117712c3
AK
2149
2150 /* since each CPU stores ram addresses in its TLB cache, we must
2151 reset the modified entries */
2152 /* XXX: slow ! */
bdc44640 2153 CPU_FOREACH(cpu) {
33bde2e1
EI
2154 /* FIXME: Disentangle the cpu.h circular files deps so we can
2155 directly get the right CPU from listener. */
2156 if (cpu->tcg_as_listener != listener) {
2157 continue;
2158 }
76e5c76f 2159 cpu_reload_memory_map(cpu);
117712c3 2160 }
50c1e149
AK
2161}
2162
ac1970fb
AK
2163void address_space_init_dispatch(AddressSpace *as)
2164{
00752703 2165 as->dispatch = NULL;
89ae337a 2166 as->dispatch_listener = (MemoryListener) {
ac1970fb 2167 .begin = mem_begin,
00752703 2168 .commit = mem_commit,
ac1970fb
AK
2169 .region_add = mem_add,
2170 .region_nop = mem_add,
2171 .priority = 0,
2172 };
89ae337a 2173 memory_listener_register(&as->dispatch_listener, as);
ac1970fb
AK
2174}
2175
6e48e8f9
PB
2176void address_space_unregister(AddressSpace *as)
2177{
2178 memory_listener_unregister(&as->dispatch_listener);
2179}
2180
83f3c251
AK
2181void address_space_destroy_dispatch(AddressSpace *as)
2182{
2183 AddressSpaceDispatch *d = as->dispatch;
2184
79e2b9ae
PB
2185 atomic_rcu_set(&as->dispatch, NULL);
2186 if (d) {
2187 call_rcu(d, address_space_dispatch_free, rcu);
2188 }
83f3c251
AK
2189}
2190
62152b8a
AK
2191static void memory_map_init(void)
2192{
7267c094 2193 system_memory = g_malloc(sizeof(*system_memory));
03f49957 2194
57271d63 2195 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
7dca8043 2196 address_space_init(&address_space_memory, system_memory, "memory");
309cb471 2197
7267c094 2198 system_io = g_malloc(sizeof(*system_io));
3bb28b72
JK
2199 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2200 65536);
7dca8043 2201 address_space_init(&address_space_io, system_io, "I/O");
62152b8a
AK
2202}
2203
2204MemoryRegion *get_system_memory(void)
2205{
2206 return system_memory;
2207}
2208
309cb471
AK
2209MemoryRegion *get_system_io(void)
2210{
2211 return system_io;
2212}
2213
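/* Illustrative sketch: board code typically hangs its RAM off the region
 * returned by get_system_memory().  The region name, owner and 128 MiB size
 * are hypothetical; memory_region_init_ram(), error_abort and
 * memory_region_add_subregion() are existing QEMU APIs assumed here.
 */
static void my_board_map_ram(Object *owner)
{
    static MemoryRegion my_board_ram;

    memory_region_init_ram(&my_board_ram, owner, "my-board.ram",
                           128 * 1024 * 1024, &error_abort);
    memory_region_add_subregion(get_system_memory(), 0, &my_board_ram);
}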
e2eef170
PB
2214#endif /* !defined(CONFIG_USER_ONLY) */
2215
13eb76e0
FB
2216/* physical memory access (slow version, mainly for debug) */
2217#if defined(CONFIG_USER_ONLY)
f17ec444 2218int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
a68fe89c 2219 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2220{
2221 int l, flags;
2222 target_ulong page;
53a5960a 2223 void * p;
13eb76e0
FB
2224
2225 while (len > 0) {
2226 page = addr & TARGET_PAGE_MASK;
2227 l = (page + TARGET_PAGE_SIZE) - addr;
2228 if (l > len)
2229 l = len;
2230 flags = page_get_flags(page);
2231 if (!(flags & PAGE_VALID))
a68fe89c 2232 return -1;
13eb76e0
FB
2233 if (is_write) {
2234 if (!(flags & PAGE_WRITE))
a68fe89c 2235 return -1;
579a97f7 2236 /* XXX: this code should not depend on lock_user */
72fb7daa 2237 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 2238 return -1;
72fb7daa
AJ
2239 memcpy(p, buf, l);
2240 unlock_user(p, addr, l);
13eb76e0
FB
2241 } else {
2242 if (!(flags & PAGE_READ))
a68fe89c 2243 return -1;
579a97f7 2244 /* XXX: this code should not depend on lock_user */
72fb7daa 2245 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 2246 return -1;
72fb7daa 2247 memcpy(buf, p, l);
5b257578 2248 unlock_user(p, addr, 0);
13eb76e0
FB
2249 }
2250 len -= l;
2251 buf += l;
2252 addr += l;
2253 }
a68fe89c 2254 return 0;
13eb76e0 2255}
8df1cd07 2256
13eb76e0 2257#else
51d7a9eb 2258
845b6214 2259static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
a8170e5e 2260 hwaddr length)
51d7a9eb 2261{
e87f7778
PB
2262 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
2263 /* No early return if dirty_log_mask is or becomes 0, because
2264 * cpu_physical_memory_set_dirty_range will still call
2265 * xen_modified_memory.
2266 */
2267 if (dirty_log_mask) {
2268 dirty_log_mask =
2269 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
2270 }
2271 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
2272 tb_invalidate_phys_range(addr, addr + length);
2273 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
51d7a9eb 2274 }
e87f7778 2275 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
51d7a9eb
AP
2276}
2277
23326164 2278static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
82f2563f 2279{
e1622f4b 2280 unsigned access_size_max = mr->ops->valid.max_access_size;
23326164
RH
2281
2282 /* Regions are assumed to support 1-4 byte accesses unless
2283 otherwise specified. */
23326164
RH
2284 if (access_size_max == 0) {
2285 access_size_max = 4;
2286 }
2287
2288 /* Bound the maximum access by the alignment of the address. */
2289 if (!mr->ops->impl.unaligned) {
2290 unsigned align_size_max = addr & -addr;
2291 if (align_size_max != 0 && align_size_max < access_size_max) {
2292 access_size_max = align_size_max;
2293 }
82f2563f 2294 }
23326164
RH
2295
2296 /* Don't attempt accesses larger than the maximum. */
2297 if (l > access_size_max) {
2298 l = access_size_max;
82f2563f 2299 }
098178f2
PB
2300 if (l & (l - 1)) {
2301 l = 1 << (qemu_fls(l) - 1);
2302 }
23326164
RH
2303
2304 return l;
82f2563f
PB
2305}
2306
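/* Worked example (illustrative) for memory_access_size() above: with
 * valid.max_access_size = 4, impl.unaligned = false, a request of l = 8
 * bytes at addr = 0x1006 proceeds as follows:
 *   - access_size_max starts at 4;
 *   - addr & -addr == 2, so alignment caps access_size_max at 2;
 *   - l = 8 exceeds that and becomes 2 (already a power of two),
 * so the caller performs a 2-byte access first and loops over the remainder
 * at the now-aligned address.
 */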
5c9eb028
PM
2307MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2308 uint8_t *buf, int len, bool is_write)
13eb76e0 2309{
149f54b5 2310 hwaddr l;
13eb76e0 2311 uint8_t *ptr;
791af8c8 2312 uint64_t val;
149f54b5 2313 hwaddr addr1;
5c8a00ce 2314 MemoryRegion *mr;
3b643495 2315 MemTxResult result = MEMTX_OK;
3b46e624 2316
41063e1e 2317 rcu_read_lock();
13eb76e0 2318 while (len > 0) {
149f54b5 2319 l = len;
5c8a00ce 2320 mr = address_space_translate(as, addr, &addr1, &l, is_write);
3b46e624 2321
13eb76e0 2322 if (is_write) {
5c8a00ce
PB
2323 if (!memory_access_is_direct(mr, is_write)) {
2324 l = memory_access_size(mr, l, addr1);
4917cf44 2325 /* XXX: could force current_cpu to NULL to avoid
6a00d601 2326 potential bugs */
23326164
RH
2327 switch (l) {
2328 case 8:
2329 /* 64 bit write access */
2330 val = ldq_p(buf);
3b643495
PM
2331 result |= memory_region_dispatch_write(mr, addr1, val, 8,
2332 attrs);
23326164
RH
2333 break;
2334 case 4:
1c213d19 2335 /* 32 bit write access */
c27004ec 2336 val = ldl_p(buf);
3b643495
PM
2337 result |= memory_region_dispatch_write(mr, addr1, val, 4,
2338 attrs);
23326164
RH
2339 break;
2340 case 2:
1c213d19 2341 /* 16 bit write access */
c27004ec 2342 val = lduw_p(buf);
3b643495
PM
2343 result |= memory_region_dispatch_write(mr, addr1, val, 2,
2344 attrs);
23326164
RH
2345 break;
2346 case 1:
1c213d19 2347 /* 8 bit write access */
c27004ec 2348 val = ldub_p(buf);
3b643495
PM
2349 result |= memory_region_dispatch_write(mr, addr1, val, 1,
2350 attrs);
23326164
RH
2351 break;
2352 default:
2353 abort();
13eb76e0 2354 }
2bbfa05d 2355 } else {
5c8a00ce 2356 addr1 += memory_region_get_ram_addr(mr);
13eb76e0 2357 /* RAM case */
5579c7f3 2358 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 2359 memcpy(ptr, buf, l);
845b6214 2360 invalidate_and_set_dirty(mr, addr1, l);
13eb76e0
FB
2361 }
2362 } else {
5c8a00ce 2363 if (!memory_access_is_direct(mr, is_write)) {
13eb76e0 2364 /* I/O case */
5c8a00ce 2365 l = memory_access_size(mr, l, addr1);
23326164
RH
2366 switch (l) {
2367 case 8:
2368 /* 64 bit read access */
3b643495
PM
2369 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
2370 attrs);
23326164
RH
2371 stq_p(buf, val);
2372 break;
2373 case 4:
13eb76e0 2374 /* 32 bit read access */
3b643495
PM
2375 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
2376 attrs);
c27004ec 2377 stl_p(buf, val);
23326164
RH
2378 break;
2379 case 2:
13eb76e0 2380 /* 16 bit read access */
3b643495
PM
2381 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
2382 attrs);
c27004ec 2383 stw_p(buf, val);
23326164
RH
2384 break;
2385 case 1:
1c213d19 2386 /* 8 bit read access */
3b643495
PM
2387 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
2388 attrs);
c27004ec 2389 stb_p(buf, val);
23326164
RH
2390 break;
2391 default:
2392 abort();
13eb76e0
FB
2393 }
2394 } else {
2395 /* RAM case */
5c8a00ce 2396 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
f3705d53 2397 memcpy(buf, ptr, l);
13eb76e0
FB
2398 }
2399 }
2400 len -= l;
2401 buf += l;
2402 addr += l;
2403 }
41063e1e 2404 rcu_read_unlock();
fd8aaa76 2405
3b643495 2406 return result;
13eb76e0 2407}
8df1cd07 2408
5c9eb028
PM
2409MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2410 const uint8_t *buf, int len)
ac1970fb 2411{
5c9eb028 2412 return address_space_rw(as, addr, attrs, (uint8_t *)buf, len, true);
ac1970fb
AK
2413}
2414
5c9eb028
PM
2415MemTxResult address_space_read(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2416 uint8_t *buf, int len)
ac1970fb 2417{
5c9eb028 2418 return address_space_rw(as, addr, attrs, buf, len, false);
ac1970fb
AK
2419}
2420
2421
a8170e5e 2422void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
ac1970fb
AK
2423 int len, int is_write)
2424{
5c9eb028
PM
2425 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
2426 buf, len, is_write);
ac1970fb
AK
2427}
2428
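/* Illustrative usage sketch (not part of the original file): read a buffer
 * from guest-physical memory and write it back at a different address using
 * the wrappers above.  "GUEST_SRC" and "GUEST_DST" are hypothetical.
 */
static void my_copy_guest_buffer(void)
{
    const hwaddr GUEST_SRC = 0x1000;   /* hypothetical guest-physical addresses */
    const hwaddr GUEST_DST = 0x2000;
    uint8_t buf[64];

    address_space_read(&address_space_memory, GUEST_SRC,
                       MEMTXATTRS_UNSPECIFIED, buf, sizeof(buf));
    address_space_write(&address_space_memory, GUEST_DST,
                        MEMTXATTRS_UNSPECIFIED, buf, sizeof(buf));
}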
582b55a9
AG
2429enum write_rom_type {
2430 WRITE_DATA,
2431 FLUSH_CACHE,
2432};
2433
2a221651 2434static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
582b55a9 2435 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
d0ecd2aa 2436{
149f54b5 2437 hwaddr l;
d0ecd2aa 2438 uint8_t *ptr;
149f54b5 2439 hwaddr addr1;
5c8a00ce 2440 MemoryRegion *mr;
3b46e624 2441
41063e1e 2442 rcu_read_lock();
d0ecd2aa 2443 while (len > 0) {
149f54b5 2444 l = len;
2a221651 2445 mr = address_space_translate(as, addr, &addr1, &l, true);
3b46e624 2446
5c8a00ce
PB
2447 if (!(memory_region_is_ram(mr) ||
2448 memory_region_is_romd(mr))) {
d0ecd2aa
FB
2449 /* do nothing */
2450 } else {
5c8a00ce 2451 addr1 += memory_region_get_ram_addr(mr);
d0ecd2aa 2452 /* ROM/RAM case */
5579c7f3 2453 ptr = qemu_get_ram_ptr(addr1);
582b55a9
AG
2454 switch (type) {
2455 case WRITE_DATA:
2456 memcpy(ptr, buf, l);
845b6214 2457 invalidate_and_set_dirty(mr, addr1, l);
582b55a9
AG
2458 break;
2459 case FLUSH_CACHE:
2460 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2461 break;
2462 }
d0ecd2aa
FB
2463 }
2464 len -= l;
2465 buf += l;
2466 addr += l;
2467 }
41063e1e 2468 rcu_read_unlock();
d0ecd2aa
FB
2469}
2470
582b55a9 2471/* used for ROM loading : can write in RAM and ROM */
2a221651 2472void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
582b55a9
AG
2473 const uint8_t *buf, int len)
2474{
2a221651 2475 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
582b55a9
AG
2476}
2477
2478void cpu_flush_icache_range(hwaddr start, int len)
2479{
2480 /*
2481 * This function should do the same thing as an icache flush that was
2482 * triggered from within the guest. For TCG we are always cache coherent,
2483 * so there is no need to flush anything. For KVM / Xen we need to flush
2484 * the host's instruction cache at least.
2485 */
2486 if (tcg_enabled()) {
2487 return;
2488 }
2489
2a221651
EI
2490 cpu_physical_memory_write_rom_internal(&address_space_memory,
2491 start, NULL, len, FLUSH_CACHE);
582b55a9
AG
2492}
2493
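/* Illustrative sketch: a firmware loader writes boot code with the ROM
 * helper above and then flushes the host icache for KVM/Xen.  "ROM_BASE",
 * the helper name and the code buffer are hypothetical.
 */
static void my_install_boot_code(const uint8_t *code, int len)
{
    const hwaddr ROM_BASE = 0xfffc0000;   /* hypothetical ROM address */

    cpu_physical_memory_write_rom(&address_space_memory, ROM_BASE, code, len);
    cpu_flush_icache_range(ROM_BASE, len);
}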
6d16c2f8 2494typedef struct {
d3e71559 2495 MemoryRegion *mr;
6d16c2f8 2496 void *buffer;
a8170e5e
AK
2497 hwaddr addr;
2498 hwaddr len;
c2cba0ff 2499 bool in_use;
6d16c2f8
AL
2500} BounceBuffer;
2501
2502static BounceBuffer bounce;
2503
ba223c29 2504typedef struct MapClient {
e95205e1 2505 QEMUBH *bh;
72cf2d4f 2506 QLIST_ENTRY(MapClient) link;
ba223c29
AL
2507} MapClient;
2508
38e047b5 2509QemuMutex map_client_list_lock;
72cf2d4f
BS
2510static QLIST_HEAD(map_client_list, MapClient) map_client_list
2511 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29 2512
e95205e1
FZ
2513static void cpu_unregister_map_client_do(MapClient *client)
2514{
2515 QLIST_REMOVE(client, link);
2516 g_free(client);
2517}
2518
33b6c2ed
FZ
2519static void cpu_notify_map_clients_locked(void)
2520{
2521 MapClient *client;
2522
2523 while (!QLIST_EMPTY(&map_client_list)) {
2524 client = QLIST_FIRST(&map_client_list);
e95205e1
FZ
2525 qemu_bh_schedule(client->bh);
2526 cpu_unregister_map_client_do(client);
33b6c2ed
FZ
2527 }
2528}
2529
e95205e1 2530void cpu_register_map_client(QEMUBH *bh)
ba223c29 2531{
7267c094 2532 MapClient *client = g_malloc(sizeof(*client));
ba223c29 2533
38e047b5 2534 qemu_mutex_lock(&map_client_list_lock);
e95205e1 2535 client->bh = bh;
72cf2d4f 2536 QLIST_INSERT_HEAD(&map_client_list, client, link);
33b6c2ed
FZ
2537 if (!atomic_read(&bounce.in_use)) {
2538 cpu_notify_map_clients_locked();
2539 }
38e047b5 2540 qemu_mutex_unlock(&map_client_list_lock);
ba223c29
AL
2541}
2542
38e047b5 2543void cpu_exec_init_all(void)
ba223c29 2544{
38e047b5
FZ
2545 qemu_mutex_init(&ram_list.mutex);
2546 memory_map_init();
2547 io_mem_init();
2548 qemu_mutex_init(&map_client_list_lock);
ba223c29
AL
2549}
2550
e95205e1 2551void cpu_unregister_map_client(QEMUBH *bh)
ba223c29
AL
2552{
2553 MapClient *client;
2554
e95205e1
FZ
2555 qemu_mutex_lock(&map_client_list_lock);
2556 QLIST_FOREACH(client, &map_client_list, link) {
2557 if (client->bh == bh) {
2558 cpu_unregister_map_client_do(client);
2559 break;
2560 }
ba223c29 2561 }
e95205e1 2562 qemu_mutex_unlock(&map_client_list_lock);
ba223c29
AL
2563}
2564
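/* Illustrative sketch: a device whose address_space_map() attempt failed
 * because the bounce buffer was busy can register a bottom half here and
 * retry the mapping when it runs.  "MyDMAState", "my_dma_retry" and the
 * field name are hypothetical; qemu_bh_new() is the existing QEMU API
 * assumed here.
 */
typedef struct MyDMAState {
    QEMUBH *map_retry_bh;
} MyDMAState;

static void my_dma_retry(void *opaque)
{
    /* ... re-issue the address_space_map() call from here ... */
}

static void my_dma_wait_for_bounce(MyDMAState *s)
{
    s->map_retry_bh = qemu_bh_new(my_dma_retry, s);
    cpu_register_map_client(s->map_retry_bh);
}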
2565static void cpu_notify_map_clients(void)
2566{
38e047b5 2567 qemu_mutex_lock(&map_client_list_lock);
33b6c2ed 2568 cpu_notify_map_clients_locked();
38e047b5 2569 qemu_mutex_unlock(&map_client_list_lock);
ba223c29
AL
2570}
2571
51644ab7
PB
2572bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2573{
5c8a00ce 2574 MemoryRegion *mr;
51644ab7
PB
2575 hwaddr l, xlat;
2576
41063e1e 2577 rcu_read_lock();
51644ab7
PB
2578 while (len > 0) {
2579 l = len;
5c8a00ce
PB
2580 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2581 if (!memory_access_is_direct(mr, is_write)) {
2582 l = memory_access_size(mr, l, addr);
2583 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
51644ab7
PB
2584 return false;
2585 }
2586 }
2587
2588 len -= l;
2589 addr += l;
2590 }
41063e1e 2591 rcu_read_unlock();
51644ab7
PB
2592 return true;
2593}
2594
6d16c2f8
AL
2595/* Map a physical memory region into a host virtual address.
2596 * May map a subset of the requested range, given by and returned in *plen.
2597 * May return NULL if resources needed to perform the mapping are exhausted.
2598 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
2599 * Use cpu_register_map_client() to know when retrying the map operation is
2600 * likely to succeed.
6d16c2f8 2601 */
ac1970fb 2602void *address_space_map(AddressSpace *as,
a8170e5e
AK
2603 hwaddr addr,
2604 hwaddr *plen,
ac1970fb 2605 bool is_write)
6d16c2f8 2606{
a8170e5e 2607 hwaddr len = *plen;
e3127ae0
PB
2608 hwaddr done = 0;
2609 hwaddr l, xlat, base;
2610 MemoryRegion *mr, *this_mr;
2611 ram_addr_t raddr;
6d16c2f8 2612
e3127ae0
PB
2613 if (len == 0) {
2614 return NULL;
2615 }
38bee5dc 2616
e3127ae0 2617 l = len;
41063e1e 2618 rcu_read_lock();
e3127ae0 2619 mr = address_space_translate(as, addr, &xlat, &l, is_write);
41063e1e 2620
e3127ae0 2621 if (!memory_access_is_direct(mr, is_write)) {
c2cba0ff 2622 if (atomic_xchg(&bounce.in_use, true)) {
41063e1e 2623 rcu_read_unlock();
e3127ae0 2624 return NULL;
6d16c2f8 2625 }
e85d9db5
KW
2626 /* Avoid unbounded allocations */
2627 l = MIN(l, TARGET_PAGE_SIZE);
2628 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
e3127ae0
PB
2629 bounce.addr = addr;
2630 bounce.len = l;
d3e71559
PB
2631
2632 memory_region_ref(mr);
2633 bounce.mr = mr;
e3127ae0 2634 if (!is_write) {
5c9eb028
PM
2635 address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
2636 bounce.buffer, l);
8ab934f9 2637 }
6d16c2f8 2638
41063e1e 2639 rcu_read_unlock();
e3127ae0
PB
2640 *plen = l;
2641 return bounce.buffer;
2642 }
2643
2644 base = xlat;
2645 raddr = memory_region_get_ram_addr(mr);
2646
2647 for (;;) {
6d16c2f8
AL
2648 len -= l;
2649 addr += l;
e3127ae0
PB
2650 done += l;
2651 if (len == 0) {
2652 break;
2653 }
2654
2655 l = len;
2656 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2657 if (this_mr != mr || xlat != base + done) {
2658 break;
2659 }
6d16c2f8 2660 }
e3127ae0 2661
d3e71559 2662 memory_region_ref(mr);
41063e1e 2663 rcu_read_unlock();
e3127ae0
PB
2664 *plen = done;
2665 return qemu_ram_ptr_length(raddr + base, plen);
6d16c2f8
AL
2666}
2667
ac1970fb 2668/* Unmaps a memory region previously mapped by address_space_map().
6d16c2f8
AL
2669 * Will also mark the memory as dirty if is_write == 1. access_len gives
2670 * the amount of memory that was actually read or written by the caller.
2671 */
a8170e5e
AK
2672void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2673 int is_write, hwaddr access_len)
6d16c2f8
AL
2674{
2675 if (buffer != bounce.buffer) {
d3e71559
PB
2676 MemoryRegion *mr;
2677 ram_addr_t addr1;
2678
2679 mr = qemu_ram_addr_from_host(buffer, &addr1);
2680 assert(mr != NULL);
6d16c2f8 2681 if (is_write) {
845b6214 2682 invalidate_and_set_dirty(mr, addr1, access_len);
6d16c2f8 2683 }
868bb33f 2684 if (xen_enabled()) {
e41d7c69 2685 xen_invalidate_map_cache_entry(buffer);
050a0ddf 2686 }
d3e71559 2687 memory_region_unref(mr);
6d16c2f8
AL
2688 return;
2689 }
2690 if (is_write) {
5c9eb028
PM
2691 address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
2692 bounce.buffer, access_len);
6d16c2f8 2693 }
f8a83245 2694 qemu_vfree(bounce.buffer);
6d16c2f8 2695 bounce.buffer = NULL;
d3e71559 2696 memory_region_unref(bounce.mr);
c2cba0ff 2697 atomic_mb_set(&bounce.in_use, false);
ba223c29 2698 cpu_notify_map_clients();
6d16c2f8 2699}
d0ecd2aa 2700
a8170e5e
AK
2701void *cpu_physical_memory_map(hwaddr addr,
2702 hwaddr *plen,
ac1970fb
AK
2703 int is_write)
2704{
2705 return address_space_map(&address_space_memory, addr, plen, is_write);
2706}
2707
a8170e5e
AK
2708void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2709 int is_write, hwaddr access_len)
ac1970fb
AK
2710{
2711 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2712}
2713
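/* Illustrative sketch of the map/use/unmap pattern described above: try the
 * zero-copy mapping first and fall back to cpu_physical_memory_rw() when the
 * mapping fails or comes back shorter than requested.  The helper name is
 * hypothetical.
 */
static void my_read_guest_region(hwaddr addr, uint8_t *dst, hwaddr len)
{
    hwaddr maplen = len;
    void *p = cpu_physical_memory_map(addr, &maplen, 0 /* read */);

    if (p && maplen == len) {
        memcpy(dst, p, len);
        cpu_physical_memory_unmap(p, maplen, 0, maplen);
    } else {
        if (p) {
            /* Partial mapping: give it back untouched, take the slow path. */
            cpu_physical_memory_unmap(p, maplen, 0, 0);
        }
        cpu_physical_memory_rw(addr, dst, len, 0 /* read */);
    }
}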
8df1cd07 2714/* warning: addr must be aligned */
50013115
PM
2715static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
2716 MemTxAttrs attrs,
2717 MemTxResult *result,
2718 enum device_endian endian)
8df1cd07 2719{
8df1cd07 2720 uint8_t *ptr;
791af8c8 2721 uint64_t val;
5c8a00ce 2722 MemoryRegion *mr;
149f54b5
PB
2723 hwaddr l = 4;
2724 hwaddr addr1;
50013115 2725 MemTxResult r;
8df1cd07 2726
41063e1e 2727 rcu_read_lock();
fdfba1a2 2728 mr = address_space_translate(as, addr, &addr1, &l, false);
5c8a00ce 2729 if (l < 4 || !memory_access_is_direct(mr, false)) {
8df1cd07 2730 /* I/O case */
50013115 2731 r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
1e78bcc1
AG
2732#if defined(TARGET_WORDS_BIGENDIAN)
2733 if (endian == DEVICE_LITTLE_ENDIAN) {
2734 val = bswap32(val);
2735 }
2736#else
2737 if (endian == DEVICE_BIG_ENDIAN) {
2738 val = bswap32(val);
2739 }
2740#endif
8df1cd07
FB
2741 } else {
2742 /* RAM case */
5c8a00ce 2743 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2744 & TARGET_PAGE_MASK)
149f54b5 2745 + addr1);
1e78bcc1
AG
2746 switch (endian) {
2747 case DEVICE_LITTLE_ENDIAN:
2748 val = ldl_le_p(ptr);
2749 break;
2750 case DEVICE_BIG_ENDIAN:
2751 val = ldl_be_p(ptr);
2752 break;
2753 default:
2754 val = ldl_p(ptr);
2755 break;
2756 }
50013115
PM
2757 r = MEMTX_OK;
2758 }
2759 if (result) {
2760 *result = r;
8df1cd07 2761 }
41063e1e 2762 rcu_read_unlock();
8df1cd07
FB
2763 return val;
2764}
2765
50013115
PM
2766uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
2767 MemTxAttrs attrs, MemTxResult *result)
2768{
2769 return address_space_ldl_internal(as, addr, attrs, result,
2770 DEVICE_NATIVE_ENDIAN);
2771}
2772
2773uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
2774 MemTxAttrs attrs, MemTxResult *result)
2775{
2776 return address_space_ldl_internal(as, addr, attrs, result,
2777 DEVICE_LITTLE_ENDIAN);
2778}
2779
2780uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
2781 MemTxAttrs attrs, MemTxResult *result)
2782{
2783 return address_space_ldl_internal(as, addr, attrs, result,
2784 DEVICE_BIG_ENDIAN);
2785}
2786
fdfba1a2 2787uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2788{
50013115 2789 return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2790}
2791
fdfba1a2 2792uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2793{
50013115 2794 return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2795}
2796
fdfba1a2 2797uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2798{
50013115 2799 return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2800}
2801
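/* Illustrative sketch: reading a 32-bit little-endian device register and
 * checking the MemTxResult instead of passing NULL.  "reg_addr" and the
 * 0xffffffff error value are hypothetical.
 */
static uint32_t my_read_le_reg(hwaddr reg_addr)
{
    MemTxResult res;
    uint32_t val = address_space_ldl_le(&address_space_memory, reg_addr,
                                        MEMTXATTRS_UNSPECIFIED, &res);

    return (res == MEMTX_OK) ? val : 0xffffffff;
}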
84b7b8e7 2802/* warning: addr must be aligned */
50013115
PM
2803static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
2804 MemTxAttrs attrs,
2805 MemTxResult *result,
2806 enum device_endian endian)
84b7b8e7 2807{
84b7b8e7
FB
2808 uint8_t *ptr;
2809 uint64_t val;
5c8a00ce 2810 MemoryRegion *mr;
149f54b5
PB
2811 hwaddr l = 8;
2812 hwaddr addr1;
50013115 2813 MemTxResult r;
84b7b8e7 2814
41063e1e 2815 rcu_read_lock();
2c17449b 2816 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2817 false);
2818 if (l < 8 || !memory_access_is_direct(mr, false)) {
84b7b8e7 2819 /* I/O case */
50013115 2820 r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
968a5627
PB
2821#if defined(TARGET_WORDS_BIGENDIAN)
2822 if (endian == DEVICE_LITTLE_ENDIAN) {
2823 val = bswap64(val);
2824 }
2825#else
2826 if (endian == DEVICE_BIG_ENDIAN) {
2827 val = bswap64(val);
2828 }
84b7b8e7
FB
2829#endif
2830 } else {
2831 /* RAM case */
5c8a00ce 2832 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2833 & TARGET_PAGE_MASK)
149f54b5 2834 + addr1);
1e78bcc1
AG
2835 switch (endian) {
2836 case DEVICE_LITTLE_ENDIAN:
2837 val = ldq_le_p(ptr);
2838 break;
2839 case DEVICE_BIG_ENDIAN:
2840 val = ldq_be_p(ptr);
2841 break;
2842 default:
2843 val = ldq_p(ptr);
2844 break;
2845 }
50013115
PM
2846 r = MEMTX_OK;
2847 }
2848 if (result) {
2849 *result = r;
84b7b8e7 2850 }
41063e1e 2851 rcu_read_unlock();
84b7b8e7
FB
2852 return val;
2853}
2854
50013115
PM
2855uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
2856 MemTxAttrs attrs, MemTxResult *result)
2857{
2858 return address_space_ldq_internal(as, addr, attrs, result,
2859 DEVICE_NATIVE_ENDIAN);
2860}
2861
2862uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
2863 MemTxAttrs attrs, MemTxResult *result)
2864{
2865 return address_space_ldq_internal(as, addr, attrs, result,
2866 DEVICE_LITTLE_ENDIAN);
2867}
2868
2869uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
2870 MemTxAttrs attrs, MemTxResult *result)
2871{
2872 return address_space_ldq_internal(as, addr, attrs, result,
2873 DEVICE_BIG_ENDIAN);
2874}
2875
2c17449b 2876uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2877{
50013115 2878 return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2879}
2880
2c17449b 2881uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2882{
50013115 2883 return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2884}
2885
2c17449b 2886uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2887{
50013115 2888 return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2889}
2890
aab33094 2891/* XXX: optimize */
50013115
PM
2892uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
2893 MemTxAttrs attrs, MemTxResult *result)
aab33094
FB
2894{
2895 uint8_t val;
50013115
PM
2896 MemTxResult r;
2897
2898 r = address_space_rw(as, addr, attrs, &val, 1, 0);
2899 if (result) {
2900 *result = r;
2901 }
aab33094
FB
2902 return val;
2903}
2904
50013115
PM
2905uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
2906{
2907 return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
2908}
2909
733f0b02 2910/* warning: addr must be aligned */
50013115
PM
2911static inline uint32_t address_space_lduw_internal(AddressSpace *as,
2912 hwaddr addr,
2913 MemTxAttrs attrs,
2914 MemTxResult *result,
2915 enum device_endian endian)
aab33094 2916{
733f0b02
MT
2917 uint8_t *ptr;
2918 uint64_t val;
5c8a00ce 2919 MemoryRegion *mr;
149f54b5
PB
2920 hwaddr l = 2;
2921 hwaddr addr1;
50013115 2922 MemTxResult r;
733f0b02 2923
41063e1e 2924 rcu_read_lock();
41701aa4 2925 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2926 false);
2927 if (l < 2 || !memory_access_is_direct(mr, false)) {
733f0b02 2928 /* I/O case */
50013115 2929 r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
1e78bcc1
AG
2930#if defined(TARGET_WORDS_BIGENDIAN)
2931 if (endian == DEVICE_LITTLE_ENDIAN) {
2932 val = bswap16(val);
2933 }
2934#else
2935 if (endian == DEVICE_BIG_ENDIAN) {
2936 val = bswap16(val);
2937 }
2938#endif
733f0b02
MT
2939 } else {
2940 /* RAM case */
5c8a00ce 2941 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2942 & TARGET_PAGE_MASK)
149f54b5 2943 + addr1);
1e78bcc1
AG
2944 switch (endian) {
2945 case DEVICE_LITTLE_ENDIAN:
2946 val = lduw_le_p(ptr);
2947 break;
2948 case DEVICE_BIG_ENDIAN:
2949 val = lduw_be_p(ptr);
2950 break;
2951 default:
2952 val = lduw_p(ptr);
2953 break;
2954 }
50013115
PM
2955 r = MEMTX_OK;
2956 }
2957 if (result) {
2958 *result = r;
733f0b02 2959 }
41063e1e 2960 rcu_read_unlock();
733f0b02 2961 return val;
aab33094
FB
2962}
2963
50013115
PM
2964uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
2965 MemTxAttrs attrs, MemTxResult *result)
2966{
2967 return address_space_lduw_internal(as, addr, attrs, result,
2968 DEVICE_NATIVE_ENDIAN);
2969}
2970
2971uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
2972 MemTxAttrs attrs, MemTxResult *result)
2973{
2974 return address_space_lduw_internal(as, addr, attrs, result,
2975 DEVICE_LITTLE_ENDIAN);
2976}
2977
2978uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
2979 MemTxAttrs attrs, MemTxResult *result)
2980{
2981 return address_space_lduw_internal(as, addr, attrs, result,
2982 DEVICE_BIG_ENDIAN);
2983}
2984
41701aa4 2985uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2986{
50013115 2987 return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2988}
2989
41701aa4 2990uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2991{
50013115 2992 return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2993}
2994
41701aa4 2995uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2996{
50013115 2997 return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2998}
2999
8df1cd07
FB
3000/* warning: addr must be aligned. The ram page is not marked as dirty
3001 and the code inside is not invalidated. It is useful if the dirty
3002 bits are used to track modified PTEs */
50013115
PM
3003void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
3004 MemTxAttrs attrs, MemTxResult *result)
8df1cd07 3005{
8df1cd07 3006 uint8_t *ptr;
5c8a00ce 3007 MemoryRegion *mr;
149f54b5
PB
3008 hwaddr l = 4;
3009 hwaddr addr1;
50013115 3010 MemTxResult r;
845b6214 3011 uint8_t dirty_log_mask;
8df1cd07 3012
41063e1e 3013 rcu_read_lock();
2198a121 3014 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
3015 true);
3016 if (l < 4 || !memory_access_is_direct(mr, true)) {
50013115 3017 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
8df1cd07 3018 } else {
5c8a00ce 3019 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 3020 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 3021 stl_p(ptr, val);
74576198 3022
845b6214
PB
3023 dirty_log_mask = memory_region_get_dirty_log_mask(mr);
3024 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
58d2707e 3025 cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
50013115
PM
3026 r = MEMTX_OK;
3027 }
3028 if (result) {
3029 *result = r;
8df1cd07 3030 }
41063e1e 3031 rcu_read_unlock();
8df1cd07
FB
3032}
3033
50013115
PM
3034void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
3035{
3036 address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3037}
3038
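/* Illustrative sketch of a use case like the one named in the comment above:
 * a target MMU helper sets the "accessed" bit in a guest page-table entry
 * without flagging the RAM page dirty for TB invalidation.
 * "MY_PTE_ACCESSED" and the helper name are hypothetical.
 */
#define MY_PTE_ACCESSED 0x20

static void my_set_pte_accessed(AddressSpace *as, hwaddr pte_addr)
{
    uint32_t pte = ldl_phys(as, pte_addr);

    if (!(pte & MY_PTE_ACCESSED)) {
        stl_phys_notdirty(as, pte_addr, pte | MY_PTE_ACCESSED);
    }
}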
8df1cd07 3039/* warning: addr must be aligned */
50013115
PM
3040static inline void address_space_stl_internal(AddressSpace *as,
3041 hwaddr addr, uint32_t val,
3042 MemTxAttrs attrs,
3043 MemTxResult *result,
3044 enum device_endian endian)
8df1cd07 3045{
8df1cd07 3046 uint8_t *ptr;
5c8a00ce 3047 MemoryRegion *mr;
149f54b5
PB
3048 hwaddr l = 4;
3049 hwaddr addr1;
50013115 3050 MemTxResult r;
8df1cd07 3051
41063e1e 3052 rcu_read_lock();
ab1da857 3053 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
3054 true);
3055 if (l < 4 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
3056#if defined(TARGET_WORDS_BIGENDIAN)
3057 if (endian == DEVICE_LITTLE_ENDIAN) {
3058 val = bswap32(val);
3059 }
3060#else
3061 if (endian == DEVICE_BIG_ENDIAN) {
3062 val = bswap32(val);
3063 }
3064#endif
50013115 3065 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
8df1cd07 3066 } else {
8df1cd07 3067 /* RAM case */
5c8a00ce 3068 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 3069 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
3070 switch (endian) {
3071 case DEVICE_LITTLE_ENDIAN:
3072 stl_le_p(ptr, val);
3073 break;
3074 case DEVICE_BIG_ENDIAN:
3075 stl_be_p(ptr, val);
3076 break;
3077 default:
3078 stl_p(ptr, val);
3079 break;
3080 }
845b6214 3081 invalidate_and_set_dirty(mr, addr1, 4);
50013115
PM
3082 r = MEMTX_OK;
3083 }
3084 if (result) {
3085 *result = r;
8df1cd07 3086 }
41063e1e 3087 rcu_read_unlock();
8df1cd07
FB
3088}
3089
50013115
PM
3090void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
3091 MemTxAttrs attrs, MemTxResult *result)
3092{
3093 address_space_stl_internal(as, addr, val, attrs, result,
3094 DEVICE_NATIVE_ENDIAN);
3095}
3096
3097void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
3098 MemTxAttrs attrs, MemTxResult *result)
3099{
3100 address_space_stl_internal(as, addr, val, attrs, result,
3101 DEVICE_LITTLE_ENDIAN);
3102}
3103
3104void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
3105 MemTxAttrs attrs, MemTxResult *result)
3106{
3107 address_space_stl_internal(as, addr, val, attrs, result,
3108 DEVICE_BIG_ENDIAN);
3109}
3110
ab1da857 3111void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3112{
50013115 3113 address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3114}
3115
ab1da857 3116void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3117{
50013115 3118 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3119}
3120
ab1da857 3121void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3122{
50013115 3123 address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3124}
3125
aab33094 3126/* XXX: optimize */
50013115
PM
3127void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
3128 MemTxAttrs attrs, MemTxResult *result)
aab33094
FB
3129{
3130 uint8_t v = val;
50013115
PM
3131 MemTxResult r;
3132
3133 r = address_space_rw(as, addr, attrs, &v, 1, 1);
3134 if (result) {
3135 *result = r;
3136 }
3137}
3138
3139void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3140{
3141 address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
aab33094
FB
3142}
3143
733f0b02 3144/* warning: addr must be aligned */
50013115
PM
3145static inline void address_space_stw_internal(AddressSpace *as,
3146 hwaddr addr, uint32_t val,
3147 MemTxAttrs attrs,
3148 MemTxResult *result,
3149 enum device_endian endian)
aab33094 3150{
733f0b02 3151 uint8_t *ptr;
5c8a00ce 3152 MemoryRegion *mr;
149f54b5
PB
3153 hwaddr l = 2;
3154 hwaddr addr1;
50013115 3155 MemTxResult r;
733f0b02 3156
41063e1e 3157 rcu_read_lock();
5ce5944d 3158 mr = address_space_translate(as, addr, &addr1, &l, true);
5c8a00ce 3159 if (l < 2 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
3160#if defined(TARGET_WORDS_BIGENDIAN)
3161 if (endian == DEVICE_LITTLE_ENDIAN) {
3162 val = bswap16(val);
3163 }
3164#else
3165 if (endian == DEVICE_BIG_ENDIAN) {
3166 val = bswap16(val);
3167 }
3168#endif
50013115 3169 r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
733f0b02 3170 } else {
733f0b02 3171 /* RAM case */
5c8a00ce 3172 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
733f0b02 3173 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
3174 switch (endian) {
3175 case DEVICE_LITTLE_ENDIAN:
3176 stw_le_p(ptr, val);
3177 break;
3178 case DEVICE_BIG_ENDIAN:
3179 stw_be_p(ptr, val);
3180 break;
3181 default:
3182 stw_p(ptr, val);
3183 break;
3184 }
845b6214 3185 invalidate_and_set_dirty(mr, addr1, 2);
50013115
PM
3186 r = MEMTX_OK;
3187 }
3188 if (result) {
3189 *result = r;
733f0b02 3190 }
41063e1e 3191 rcu_read_unlock();
aab33094
FB
3192}
3193
50013115
PM
3194void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
3195 MemTxAttrs attrs, MemTxResult *result)
3196{
3197 address_space_stw_internal(as, addr, val, attrs, result,
3198 DEVICE_NATIVE_ENDIAN);
3199}
3200
3201void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
3202 MemTxAttrs attrs, MemTxResult *result)
3203{
3204 address_space_stw_internal(as, addr, val, attrs, result,
3205 DEVICE_LITTLE_ENDIAN);
3206}
3207
3208void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
3209 MemTxAttrs attrs, MemTxResult *result)
3210{
3211 address_space_stw_internal(as, addr, val, attrs, result,
3212 DEVICE_BIG_ENDIAN);
3213}
3214
5ce5944d 3215void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3216{
50013115 3217 address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3218}
3219
5ce5944d 3220void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3221{
50013115 3222 address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3223}
3224
5ce5944d 3225void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3226{
50013115 3227 address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3228}
3229
aab33094 3230/* XXX: optimize */
50013115
PM
3231void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
3232 MemTxAttrs attrs, MemTxResult *result)
aab33094 3233{
50013115 3234 MemTxResult r;
aab33094 3235 val = tswap64(val);
50013115
PM
3236 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3237 if (result) {
3238 *result = r;
3239 }
aab33094
FB
3240}
3241
50013115
PM
3242void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
3243 MemTxAttrs attrs, MemTxResult *result)
1e78bcc1 3244{
50013115 3245 MemTxResult r;
1e78bcc1 3246 val = cpu_to_le64(val);
50013115
PM
3247 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3248 if (result) {
3249 *result = r;
3250 }
3251}
3252void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
3253 MemTxAttrs attrs, MemTxResult *result)
3254{
3255 MemTxResult r;
3256 val = cpu_to_be64(val);
3257 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3258 if (result) {
3259 *result = r;
3260 }
3261}
3262
3263void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
3264{
3265 address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3266}
3267
3268void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
3269{
3270 address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3271}
3272
f606604f 3273void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
1e78bcc1 3274{
50013115 3275 address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3276}
3277
5e2972fd 3278/* virtual memory access for debug (includes writing to ROM) */
f17ec444 3279int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
b448f2f3 3280 uint8_t *buf, int len, int is_write)
13eb76e0
FB
3281{
3282 int l;
a8170e5e 3283 hwaddr phys_addr;
9b3c35e0 3284 target_ulong page;
13eb76e0
FB
3285
3286 while (len > 0) {
3287 page = addr & TARGET_PAGE_MASK;
f17ec444 3288 phys_addr = cpu_get_phys_page_debug(cpu, page);
13eb76e0
FB
3289 /* if no physical page mapped, return an error */
3290 if (phys_addr == -1)
3291 return -1;
3292 l = (page + TARGET_PAGE_SIZE) - addr;
3293 if (l > len)
3294 l = len;
5e2972fd 3295 phys_addr += (addr & ~TARGET_PAGE_MASK);
2e38847b
EI
3296 if (is_write) {
3297 cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
3298 } else {
5c9eb028
PM
3299 address_space_rw(cpu->as, phys_addr, MEMTXATTRS_UNSPECIFIED,
3300 buf, l, 0);
2e38847b 3301 }
13eb76e0
FB
3302 len -= l;
3303 buf += l;
3304 addr += l;
3305 }
3306 return 0;
3307}
a68fe89c 3308#endif
13eb76e0 3309
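/* Illustrative sketch: a debugger front end reads one guest-virtual word
 * with cpu_memory_rw_debug() above.  The helper name and the 0 error value
 * are hypothetical.
 */
static uint32_t my_debug_read_word(CPUState *cpu, target_ulong vaddr)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(cpu, vaddr, buf, sizeof(buf), 0) < 0) {
        return 0;       /* page not mapped */
    }
    return ldl_p(buf);
}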
8e4a424b
BS
3310/*
3311 * A helper function for the _utterly broken_ virtio device model to find out if
3312 * it's running on a big endian machine. Don't do this at home kids!
3313 */
98ed8ecf
GK
3314bool target_words_bigendian(void);
3315bool target_words_bigendian(void)
8e4a424b
BS
3316{
3317#if defined(TARGET_WORDS_BIGENDIAN)
3318 return true;
3319#else
3320 return false;
3321#endif
3322}
3323
76f35538 3324#ifndef CONFIG_USER_ONLY
a8170e5e 3325bool cpu_physical_memory_is_io(hwaddr phys_addr)
76f35538 3326{
5c8a00ce 3327 MemoryRegion*mr;
149f54b5 3328 hwaddr l = 1;
41063e1e 3329 bool res;
76f35538 3330
41063e1e 3331 rcu_read_lock();
5c8a00ce
PB
3332 mr = address_space_translate(&address_space_memory,
3333 phys_addr, &phys_addr, &l, false);
76f35538 3334
41063e1e
PB
3335 res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
3336 rcu_read_unlock();
3337 return res;
76f35538 3338}
bd2fa51f
MH
3339
3340void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
3341{
3342 RAMBlock *block;
3343
0dc3f44a
MD
3344 rcu_read_lock();
3345 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
9b8424d5 3346 func(block->host, block->offset, block->used_length, opaque);
bd2fa51f 3347 }
0dc3f44a 3348 rcu_read_unlock();
bd2fa51f 3349}
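/* Illustrative sketch: a RAMBlockIterFunc callback that just prints each RAM
 * block visited by qemu_ram_foreach_block() above; the signature follows the
 * call made in that loop.  The function name is hypothetical.
 */
static void my_dump_ram_block(void *host, ram_addr_t offset,
                              ram_addr_t length, void *opaque)
{
    fprintf(stderr, "ram block: host %p offset " RAM_ADDR_FMT
            " length " RAM_ADDR_FMT "\n", host, offset, length);
}

/* Typical call: qemu_ram_foreach_block(my_dump_ram_block, NULL); */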
ec3f8c99 3350#endif