]> git.ipfire.org Git - thirdparty/qemu.git/blame - exec.c
target-ppc: Move cpu_exec_init() call to realize function
[thirdparty/qemu.git] / exec.c
CommitLineData
54936004 1/*
5b6dd868 2 * Virtual page mapping
5fafdf24 3 *
54936004
FB
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
54936004 18 */
67b915a5 19#include "config.h"
777872e5 20#ifndef _WIN32
a98d49b1 21#include <sys/types.h>
d5a8f07c
FB
22#include <sys/mman.h>
23#endif
54936004 24
055403b2 25#include "qemu-common.h"
6180a181 26#include "cpu.h"
b67d9a52 27#include "tcg.h"
b3c7724c 28#include "hw/hw.h"
4485bd26 29#if !defined(CONFIG_USER_ONLY)
47c8ca53 30#include "hw/boards.h"
4485bd26 31#endif
cc9e98cb 32#include "hw/qdev.h"
1de7afc9 33#include "qemu/osdep.h"
9c17d615 34#include "sysemu/kvm.h"
2ff3de68 35#include "sysemu/sysemu.h"
0d09e41a 36#include "hw/xen/xen.h"
1de7afc9
PB
37#include "qemu/timer.h"
38#include "qemu/config-file.h"
75a34036 39#include "qemu/error-report.h"
022c62cb 40#include "exec/memory.h"
9c17d615 41#include "sysemu/dma.h"
022c62cb 42#include "exec/address-spaces.h"
53a5960a
PB
43#if defined(CONFIG_USER_ONLY)
44#include <qemu.h>
432d268c 45#else /* !CONFIG_USER_ONLY */
9c17d615 46#include "sysemu/xen-mapcache.h"
6506e4f9 47#include "trace.h"
53a5960a 48#endif
0d6d3c87 49#include "exec/cpu-all.h"
0dc3f44a 50#include "qemu/rcu_queue.h"
4840f10e 51#include "qemu/main-loop.h"
022c62cb 52#include "exec/cputlb.h"
5b6dd868 53#include "translate-all.h"
0cac1b66 54
022c62cb 55#include "exec/memory-internal.h"
220c3ebd 56#include "exec/ram_addr.h"
67d95c15 57
b35ba30f
MT
58#include "qemu/range.h"
59
db7b5426 60//#define DEBUG_SUBPAGE
1196be37 61
e2eef170 62#if !defined(CONFIG_USER_ONLY)
0dc3f44a
MD
63/* ram_list is read under rcu_read_lock()/rcu_read_unlock(). Writes
64 * are protected by the ramlist lock.
65 */
0d53d9fe 66RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
62152b8a
AK
67
68static MemoryRegion *system_memory;
309cb471 69static MemoryRegion *system_io;
62152b8a 70
f6790af6
AK
71AddressSpace address_space_io;
72AddressSpace address_space_memory;
2673a5da 73
0844e007 74MemoryRegion io_mem_rom, io_mem_notdirty;
acc9d80b 75static MemoryRegion io_mem_unassigned;
0e0df1e2 76
7bd4f430
PB
77/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
78#define RAM_PREALLOC (1 << 0)
79
dbcb8981
PB
80/* RAM is mmap-ed with MAP_SHARED */
81#define RAM_SHARED (1 << 1)
82
62be4e3a
MT
83/* Only a portion of RAM (used_length) is actually used, and migrated.
84 * This used_length size can change across reboots.
85 */
86#define RAM_RESIZEABLE (1 << 2)
87
e2eef170 88#endif
9fa3e853 89
bdc44640 90struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
6a00d601
FB
91/* current CPU in the current thread. It is only valid inside
92 cpu_exec() */
4917cf44 93DEFINE_TLS(CPUState *, current_cpu);
2e70f6ef 94/* 0 = Do not count executed instructions.
bf20dc07 95 1 = Precise instruction counting.
2e70f6ef 96 2 = Adaptive rate instruction counting. */
5708fc66 97int use_icount;
6a00d601 98
e2eef170 99#if !defined(CONFIG_USER_ONLY)
4346ae3e 100
1db8abb1
PB
101typedef struct PhysPageEntry PhysPageEntry;
102
103struct PhysPageEntry {
9736e55b 104 /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
8b795765 105 uint32_t skip : 6;
9736e55b 106 /* index into phys_sections (!skip) or phys_map_nodes (skip) */
8b795765 107 uint32_t ptr : 26;
1db8abb1
PB
108};
109
8b795765
MT
110#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
111
03f49957 112/* Size of the L2 (and L3, etc) page tables. */
57271d63 113#define ADDR_SPACE_BITS 64
03f49957 114
026736ce 115#define P_L2_BITS 9
03f49957
PB
116#define P_L2_SIZE (1 << P_L2_BITS)
117
118#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
119
120typedef PhysPageEntry Node[P_L2_SIZE];
0475d94f 121
53cb28cb 122typedef struct PhysPageMap {
79e2b9ae
PB
123 struct rcu_head rcu;
124
53cb28cb
MA
125 unsigned sections_nb;
126 unsigned sections_nb_alloc;
127 unsigned nodes_nb;
128 unsigned nodes_nb_alloc;
129 Node *nodes;
130 MemoryRegionSection *sections;
131} PhysPageMap;
132
1db8abb1 133struct AddressSpaceDispatch {
79e2b9ae
PB
134 struct rcu_head rcu;
135
1db8abb1
PB
136 /* This is a multi-level map on the physical address space.
137 * The bottom level has pointers to MemoryRegionSections.
138 */
139 PhysPageEntry phys_map;
53cb28cb 140 PhysPageMap map;
acc9d80b 141 AddressSpace *as;
1db8abb1
PB
142};
143
90260c6c
JK
144#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
145typedef struct subpage_t {
146 MemoryRegion iomem;
acc9d80b 147 AddressSpace *as;
90260c6c
JK
148 hwaddr base;
149 uint16_t sub_section[TARGET_PAGE_SIZE];
150} subpage_t;
151
b41aac4f
LPF
152#define PHYS_SECTION_UNASSIGNED 0
153#define PHYS_SECTION_NOTDIRTY 1
154#define PHYS_SECTION_ROM 2
155#define PHYS_SECTION_WATCH 3
5312bd8b 156
e2eef170 157static void io_mem_init(void);
62152b8a 158static void memory_map_init(void);
09daed84 159static void tcg_commit(MemoryListener *listener);
e2eef170 160
1ec9b909 161static MemoryRegion io_mem_watch;
6658ffb8 162#endif
fd6ce8f6 163
6d9a1304 164#if !defined(CONFIG_USER_ONLY)
d6f2ea22 165
53cb28cb 166static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
d6f2ea22 167{
53cb28cb
MA
168 if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
169 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
170 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
171 map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
d6f2ea22 172 }
f7bf5461
AK
173}
174
db94604b 175static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
f7bf5461
AK
176{
177 unsigned i;
8b795765 178 uint32_t ret;
db94604b
PB
179 PhysPageEntry e;
180 PhysPageEntry *p;
f7bf5461 181
53cb28cb 182 ret = map->nodes_nb++;
db94604b 183 p = map->nodes[ret];
f7bf5461 184 assert(ret != PHYS_MAP_NODE_NIL);
53cb28cb 185 assert(ret != map->nodes_nb_alloc);
db94604b
PB
186
187 e.skip = leaf ? 0 : 1;
188 e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
03f49957 189 for (i = 0; i < P_L2_SIZE; ++i) {
db94604b 190 memcpy(&p[i], &e, sizeof(e));
d6f2ea22 191 }
f7bf5461 192 return ret;
d6f2ea22
AK
193}
194
53cb28cb
MA
195static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
196 hwaddr *index, hwaddr *nb, uint16_t leaf,
2999097b 197 int level)
f7bf5461
AK
198{
199 PhysPageEntry *p;
03f49957 200 hwaddr step = (hwaddr)1 << (level * P_L2_BITS);
108c49b8 201
9736e55b 202 if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
db94604b 203 lp->ptr = phys_map_node_alloc(map, level == 0);
92e873b9 204 }
db94604b 205 p = map->nodes[lp->ptr];
03f49957 206 lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];
f7bf5461 207
03f49957 208 while (*nb && lp < &p[P_L2_SIZE]) {
07f07b31 209 if ((*index & (step - 1)) == 0 && *nb >= step) {
9736e55b 210 lp->skip = 0;
c19e8800 211 lp->ptr = leaf;
07f07b31
AK
212 *index += step;
213 *nb -= step;
2999097b 214 } else {
53cb28cb 215 phys_page_set_level(map, lp, index, nb, leaf, level - 1);
2999097b
AK
216 }
217 ++lp;
f7bf5461
AK
218 }
219}
220
ac1970fb 221static void phys_page_set(AddressSpaceDispatch *d,
a8170e5e 222 hwaddr index, hwaddr nb,
2999097b 223 uint16_t leaf)
f7bf5461 224{
2999097b 225 /* Wildly overreserve - it doesn't matter much. */
53cb28cb 226 phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);
5cd2c5b6 227
53cb28cb 228 phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
92e873b9
FB
229}
230
b35ba30f
MT
231/* Compact a non leaf page entry. Simply detect that the entry has a single child,
232 * and update our entry so we can skip it and go directly to the destination.
233 */
234static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
235{
236 unsigned valid_ptr = P_L2_SIZE;
237 int valid = 0;
238 PhysPageEntry *p;
239 int i;
240
241 if (lp->ptr == PHYS_MAP_NODE_NIL) {
242 return;
243 }
244
245 p = nodes[lp->ptr];
246 for (i = 0; i < P_L2_SIZE; i++) {
247 if (p[i].ptr == PHYS_MAP_NODE_NIL) {
248 continue;
249 }
250
251 valid_ptr = i;
252 valid++;
253 if (p[i].skip) {
254 phys_page_compact(&p[i], nodes, compacted);
255 }
256 }
257
258 /* We can only compress if there's only one child. */
259 if (valid != 1) {
260 return;
261 }
262
263 assert(valid_ptr < P_L2_SIZE);
264
265 /* Don't compress if it won't fit in the # of bits we have. */
266 if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
267 return;
268 }
269
270 lp->ptr = p[valid_ptr].ptr;
271 if (!p[valid_ptr].skip) {
272 /* If our only child is a leaf, make this a leaf. */
273 /* By design, we should have made this node a leaf to begin with so we
274 * should never reach here.
275 * But since it's so simple to handle this, let's do it just in case we
276 * change this rule.
277 */
278 lp->skip = 0;
279 } else {
280 lp->skip += p[valid_ptr].skip;
281 }
282}
283
284static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
285{
286 DECLARE_BITMAP(compacted, nodes_nb);
287
288 if (d->phys_map.skip) {
53cb28cb 289 phys_page_compact(&d->phys_map, d->map.nodes, compacted);
b35ba30f
MT
290 }
291}
292
97115a8d 293static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
9affd6fc 294 Node *nodes, MemoryRegionSection *sections)
92e873b9 295{
31ab2b4a 296 PhysPageEntry *p;
97115a8d 297 hwaddr index = addr >> TARGET_PAGE_BITS;
31ab2b4a 298 int i;
f1f6e3b8 299
9736e55b 300 for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
c19e8800 301 if (lp.ptr == PHYS_MAP_NODE_NIL) {
9affd6fc 302 return &sections[PHYS_SECTION_UNASSIGNED];
31ab2b4a 303 }
9affd6fc 304 p = nodes[lp.ptr];
03f49957 305 lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
5312bd8b 306 }
b35ba30f
MT
307
308 if (sections[lp.ptr].size.hi ||
309 range_covers_byte(sections[lp.ptr].offset_within_address_space,
310 sections[lp.ptr].size.lo, addr)) {
311 return &sections[lp.ptr];
312 } else {
313 return &sections[PHYS_SECTION_UNASSIGNED];
314 }
f3705d53
AK
315}
316
e5548617
BS
317bool memory_region_is_unassigned(MemoryRegion *mr)
318{
2a8e7499 319 return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
5b6dd868 320 && mr != &io_mem_watch;
fd6ce8f6 321}
149f54b5 322
79e2b9ae 323/* Called from RCU critical section */
c7086b4a 324static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
90260c6c
JK
325 hwaddr addr,
326 bool resolve_subpage)
9f029603 327{
90260c6c
JK
328 MemoryRegionSection *section;
329 subpage_t *subpage;
330
53cb28cb 331 section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
90260c6c
JK
332 if (resolve_subpage && section->mr->subpage) {
333 subpage = container_of(section->mr, subpage_t, iomem);
53cb28cb 334 section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
90260c6c
JK
335 }
336 return section;
9f029603
JK
337}
338
79e2b9ae 339/* Called from RCU critical section */
90260c6c 340static MemoryRegionSection *
c7086b4a 341address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
90260c6c 342 hwaddr *plen, bool resolve_subpage)
149f54b5
PB
343{
344 MemoryRegionSection *section;
965eb2fc 345 MemoryRegion *mr;
a87f3954 346 Int128 diff;
149f54b5 347
c7086b4a 348 section = address_space_lookup_region(d, addr, resolve_subpage);
149f54b5
PB
349 /* Compute offset within MemoryRegionSection */
350 addr -= section->offset_within_address_space;
351
352 /* Compute offset within MemoryRegion */
353 *xlat = addr + section->offset_within_region;
354
965eb2fc 355 mr = section->mr;
b242e0e0
PB
356
357 /* MMIO registers can be expected to perform full-width accesses based only
358 * on their address, without considering adjacent registers that could
359 * decode to completely different MemoryRegions. When such registers
360 * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
361 * regions overlap wildly. For this reason we cannot clamp the accesses
362 * here.
363 *
364 * If the length is small (as is the case for address_space_ldl/stl),
365 * everything works fine. If the incoming length is large, however,
366 * the caller really has to do the clamping through memory_access_size.
367 */
965eb2fc 368 if (memory_region_is_ram(mr)) {
e4a511f8 369 diff = int128_sub(section->size, int128_make64(addr));
965eb2fc
PB
370 *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
371 }
149f54b5
PB
372 return section;
373}
90260c6c 374
a87f3954
PB
375static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
376{
377 if (memory_region_is_ram(mr)) {
378 return !(is_write && mr->readonly);
379 }
380 if (memory_region_is_romd(mr)) {
381 return !is_write;
382 }
383
384 return false;
385}
386
41063e1e 387/* Called from RCU critical section */
5c8a00ce
PB
388MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
389 hwaddr *xlat, hwaddr *plen,
390 bool is_write)
90260c6c 391{
30951157
AK
392 IOMMUTLBEntry iotlb;
393 MemoryRegionSection *section;
394 MemoryRegion *mr;
30951157
AK
395
396 for (;;) {
79e2b9ae
PB
397 AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
398 section = address_space_translate_internal(d, addr, &addr, plen, true);
30951157
AK
399 mr = section->mr;
400
401 if (!mr->iommu_ops) {
402 break;
403 }
404
8d7b8cb9 405 iotlb = mr->iommu_ops->translate(mr, addr, is_write);
30951157
AK
406 addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
407 | (addr & iotlb.addr_mask));
23820dbf 408 *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
30951157
AK
409 if (!(iotlb.perm & (1 << is_write))) {
410 mr = &io_mem_unassigned;
411 break;
412 }
413
414 as = iotlb.target_as;
415 }
416
fe680d0d 417 if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
a87f3954 418 hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
23820dbf 419 *plen = MIN(page, *plen);
a87f3954
PB
420 }
421
30951157
AK
422 *xlat = addr;
423 return mr;
90260c6c
JK
424}
425
79e2b9ae 426/* Called from RCU critical section */
90260c6c 427MemoryRegionSection *
9d82b5a7
PB
428address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
429 hwaddr *xlat, hwaddr *plen)
90260c6c 430{
30951157 431 MemoryRegionSection *section;
9d82b5a7
PB
432 section = address_space_translate_internal(cpu->memory_dispatch,
433 addr, xlat, plen, false);
30951157
AK
434
435 assert(!section->mr->iommu_ops);
436 return section;
90260c6c 437}
5b6dd868 438#endif
fd6ce8f6 439
b170fce3 440#if !defined(CONFIG_USER_ONLY)
5b6dd868
BS
441
442static int cpu_common_post_load(void *opaque, int version_id)
fd6ce8f6 443{
259186a7 444 CPUState *cpu = opaque;
a513fe19 445
5b6dd868
BS
446 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
447 version_id is increased. */
259186a7 448 cpu->interrupt_request &= ~0x01;
c01a71c1 449 tlb_flush(cpu, 1);
5b6dd868
BS
450
451 return 0;
a513fe19 452}
7501267e 453
6c3bff0e
PD
454static int cpu_common_pre_load(void *opaque)
455{
456 CPUState *cpu = opaque;
457
adee6424 458 cpu->exception_index = -1;
6c3bff0e
PD
459
460 return 0;
461}
462
463static bool cpu_common_exception_index_needed(void *opaque)
464{
465 CPUState *cpu = opaque;
466
adee6424 467 return tcg_enabled() && cpu->exception_index != -1;
6c3bff0e
PD
468}
469
470static const VMStateDescription vmstate_cpu_common_exception_index = {
471 .name = "cpu_common/exception_index",
472 .version_id = 1,
473 .minimum_version_id = 1,
5cd8cada 474 .needed = cpu_common_exception_index_needed,
6c3bff0e
PD
475 .fields = (VMStateField[]) {
476 VMSTATE_INT32(exception_index, CPUState),
477 VMSTATE_END_OF_LIST()
478 }
479};
480
1a1562f5 481const VMStateDescription vmstate_cpu_common = {
5b6dd868
BS
482 .name = "cpu_common",
483 .version_id = 1,
484 .minimum_version_id = 1,
6c3bff0e 485 .pre_load = cpu_common_pre_load,
5b6dd868 486 .post_load = cpu_common_post_load,
35d08458 487 .fields = (VMStateField[]) {
259186a7
AF
488 VMSTATE_UINT32(halted, CPUState),
489 VMSTATE_UINT32(interrupt_request, CPUState),
5b6dd868 490 VMSTATE_END_OF_LIST()
6c3bff0e 491 },
5cd8cada
JQ
492 .subsections = (const VMStateDescription*[]) {
493 &vmstate_cpu_common_exception_index,
494 NULL
5b6dd868
BS
495 }
496};
1a1562f5 497
5b6dd868 498#endif
ea041c0e 499
38d8f5c8 500CPUState *qemu_get_cpu(int index)
ea041c0e 501{
bdc44640 502 CPUState *cpu;
ea041c0e 503
bdc44640 504 CPU_FOREACH(cpu) {
55e5c285 505 if (cpu->cpu_index == index) {
bdc44640 506 return cpu;
55e5c285 507 }
ea041c0e 508 }
5b6dd868 509
bdc44640 510 return NULL;
ea041c0e
FB
511}
512
09daed84
EI
513#if !defined(CONFIG_USER_ONLY)
514void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
515{
516 /* We only support one address space per cpu at the moment. */
517 assert(cpu->as == as);
518
519 if (cpu->tcg_as_listener) {
520 memory_listener_unregister(cpu->tcg_as_listener);
521 } else {
522 cpu->tcg_as_listener = g_new0(MemoryListener, 1);
523 }
524 cpu->tcg_as_listener->commit = tcg_commit;
525 memory_listener_register(cpu->tcg_as_listener, as);
526}
527#endif
528
b7bca733
BR
529#ifndef CONFIG_USER_ONLY
530static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);
531
532static int cpu_get_free_index(Error **errp)
533{
534 int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);
535
536 if (cpu >= MAX_CPUMASK_BITS) {
537 error_setg(errp, "Trying to use more CPUs than max of %d",
538 MAX_CPUMASK_BITS);
539 return -1;
540 }
541
542 bitmap_set(cpu_index_map, cpu, 1);
543 return cpu;
544}
545
546void cpu_exec_exit(CPUState *cpu)
547{
548 if (cpu->cpu_index == -1) {
549 /* cpu_index was never allocated by this @cpu or was already freed. */
550 return;
551 }
552
553 bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
554 cpu->cpu_index = -1;
555}
556#else
557
558static int cpu_get_free_index(Error **errp)
559{
560 CPUState *some_cpu;
561 int cpu_index = 0;
562
563 CPU_FOREACH(some_cpu) {
564 cpu_index++;
565 }
566 return cpu_index;
567}
568
569void cpu_exec_exit(CPUState *cpu)
570{
571}
572#endif
573
5a790cc4 574void cpu_exec_init(CPUArchState *env, Error **errp)
ea041c0e 575{
5b6dd868 576 CPUState *cpu = ENV_GET_CPU(env);
b170fce3 577 CPUClass *cc = CPU_GET_CLASS(cpu);
5b6dd868 578 int cpu_index;
b7bca733 579 Error *local_err = NULL;
5b6dd868 580
291135b5
EH
581#ifndef CONFIG_USER_ONLY
582 cpu->as = &address_space_memory;
583 cpu->thread_id = qemu_get_thread_id();
584 cpu_reload_memory_map(cpu);
585#endif
586
5b6dd868
BS
587#if defined(CONFIG_USER_ONLY)
588 cpu_list_lock();
589#endif
b7bca733
BR
590 cpu_index = cpu->cpu_index = cpu_get_free_index(&local_err);
591 if (local_err) {
592 error_propagate(errp, local_err);
593#if defined(CONFIG_USER_ONLY)
594 cpu_list_unlock();
595#endif
596 return;
5b6dd868 597 }
bdc44640 598 QTAILQ_INSERT_TAIL(&cpus, cpu, node);
5b6dd868
BS
599#if defined(CONFIG_USER_ONLY)
600 cpu_list_unlock();
601#endif
e0d47944
AF
602 if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
603 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
604 }
5b6dd868 605#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
5b6dd868
BS
606 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
607 cpu_save, cpu_load, env);
b170fce3 608 assert(cc->vmsd == NULL);
e0d47944 609 assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
5b6dd868 610#endif
b170fce3
AF
611 if (cc->vmsd != NULL) {
612 vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
613 }
ea041c0e
FB
614}
615
94df27fd 616#if defined(CONFIG_USER_ONLY)
00b941e5 617static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
94df27fd
PB
618{
619 tb_invalidate_phys_page_range(pc, pc + 1, 0);
620}
621#else
00b941e5 622static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
1e7855a5 623{
e8262a1b
MF
624 hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
625 if (phys != -1) {
09daed84 626 tb_invalidate_phys_addr(cpu->as,
29d8ec7b 627 phys | (pc & ~TARGET_PAGE_MASK));
e8262a1b 628 }
1e7855a5 629}
c27004ec 630#endif
d720b93d 631
c527ee8f 632#if defined(CONFIG_USER_ONLY)
75a34036 633void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
c527ee8f
PB
634
635{
636}
637
3ee887e8
PM
638int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
639 int flags)
640{
641 return -ENOSYS;
642}
643
644void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
645{
646}
647
75a34036 648int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
c527ee8f
PB
649 int flags, CPUWatchpoint **watchpoint)
650{
651 return -ENOSYS;
652}
653#else
6658ffb8 654/* Add a watchpoint. */
75a34036 655int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
a1d1bb31 656 int flags, CPUWatchpoint **watchpoint)
6658ffb8 657{
c0ce998e 658 CPUWatchpoint *wp;
6658ffb8 659
05068c0d 660 /* forbid ranges which are empty or run off the end of the address space */
07e2863d 661 if (len == 0 || (addr + len - 1) < addr) {
75a34036
AF
662 error_report("tried to set invalid watchpoint at %"
663 VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
b4051334
AL
664 return -EINVAL;
665 }
7267c094 666 wp = g_malloc(sizeof(*wp));
a1d1bb31
AL
667
668 wp->vaddr = addr;
05068c0d 669 wp->len = len;
a1d1bb31
AL
670 wp->flags = flags;
671
2dc9f411 672 /* keep all GDB-injected watchpoints in front */
ff4700b0
AF
673 if (flags & BP_GDB) {
674 QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
675 } else {
676 QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
677 }
6658ffb8 678
31b030d4 679 tlb_flush_page(cpu, addr);
a1d1bb31
AL
680
681 if (watchpoint)
682 *watchpoint = wp;
683 return 0;
6658ffb8
PB
684}
685
a1d1bb31 686/* Remove a specific watchpoint. */
75a34036 687int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
a1d1bb31 688 int flags)
6658ffb8 689{
a1d1bb31 690 CPUWatchpoint *wp;
6658ffb8 691
ff4700b0 692 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
05068c0d 693 if (addr == wp->vaddr && len == wp->len
6e140f28 694 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
75a34036 695 cpu_watchpoint_remove_by_ref(cpu, wp);
6658ffb8
PB
696 return 0;
697 }
698 }
a1d1bb31 699 return -ENOENT;
6658ffb8
PB
700}
701
a1d1bb31 702/* Remove a specific watchpoint by reference. */
75a34036 703void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
a1d1bb31 704{
ff4700b0 705 QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);
7d03f82f 706
31b030d4 707 tlb_flush_page(cpu, watchpoint->vaddr);
a1d1bb31 708
7267c094 709 g_free(watchpoint);
a1d1bb31
AL
710}
711
712/* Remove all matching watchpoints. */
75a34036 713void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
a1d1bb31 714{
c0ce998e 715 CPUWatchpoint *wp, *next;
a1d1bb31 716
ff4700b0 717 QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
75a34036
AF
718 if (wp->flags & mask) {
719 cpu_watchpoint_remove_by_ref(cpu, wp);
720 }
c0ce998e 721 }
7d03f82f 722}
05068c0d
PM
723
724/* Return true if this watchpoint address matches the specified
725 * access (ie the address range covered by the watchpoint overlaps
726 * partially or completely with the address range covered by the
727 * access).
728 */
729static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
730 vaddr addr,
731 vaddr len)
732{
733 /* We know the lengths are non-zero, but a little caution is
734 * required to avoid errors in the case where the range ends
735 * exactly at the top of the address space and so addr + len
736 * wraps round to zero.
737 */
738 vaddr wpend = wp->vaddr + wp->len - 1;
739 vaddr addrend = addr + len - 1;
740
741 return !(addr > wpend || wp->vaddr > addrend);
742}
743
c527ee8f 744#endif
7d03f82f 745
a1d1bb31 746/* Add a breakpoint. */
b3310ab3 747int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
a1d1bb31 748 CPUBreakpoint **breakpoint)
4c3a88a2 749{
c0ce998e 750 CPUBreakpoint *bp;
3b46e624 751
7267c094 752 bp = g_malloc(sizeof(*bp));
4c3a88a2 753
a1d1bb31
AL
754 bp->pc = pc;
755 bp->flags = flags;
756
2dc9f411 757 /* keep all GDB-injected breakpoints in front */
00b941e5 758 if (flags & BP_GDB) {
f0c3c505 759 QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
00b941e5 760 } else {
f0c3c505 761 QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
00b941e5 762 }
3b46e624 763
f0c3c505 764 breakpoint_invalidate(cpu, pc);
a1d1bb31 765
00b941e5 766 if (breakpoint) {
a1d1bb31 767 *breakpoint = bp;
00b941e5 768 }
4c3a88a2 769 return 0;
4c3a88a2
FB
770}
771
a1d1bb31 772/* Remove a specific breakpoint. */
b3310ab3 773int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
a1d1bb31 774{
a1d1bb31
AL
775 CPUBreakpoint *bp;
776
f0c3c505 777 QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
a1d1bb31 778 if (bp->pc == pc && bp->flags == flags) {
b3310ab3 779 cpu_breakpoint_remove_by_ref(cpu, bp);
a1d1bb31
AL
780 return 0;
781 }
7d03f82f 782 }
a1d1bb31 783 return -ENOENT;
7d03f82f
EI
784}
785
a1d1bb31 786/* Remove a specific breakpoint by reference. */
b3310ab3 787void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
4c3a88a2 788{
f0c3c505
AF
789 QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);
790
791 breakpoint_invalidate(cpu, breakpoint->pc);
a1d1bb31 792
7267c094 793 g_free(breakpoint);
a1d1bb31
AL
794}
795
796/* Remove all matching breakpoints. */
b3310ab3 797void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
a1d1bb31 798{
c0ce998e 799 CPUBreakpoint *bp, *next;
a1d1bb31 800
f0c3c505 801 QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
b3310ab3
AF
802 if (bp->flags & mask) {
803 cpu_breakpoint_remove_by_ref(cpu, bp);
804 }
c0ce998e 805 }
4c3a88a2
FB
806}
807
c33a346e
FB
808/* enable or disable single step mode. EXCP_DEBUG is returned by the
809 CPU loop after each instruction */
3825b28f 810void cpu_single_step(CPUState *cpu, int enabled)
c33a346e 811{
ed2803da
AF
812 if (cpu->singlestep_enabled != enabled) {
813 cpu->singlestep_enabled = enabled;
814 if (kvm_enabled()) {
38e478ec 815 kvm_update_guest_debug(cpu, 0);
ed2803da 816 } else {
ccbb4d44 817 /* must flush all the translated code to avoid inconsistencies */
e22a25c9 818 /* XXX: only flush what is necessary */
38e478ec 819 CPUArchState *env = cpu->env_ptr;
e22a25c9
AL
820 tb_flush(env);
821 }
c33a346e 822 }
c33a346e
FB
823}
824
a47dddd7 825void cpu_abort(CPUState *cpu, const char *fmt, ...)
7501267e
FB
826{
827 va_list ap;
493ae1f0 828 va_list ap2;
7501267e
FB
829
830 va_start(ap, fmt);
493ae1f0 831 va_copy(ap2, ap);
7501267e
FB
832 fprintf(stderr, "qemu: fatal: ");
833 vfprintf(stderr, fmt, ap);
834 fprintf(stderr, "\n");
878096ee 835 cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
93fcfe39
AL
836 if (qemu_log_enabled()) {
837 qemu_log("qemu: fatal: ");
838 qemu_log_vprintf(fmt, ap2);
839 qemu_log("\n");
a0762859 840 log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
31b1a7b4 841 qemu_log_flush();
93fcfe39 842 qemu_log_close();
924edcae 843 }
493ae1f0 844 va_end(ap2);
f9373291 845 va_end(ap);
fd052bf6
RV
846#if defined(CONFIG_USER_ONLY)
847 {
848 struct sigaction act;
849 sigfillset(&act.sa_mask);
850 act.sa_handler = SIG_DFL;
851 sigaction(SIGABRT, &act, NULL);
852 }
853#endif
7501267e
FB
854 abort();
855}
856
0124311e 857#if !defined(CONFIG_USER_ONLY)
0dc3f44a 858/* Called from RCU critical section */
041603fe
PB
859static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
860{
861 RAMBlock *block;
862
43771539 863 block = atomic_rcu_read(&ram_list.mru_block);
9b8424d5 864 if (block && addr - block->offset < block->max_length) {
041603fe
PB
865 goto found;
866 }
0dc3f44a 867 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
9b8424d5 868 if (addr - block->offset < block->max_length) {
041603fe
PB
869 goto found;
870 }
871 }
872
873 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
874 abort();
875
876found:
43771539
PB
877 /* It is safe to write mru_block outside the iothread lock. This
878 * is what happens:
879 *
880 * mru_block = xxx
881 * rcu_read_unlock()
882 * xxx removed from list
883 * rcu_read_lock()
884 * read mru_block
885 * mru_block = NULL;
886 * call_rcu(reclaim_ramblock, xxx);
887 * rcu_read_unlock()
888 *
889 * atomic_rcu_set is not needed here. The block was already published
890 * when it was placed into the list. Here we're just making an extra
891 * copy of the pointer.
892 */
041603fe
PB
893 ram_list.mru_block = block;
894 return block;
895}
896
a2f4d5be 897static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
d24981d3 898{
041603fe 899 ram_addr_t start1;
a2f4d5be
JQ
900 RAMBlock *block;
901 ram_addr_t end;
902
903 end = TARGET_PAGE_ALIGN(start + length);
904 start &= TARGET_PAGE_MASK;
d24981d3 905
0dc3f44a 906 rcu_read_lock();
041603fe
PB
907 block = qemu_get_ram_block(start);
908 assert(block == qemu_get_ram_block(end - 1));
1240be24 909 start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
041603fe 910 cpu_tlb_reset_dirty_all(start1, length);
0dc3f44a 911 rcu_read_unlock();
d24981d3
JQ
912}
913
5579c7f3 914/* Note: start and end must be within the same ram block. */
03eebc9e
SH
915bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
916 ram_addr_t length,
917 unsigned client)
1ccde1cb 918{
03eebc9e
SH
919 unsigned long end, page;
920 bool dirty;
921
922 if (length == 0) {
923 return false;
924 }
f23db169 925
03eebc9e
SH
926 end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
927 page = start >> TARGET_PAGE_BITS;
928 dirty = bitmap_test_and_clear_atomic(ram_list.dirty_memory[client],
929 page, end - page);
930
931 if (dirty && tcg_enabled()) {
a2f4d5be 932 tlb_reset_dirty_range_all(start, length);
5579c7f3 933 }
03eebc9e
SH
934
935 return dirty;
1ccde1cb
FB
936}
937
79e2b9ae 938/* Called from RCU critical section */
bb0e627a 939hwaddr memory_region_section_get_iotlb(CPUState *cpu,
149f54b5
PB
940 MemoryRegionSection *section,
941 target_ulong vaddr,
942 hwaddr paddr, hwaddr xlat,
943 int prot,
944 target_ulong *address)
e5548617 945{
a8170e5e 946 hwaddr iotlb;
e5548617
BS
947 CPUWatchpoint *wp;
948
cc5bea60 949 if (memory_region_is_ram(section->mr)) {
e5548617
BS
950 /* Normal RAM. */
951 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
149f54b5 952 + xlat;
e5548617 953 if (!section->readonly) {
b41aac4f 954 iotlb |= PHYS_SECTION_NOTDIRTY;
e5548617 955 } else {
b41aac4f 956 iotlb |= PHYS_SECTION_ROM;
e5548617
BS
957 }
958 } else {
1b3fb98f 959 iotlb = section - section->address_space->dispatch->map.sections;
149f54b5 960 iotlb += xlat;
e5548617
BS
961 }
962
963 /* Make accesses to pages with watchpoints go via the
964 watchpoint trap routines. */
ff4700b0 965 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
05068c0d 966 if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
e5548617
BS
967 /* Avoid trapping reads of pages with a write breakpoint. */
968 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
b41aac4f 969 iotlb = PHYS_SECTION_WATCH + paddr;
e5548617
BS
970 *address |= TLB_MMIO;
971 break;
972 }
973 }
974 }
975
976 return iotlb;
977}
9fa3e853
FB
978#endif /* defined(CONFIG_USER_ONLY) */
979
e2eef170 980#if !defined(CONFIG_USER_ONLY)
8da3ff18 981
c227f099 982static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 983 uint16_t section);
acc9d80b 984static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
54688b1e 985
a2b257d6
IM
986static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
987 qemu_anon_ram_alloc;
91138037
MA
988
989/*
990 * Set a custom physical guest memory alloator.
991 * Accelerators with unusual needs may need this. Hopefully, we can
992 * get rid of it eventually.
993 */
a2b257d6 994void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
91138037
MA
995{
996 phys_mem_alloc = alloc;
997}
998
53cb28cb
MA
999static uint16_t phys_section_add(PhysPageMap *map,
1000 MemoryRegionSection *section)
5312bd8b 1001{
68f3f65b
PB
1002 /* The physical section number is ORed with a page-aligned
1003 * pointer to produce the iotlb entries. Thus it should
1004 * never overflow into the page-aligned value.
1005 */
53cb28cb 1006 assert(map->sections_nb < TARGET_PAGE_SIZE);
68f3f65b 1007
53cb28cb
MA
1008 if (map->sections_nb == map->sections_nb_alloc) {
1009 map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
1010 map->sections = g_renew(MemoryRegionSection, map->sections,
1011 map->sections_nb_alloc);
5312bd8b 1012 }
53cb28cb 1013 map->sections[map->sections_nb] = *section;
dfde4e6e 1014 memory_region_ref(section->mr);
53cb28cb 1015 return map->sections_nb++;
5312bd8b
AK
1016}
1017
058bc4b5
PB
1018static void phys_section_destroy(MemoryRegion *mr)
1019{
dfde4e6e
PB
1020 memory_region_unref(mr);
1021
058bc4b5
PB
1022 if (mr->subpage) {
1023 subpage_t *subpage = container_of(mr, subpage_t, iomem);
b4fefef9 1024 object_unref(OBJECT(&subpage->iomem));
058bc4b5
PB
1025 g_free(subpage);
1026 }
1027}
1028
6092666e 1029static void phys_sections_free(PhysPageMap *map)
5312bd8b 1030{
9affd6fc
PB
1031 while (map->sections_nb > 0) {
1032 MemoryRegionSection *section = &map->sections[--map->sections_nb];
058bc4b5
PB
1033 phys_section_destroy(section->mr);
1034 }
9affd6fc
PB
1035 g_free(map->sections);
1036 g_free(map->nodes);
5312bd8b
AK
1037}
1038
ac1970fb 1039static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
0f0cb164
AK
1040{
1041 subpage_t *subpage;
a8170e5e 1042 hwaddr base = section->offset_within_address_space
0f0cb164 1043 & TARGET_PAGE_MASK;
97115a8d 1044 MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
53cb28cb 1045 d->map.nodes, d->map.sections);
0f0cb164
AK
1046 MemoryRegionSection subsection = {
1047 .offset_within_address_space = base,
052e87b0 1048 .size = int128_make64(TARGET_PAGE_SIZE),
0f0cb164 1049 };
a8170e5e 1050 hwaddr start, end;
0f0cb164 1051
f3705d53 1052 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
0f0cb164 1053
f3705d53 1054 if (!(existing->mr->subpage)) {
acc9d80b 1055 subpage = subpage_init(d->as, base);
3be91e86 1056 subsection.address_space = d->as;
0f0cb164 1057 subsection.mr = &subpage->iomem;
ac1970fb 1058 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
53cb28cb 1059 phys_section_add(&d->map, &subsection));
0f0cb164 1060 } else {
f3705d53 1061 subpage = container_of(existing->mr, subpage_t, iomem);
0f0cb164
AK
1062 }
1063 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
052e87b0 1064 end = start + int128_get64(section->size) - 1;
53cb28cb
MA
1065 subpage_register(subpage, start, end,
1066 phys_section_add(&d->map, section));
0f0cb164
AK
1067}
1068
1069
052e87b0
PB
1070static void register_multipage(AddressSpaceDispatch *d,
1071 MemoryRegionSection *section)
33417e70 1072{
a8170e5e 1073 hwaddr start_addr = section->offset_within_address_space;
53cb28cb 1074 uint16_t section_index = phys_section_add(&d->map, section);
052e87b0
PB
1075 uint64_t num_pages = int128_get64(int128_rshift(section->size,
1076 TARGET_PAGE_BITS));
dd81124b 1077
733d5ef5
PB
1078 assert(num_pages);
1079 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
33417e70
FB
1080}
1081
ac1970fb 1082static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
0f0cb164 1083{
89ae337a 1084 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
00752703 1085 AddressSpaceDispatch *d = as->next_dispatch;
99b9cc06 1086 MemoryRegionSection now = *section, remain = *section;
052e87b0 1087 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
0f0cb164 1088
733d5ef5
PB
1089 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1090 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1091 - now.offset_within_address_space;
1092
052e87b0 1093 now.size = int128_min(int128_make64(left), now.size);
ac1970fb 1094 register_subpage(d, &now);
733d5ef5 1095 } else {
052e87b0 1096 now.size = int128_zero();
733d5ef5 1097 }
052e87b0
PB
1098 while (int128_ne(remain.size, now.size)) {
1099 remain.size = int128_sub(remain.size, now.size);
1100 remain.offset_within_address_space += int128_get64(now.size);
1101 remain.offset_within_region += int128_get64(now.size);
69b67646 1102 now = remain;
052e87b0 1103 if (int128_lt(remain.size, page_size)) {
733d5ef5 1104 register_subpage(d, &now);
88266249 1105 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
052e87b0 1106 now.size = page_size;
ac1970fb 1107 register_subpage(d, &now);
69b67646 1108 } else {
052e87b0 1109 now.size = int128_and(now.size, int128_neg(page_size));
ac1970fb 1110 register_multipage(d, &now);
69b67646 1111 }
0f0cb164
AK
1112 }
1113}
1114
62a2744c
SY
1115void qemu_flush_coalesced_mmio_buffer(void)
1116{
1117 if (kvm_enabled())
1118 kvm_flush_coalesced_mmio_buffer();
1119}
1120
b2a8658e
UD
1121void qemu_mutex_lock_ramlist(void)
1122{
1123 qemu_mutex_lock(&ram_list.mutex);
1124}
1125
1126void qemu_mutex_unlock_ramlist(void)
1127{
1128 qemu_mutex_unlock(&ram_list.mutex);
1129}
1130
e1e84ba0 1131#ifdef __linux__
c902760f
MT
1132
1133#include <sys/vfs.h>
1134
1135#define HUGETLBFS_MAGIC 0x958458f6
1136
fc7a5800 1137static long gethugepagesize(const char *path, Error **errp)
c902760f
MT
1138{
1139 struct statfs fs;
1140 int ret;
1141
1142 do {
9742bf26 1143 ret = statfs(path, &fs);
c902760f
MT
1144 } while (ret != 0 && errno == EINTR);
1145
1146 if (ret != 0) {
fc7a5800
HT
1147 error_setg_errno(errp, errno, "failed to get page size of file %s",
1148 path);
9742bf26 1149 return 0;
c902760f
MT
1150 }
1151
1152 if (fs.f_type != HUGETLBFS_MAGIC)
9742bf26 1153 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
c902760f
MT
1154
1155 return fs.f_bsize;
1156}
1157
04b16653
AW
1158static void *file_ram_alloc(RAMBlock *block,
1159 ram_addr_t memory,
7f56e740
PB
1160 const char *path,
1161 Error **errp)
c902760f
MT
1162{
1163 char *filename;
8ca761f6
PF
1164 char *sanitized_name;
1165 char *c;
557529dd 1166 void *area = NULL;
c902760f 1167 int fd;
557529dd 1168 uint64_t hpagesize;
fc7a5800 1169 Error *local_err = NULL;
c902760f 1170
fc7a5800
HT
1171 hpagesize = gethugepagesize(path, &local_err);
1172 if (local_err) {
1173 error_propagate(errp, local_err);
f9a49dfa 1174 goto error;
c902760f 1175 }
a2b257d6 1176 block->mr->align = hpagesize;
c902760f
MT
1177
1178 if (memory < hpagesize) {
557529dd
HT
1179 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1180 "or larger than huge page size 0x%" PRIx64,
1181 memory, hpagesize);
1182 goto error;
c902760f
MT
1183 }
1184
1185 if (kvm_enabled() && !kvm_has_sync_mmu()) {
7f56e740
PB
1186 error_setg(errp,
1187 "host lacks kvm mmu notifiers, -mem-path unsupported");
f9a49dfa 1188 goto error;
c902760f
MT
1189 }
1190
8ca761f6 1191 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
83234bf2 1192 sanitized_name = g_strdup(memory_region_name(block->mr));
8ca761f6
PF
1193 for (c = sanitized_name; *c != '\0'; c++) {
1194 if (*c == '/')
1195 *c = '_';
1196 }
1197
1198 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1199 sanitized_name);
1200 g_free(sanitized_name);
c902760f
MT
1201
1202 fd = mkstemp(filename);
1203 if (fd < 0) {
7f56e740
PB
1204 error_setg_errno(errp, errno,
1205 "unable to create backing store for hugepages");
e4ada482 1206 g_free(filename);
f9a49dfa 1207 goto error;
c902760f
MT
1208 }
1209 unlink(filename);
e4ada482 1210 g_free(filename);
c902760f
MT
1211
1212 memory = (memory+hpagesize-1) & ~(hpagesize-1);
1213
1214 /*
1215 * ftruncate is not supported by hugetlbfs in older
1216 * hosts, so don't bother bailing out on errors.
1217 * If anything goes wrong with it under other filesystems,
1218 * mmap will fail.
1219 */
7f56e740 1220 if (ftruncate(fd, memory)) {
9742bf26 1221 perror("ftruncate");
7f56e740 1222 }
c902760f 1223
dbcb8981
PB
1224 area = mmap(0, memory, PROT_READ | PROT_WRITE,
1225 (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE),
1226 fd, 0);
c902760f 1227 if (area == MAP_FAILED) {
7f56e740
PB
1228 error_setg_errno(errp, errno,
1229 "unable to map backing store for hugepages");
9742bf26 1230 close(fd);
f9a49dfa 1231 goto error;
c902760f 1232 }
ef36fa14
MT
1233
1234 if (mem_prealloc) {
38183310 1235 os_mem_prealloc(fd, area, memory);
ef36fa14
MT
1236 }
1237
04b16653 1238 block->fd = fd;
c902760f 1239 return area;
f9a49dfa
MT
1240
1241error:
1242 if (mem_prealloc) {
81b07353 1243 error_report("%s", error_get_pretty(*errp));
f9a49dfa
MT
1244 exit(1);
1245 }
1246 return NULL;
c902760f
MT
1247}
1248#endif
1249
0dc3f44a 1250/* Called with the ramlist lock held. */
d17b5288 1251static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653
AW
1252{
1253 RAMBlock *block, *next_block;
3e837b2c 1254 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
04b16653 1255
49cd9ac6
SH
1256 assert(size != 0); /* it would hand out same offset multiple times */
1257
0dc3f44a 1258 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
04b16653 1259 return 0;
0d53d9fe 1260 }
04b16653 1261
0dc3f44a 1262 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
f15fbc4b 1263 ram_addr_t end, next = RAM_ADDR_MAX;
04b16653 1264
62be4e3a 1265 end = block->offset + block->max_length;
04b16653 1266
0dc3f44a 1267 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
04b16653
AW
1268 if (next_block->offset >= end) {
1269 next = MIN(next, next_block->offset);
1270 }
1271 }
1272 if (next - end >= size && next - end < mingap) {
3e837b2c 1273 offset = end;
04b16653
AW
1274 mingap = next - end;
1275 }
1276 }
3e837b2c
AW
1277
1278 if (offset == RAM_ADDR_MAX) {
1279 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1280 (uint64_t)size);
1281 abort();
1282 }
1283
04b16653
AW
1284 return offset;
1285}
1286
652d7ec2 1287ram_addr_t last_ram_offset(void)
d17b5288
AW
1288{
1289 RAMBlock *block;
1290 ram_addr_t last = 0;
1291
0dc3f44a
MD
1292 rcu_read_lock();
1293 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
62be4e3a 1294 last = MAX(last, block->offset + block->max_length);
0d53d9fe 1295 }
0dc3f44a 1296 rcu_read_unlock();
d17b5288
AW
1297 return last;
1298}
1299
ddb97f1d
JB
1300static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1301{
1302 int ret;
ddb97f1d
JB
1303
1304 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
47c8ca53 1305 if (!machine_dump_guest_core(current_machine)) {
ddb97f1d
JB
1306 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1307 if (ret) {
1308 perror("qemu_madvise");
1309 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1310 "but dump_guest_core=off specified\n");
1311 }
1312 }
1313}
1314
0dc3f44a
MD
1315/* Called within an RCU critical section, or while the ramlist lock
1316 * is held.
1317 */
20cfe881 1318static RAMBlock *find_ram_block(ram_addr_t addr)
84b89d78 1319{
20cfe881 1320 RAMBlock *block;
84b89d78 1321
0dc3f44a 1322 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
c5705a77 1323 if (block->offset == addr) {
20cfe881 1324 return block;
c5705a77
AK
1325 }
1326 }
20cfe881
HT
1327
1328 return NULL;
1329}
1330
ae3a7047 1331/* Called with iothread lock held. */
20cfe881
HT
1332void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1333{
ae3a7047 1334 RAMBlock *new_block, *block;
20cfe881 1335
0dc3f44a 1336 rcu_read_lock();
ae3a7047 1337 new_block = find_ram_block(addr);
c5705a77
AK
1338 assert(new_block);
1339 assert(!new_block->idstr[0]);
84b89d78 1340
09e5ab63
AL
1341 if (dev) {
1342 char *id = qdev_get_dev_path(dev);
84b89d78
CM
1343 if (id) {
1344 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
7267c094 1345 g_free(id);
84b89d78
CM
1346 }
1347 }
1348 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1349
0dc3f44a 1350 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
c5705a77 1351 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
84b89d78
CM
1352 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1353 new_block->idstr);
1354 abort();
1355 }
1356 }
0dc3f44a 1357 rcu_read_unlock();
c5705a77
AK
1358}
1359
ae3a7047 1360/* Called with iothread lock held. */
20cfe881
HT
1361void qemu_ram_unset_idstr(ram_addr_t addr)
1362{
ae3a7047 1363 RAMBlock *block;
20cfe881 1364
ae3a7047
MD
1365 /* FIXME: arch_init.c assumes that this is not called throughout
1366 * migration. Ignore the problem since hot-unplug during migration
1367 * does not work anyway.
1368 */
1369
0dc3f44a 1370 rcu_read_lock();
ae3a7047 1371 block = find_ram_block(addr);
20cfe881
HT
1372 if (block) {
1373 memset(block->idstr, 0, sizeof(block->idstr));
1374 }
0dc3f44a 1375 rcu_read_unlock();
20cfe881
HT
1376}
1377
8490fc78
LC
1378static int memory_try_enable_merging(void *addr, size_t len)
1379{
75cc7f01 1380 if (!machine_mem_merge(current_machine)) {
8490fc78
LC
1381 /* disabled by the user */
1382 return 0;
1383 }
1384
1385 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1386}
1387
62be4e3a
MT
1388/* Only legal before guest might have detected the memory size: e.g. on
1389 * incoming migration, or right after reset.
1390 *
1391 * As memory core doesn't know how is memory accessed, it is up to
1392 * resize callback to update device state and/or add assertions to detect
1393 * misuse, if necessary.
1394 */
1395int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
1396{
1397 RAMBlock *block = find_ram_block(base);
1398
1399 assert(block);
1400
129ddaf3
MT
1401 newsize = TARGET_PAGE_ALIGN(newsize);
1402
62be4e3a
MT
1403 if (block->used_length == newsize) {
1404 return 0;
1405 }
1406
1407 if (!(block->flags & RAM_RESIZEABLE)) {
1408 error_setg_errno(errp, EINVAL,
1409 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1410 " in != 0x" RAM_ADDR_FMT, block->idstr,
1411 newsize, block->used_length);
1412 return -EINVAL;
1413 }
1414
1415 if (block->max_length < newsize) {
1416 error_setg_errno(errp, EINVAL,
1417 "Length too large: %s: 0x" RAM_ADDR_FMT
1418 " > 0x" RAM_ADDR_FMT, block->idstr,
1419 newsize, block->max_length);
1420 return -EINVAL;
1421 }
1422
1423 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1424 block->used_length = newsize;
58d2707e
PB
1425 cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
1426 DIRTY_CLIENTS_ALL);
62be4e3a
MT
1427 memory_region_set_size(block->mr, newsize);
1428 if (block->resized) {
1429 block->resized(block->idstr, newsize, block->host);
1430 }
1431 return 0;
1432}
1433
ef701d7b 1434static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
c5705a77 1435{
e1c57ab8 1436 RAMBlock *block;
0d53d9fe 1437 RAMBlock *last_block = NULL;
2152f5ca
JQ
1438 ram_addr_t old_ram_size, new_ram_size;
1439
1440 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
c5705a77 1441
b2a8658e 1442 qemu_mutex_lock_ramlist();
9b8424d5 1443 new_block->offset = find_ram_offset(new_block->max_length);
e1c57ab8
PB
1444
1445 if (!new_block->host) {
1446 if (xen_enabled()) {
9b8424d5
MT
1447 xen_ram_alloc(new_block->offset, new_block->max_length,
1448 new_block->mr);
e1c57ab8 1449 } else {
9b8424d5 1450 new_block->host = phys_mem_alloc(new_block->max_length,
a2b257d6 1451 &new_block->mr->align);
39228250 1452 if (!new_block->host) {
ef701d7b
HT
1453 error_setg_errno(errp, errno,
1454 "cannot set up guest memory '%s'",
1455 memory_region_name(new_block->mr));
1456 qemu_mutex_unlock_ramlist();
1457 return -1;
39228250 1458 }
9b8424d5 1459 memory_try_enable_merging(new_block->host, new_block->max_length);
6977dfe6 1460 }
c902760f 1461 }
94a6b54f 1462
dd631697
LZ
1463 new_ram_size = MAX(old_ram_size,
1464 (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
1465 if (new_ram_size > old_ram_size) {
1466 migration_bitmap_extend(old_ram_size, new_ram_size);
1467 }
0d53d9fe
MD
1468 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1469 * QLIST (which has an RCU-friendly variant) does not have insertion at
1470 * tail, so save the last element in last_block.
1471 */
0dc3f44a 1472 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
0d53d9fe 1473 last_block = block;
9b8424d5 1474 if (block->max_length < new_block->max_length) {
abb26d63
PB
1475 break;
1476 }
1477 }
1478 if (block) {
0dc3f44a 1479 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
0d53d9fe 1480 } else if (last_block) {
0dc3f44a 1481 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
0d53d9fe 1482 } else { /* list is empty */
0dc3f44a 1483 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
abb26d63 1484 }
0d6d3c87 1485 ram_list.mru_block = NULL;
94a6b54f 1486
0dc3f44a
MD
1487 /* Write list before version */
1488 smp_wmb();
f798b07f 1489 ram_list.version++;
b2a8658e 1490 qemu_mutex_unlock_ramlist();
f798b07f 1491
2152f5ca
JQ
1492 new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1493
1494 if (new_ram_size > old_ram_size) {
1ab4c8ce 1495 int i;
ae3a7047
MD
1496
1497 /* ram_list.dirty_memory[] is protected by the iothread lock. */
1ab4c8ce
JQ
1498 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1499 ram_list.dirty_memory[i] =
1500 bitmap_zero_extend(ram_list.dirty_memory[i],
1501 old_ram_size, new_ram_size);
1502 }
2152f5ca 1503 }
9b8424d5 1504 cpu_physical_memory_set_dirty_range(new_block->offset,
58d2707e
PB
1505 new_block->used_length,
1506 DIRTY_CLIENTS_ALL);
94a6b54f 1507
a904c911
PB
1508 if (new_block->host) {
1509 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1510 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1511 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1512 if (kvm_enabled()) {
1513 kvm_setup_guest_memory(new_block->host, new_block->max_length);
1514 }
e1c57ab8 1515 }
6f0437e8 1516
94a6b54f
PB
1517 return new_block->offset;
1518}
e9a1ab19 1519
0b183fc8 1520#ifdef __linux__
e1c57ab8 1521ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
dbcb8981 1522 bool share, const char *mem_path,
7f56e740 1523 Error **errp)
e1c57ab8
PB
1524{
1525 RAMBlock *new_block;
ef701d7b
HT
1526 ram_addr_t addr;
1527 Error *local_err = NULL;
e1c57ab8
PB
1528
1529 if (xen_enabled()) {
7f56e740
PB
1530 error_setg(errp, "-mem-path not supported with Xen");
1531 return -1;
e1c57ab8
PB
1532 }
1533
1534 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1535 /*
1536 * file_ram_alloc() needs to allocate just like
1537 * phys_mem_alloc, but we haven't bothered to provide
1538 * a hook there.
1539 */
7f56e740
PB
1540 error_setg(errp,
1541 "-mem-path not supported with this accelerator");
1542 return -1;
e1c57ab8
PB
1543 }
1544
1545 size = TARGET_PAGE_ALIGN(size);
1546 new_block = g_malloc0(sizeof(*new_block));
1547 new_block->mr = mr;
9b8424d5
MT
1548 new_block->used_length = size;
1549 new_block->max_length = size;
dbcb8981 1550 new_block->flags = share ? RAM_SHARED : 0;
7f56e740
PB
1551 new_block->host = file_ram_alloc(new_block, size,
1552 mem_path, errp);
1553 if (!new_block->host) {
1554 g_free(new_block);
1555 return -1;
1556 }
1557
ef701d7b
HT
1558 addr = ram_block_add(new_block, &local_err);
1559 if (local_err) {
1560 g_free(new_block);
1561 error_propagate(errp, local_err);
1562 return -1;
1563 }
1564 return addr;
e1c57ab8 1565}
0b183fc8 1566#endif
e1c57ab8 1567
62be4e3a
MT
1568static
1569ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1570 void (*resized)(const char*,
1571 uint64_t length,
1572 void *host),
1573 void *host, bool resizeable,
ef701d7b 1574 MemoryRegion *mr, Error **errp)
e1c57ab8
PB
1575{
1576 RAMBlock *new_block;
ef701d7b
HT
1577 ram_addr_t addr;
1578 Error *local_err = NULL;
e1c57ab8
PB
1579
1580 size = TARGET_PAGE_ALIGN(size);
62be4e3a 1581 max_size = TARGET_PAGE_ALIGN(max_size);
e1c57ab8
PB
1582 new_block = g_malloc0(sizeof(*new_block));
1583 new_block->mr = mr;
62be4e3a 1584 new_block->resized = resized;
9b8424d5
MT
1585 new_block->used_length = size;
1586 new_block->max_length = max_size;
62be4e3a 1587 assert(max_size >= size);
e1c57ab8
PB
1588 new_block->fd = -1;
1589 new_block->host = host;
1590 if (host) {
7bd4f430 1591 new_block->flags |= RAM_PREALLOC;
e1c57ab8 1592 }
62be4e3a
MT
1593 if (resizeable) {
1594 new_block->flags |= RAM_RESIZEABLE;
1595 }
ef701d7b
HT
1596 addr = ram_block_add(new_block, &local_err);
1597 if (local_err) {
1598 g_free(new_block);
1599 error_propagate(errp, local_err);
1600 return -1;
1601 }
1602 return addr;
e1c57ab8
PB
1603}
1604
62be4e3a
MT
1605ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1606 MemoryRegion *mr, Error **errp)
1607{
1608 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1609}
1610
ef701d7b 1611ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
6977dfe6 1612{
62be4e3a
MT
1613 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1614}
1615
1616ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
1617 void (*resized)(const char*,
1618 uint64_t length,
1619 void *host),
1620 MemoryRegion *mr, Error **errp)
1621{
1622 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
6977dfe6
YT
1623}
1624
1f2e98b6
AW
1625void qemu_ram_free_from_ptr(ram_addr_t addr)
1626{
1627 RAMBlock *block;
1628
b2a8658e 1629 qemu_mutex_lock_ramlist();
0dc3f44a 1630 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1f2e98b6 1631 if (addr == block->offset) {
0dc3f44a 1632 QLIST_REMOVE_RCU(block, next);
0d6d3c87 1633 ram_list.mru_block = NULL;
0dc3f44a
MD
1634 /* Write list before version */
1635 smp_wmb();
f798b07f 1636 ram_list.version++;
43771539 1637 g_free_rcu(block, rcu);
b2a8658e 1638 break;
1f2e98b6
AW
1639 }
1640 }
b2a8658e 1641 qemu_mutex_unlock_ramlist();
1f2e98b6
AW
1642}
1643
43771539
PB
1644static void reclaim_ramblock(RAMBlock *block)
1645{
1646 if (block->flags & RAM_PREALLOC) {
1647 ;
1648 } else if (xen_enabled()) {
1649 xen_invalidate_map_cache_entry(block->host);
1650#ifndef _WIN32
1651 } else if (block->fd >= 0) {
1652 munmap(block->host, block->max_length);
1653 close(block->fd);
1654#endif
1655 } else {
1656 qemu_anon_ram_free(block->host, block->max_length);
1657 }
1658 g_free(block);
1659}
1660
c227f099 1661void qemu_ram_free(ram_addr_t addr)
e9a1ab19 1662{
04b16653
AW
1663 RAMBlock *block;
1664
b2a8658e 1665 qemu_mutex_lock_ramlist();
0dc3f44a 1666 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
04b16653 1667 if (addr == block->offset) {
0dc3f44a 1668 QLIST_REMOVE_RCU(block, next);
0d6d3c87 1669 ram_list.mru_block = NULL;
0dc3f44a
MD
1670 /* Write list before version */
1671 smp_wmb();
f798b07f 1672 ram_list.version++;
43771539 1673 call_rcu(block, reclaim_ramblock, rcu);
b2a8658e 1674 break;
04b16653
AW
1675 }
1676 }
b2a8658e 1677 qemu_mutex_unlock_ramlist();
e9a1ab19
FB
1678}
1679
cd19cfa2
HY
1680#ifndef _WIN32
1681void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1682{
1683 RAMBlock *block;
1684 ram_addr_t offset;
1685 int flags;
1686 void *area, *vaddr;
1687
0dc3f44a 1688 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
cd19cfa2 1689 offset = addr - block->offset;
9b8424d5 1690 if (offset < block->max_length) {
1240be24 1691 vaddr = ramblock_ptr(block, offset);
7bd4f430 1692 if (block->flags & RAM_PREALLOC) {
cd19cfa2 1693 ;
dfeaf2ab
MA
1694 } else if (xen_enabled()) {
1695 abort();
cd19cfa2
HY
1696 } else {
1697 flags = MAP_FIXED;
3435f395 1698 if (block->fd >= 0) {
dbcb8981
PB
1699 flags |= (block->flags & RAM_SHARED ?
1700 MAP_SHARED : MAP_PRIVATE);
3435f395
MA
1701 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1702 flags, block->fd, offset);
cd19cfa2 1703 } else {
2eb9fbaa
MA
1704 /*
1705 * Remap needs to match alloc. Accelerators that
1706 * set phys_mem_alloc never remap. If they did,
1707 * we'd need a remap hook here.
1708 */
1709 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1710
cd19cfa2
HY
1711 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1712 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1713 flags, -1, 0);
cd19cfa2
HY
1714 }
1715 if (area != vaddr) {
f15fbc4b
AP
1716 fprintf(stderr, "Could not remap addr: "
1717 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
cd19cfa2
HY
1718 length, addr);
1719 exit(1);
1720 }
8490fc78 1721 memory_try_enable_merging(vaddr, length);
ddb97f1d 1722 qemu_ram_setup_dump(vaddr, length);
cd19cfa2 1723 }
cd19cfa2
HY
1724 }
1725 }
1726}
1727#endif /* !_WIN32 */
1728
a35ba7be
PB
1729int qemu_get_ram_fd(ram_addr_t addr)
1730{
ae3a7047
MD
1731 RAMBlock *block;
1732 int fd;
a35ba7be 1733
0dc3f44a 1734 rcu_read_lock();
ae3a7047
MD
1735 block = qemu_get_ram_block(addr);
1736 fd = block->fd;
0dc3f44a 1737 rcu_read_unlock();
ae3a7047 1738 return fd;
a35ba7be
PB
1739}
1740
3fd74b84
DM
1741void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
1742{
ae3a7047
MD
1743 RAMBlock *block;
1744 void *ptr;
3fd74b84 1745
0dc3f44a 1746 rcu_read_lock();
ae3a7047
MD
1747 block = qemu_get_ram_block(addr);
1748 ptr = ramblock_ptr(block, 0);
0dc3f44a 1749 rcu_read_unlock();
ae3a7047 1750 return ptr;
3fd74b84
DM
1751}
1752
1b5ec234 1753/* Return a host pointer to ram allocated with qemu_ram_alloc.
ae3a7047
MD
1754 * This should not be used for general purpose DMA. Use address_space_map
1755 * or address_space_rw instead. For local memory (e.g. video ram) that the
1756 * device owns, use memory_region_get_ram_ptr.
0dc3f44a
MD
1757 *
1758 * By the time this function returns, the returned pointer is not protected
1759 * by RCU anymore. If the caller is not within an RCU critical section and
1760 * does not hold the iothread lock, it must have other means of protecting the
1761 * pointer, such as a reference to the region that includes the incoming
1762 * ram_addr_t.
1b5ec234
PB
1763 */
1764void *qemu_get_ram_ptr(ram_addr_t addr)
1765{
ae3a7047
MD
1766 RAMBlock *block;
1767 void *ptr;
1b5ec234 1768
0dc3f44a 1769 rcu_read_lock();
ae3a7047
MD
1770 block = qemu_get_ram_block(addr);
1771
1772 if (xen_enabled() && block->host == NULL) {
0d6d3c87
PB
1773 /* We need to check if the requested address is in the RAM
1774 * because we don't want to map the entire memory in QEMU.
1775 * In that case just map until the end of the page.
1776 */
1777 if (block->offset == 0) {
ae3a7047 1778 ptr = xen_map_cache(addr, 0, 0);
0dc3f44a 1779 goto unlock;
0d6d3c87 1780 }
ae3a7047
MD
1781
1782 block->host = xen_map_cache(block->offset, block->max_length, 1);
0d6d3c87 1783 }
ae3a7047
MD
1784 ptr = ramblock_ptr(block, addr - block->offset);
1785
0dc3f44a
MD
1786unlock:
1787 rcu_read_unlock();
ae3a7047 1788 return ptr;
dc828ca1
PB
1789}
1790
38bee5dc 1791/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
ae3a7047 1792 * but takes a size argument.
0dc3f44a
MD
1793 *
1794 * By the time this function returns, the returned pointer is not protected
1795 * by RCU anymore. If the caller is not within an RCU critical section and
1796 * does not hold the iothread lock, it must have other means of protecting the
1797 * pointer, such as a reference to the region that includes the incoming
1798 * ram_addr_t.
ae3a7047 1799 */
cb85f7ab 1800static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
38bee5dc 1801{
ae3a7047 1802 void *ptr;
8ab934f9
SS
1803 if (*size == 0) {
1804 return NULL;
1805 }
868bb33f 1806 if (xen_enabled()) {
e41d7c69 1807 return xen_map_cache(addr, *size, 1);
868bb33f 1808 } else {
38bee5dc 1809 RAMBlock *block;
0dc3f44a
MD
1810 rcu_read_lock();
1811 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
9b8424d5
MT
1812 if (addr - block->offset < block->max_length) {
1813 if (addr - block->offset + *size > block->max_length)
1814 *size = block->max_length - addr + block->offset;
ae3a7047 1815 ptr = ramblock_ptr(block, addr - block->offset);
0dc3f44a 1816 rcu_read_unlock();
ae3a7047 1817 return ptr;
38bee5dc
SS
1818 }
1819 }
1820
1821 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1822 abort();
38bee5dc
SS
1823 }
1824}
1825
7443b437 1826/* Some of the softmmu routines need to translate from a host pointer
ae3a7047
MD
1827 * (typically a TLB entry) back to a ram offset.
1828 *
1829 * By the time this function returns, the returned pointer is not protected
1830 * by RCU anymore. If the caller is not within an RCU critical section and
1831 * does not hold the iothread lock, it must have other means of protecting the
1832 * pointer, such as a reference to the region that includes the incoming
1833 * ram_addr_t.
1834 */
1b5ec234 1835MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 1836{
94a6b54f
PB
1837 RAMBlock *block;
1838 uint8_t *host = ptr;
ae3a7047 1839 MemoryRegion *mr;
94a6b54f 1840
868bb33f 1841 if (xen_enabled()) {
0dc3f44a 1842 rcu_read_lock();
e41d7c69 1843 *ram_addr = xen_ram_addr_from_mapcache(ptr);
ae3a7047 1844 mr = qemu_get_ram_block(*ram_addr)->mr;
0dc3f44a 1845 rcu_read_unlock();
ae3a7047 1846 return mr;
712c2b41
SS
1847 }
1848
0dc3f44a
MD
1849 rcu_read_lock();
1850 block = atomic_rcu_read(&ram_list.mru_block);
9b8424d5 1851 if (block && block->host && host - block->host < block->max_length) {
23887b79
PB
1852 goto found;
1853 }
1854
0dc3f44a 1855 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
432d268c
JN
1856 /* This case append when the block is not mapped. */
1857 if (block->host == NULL) {
1858 continue;
1859 }
9b8424d5 1860 if (host - block->host < block->max_length) {
23887b79 1861 goto found;
f471a17e 1862 }
94a6b54f 1863 }
432d268c 1864
0dc3f44a 1865 rcu_read_unlock();
1b5ec234 1866 return NULL;
23887b79
PB
1867
1868found:
1869 *ram_addr = block->offset + (host - block->host);
ae3a7047 1870 mr = block->mr;
0dc3f44a 1871 rcu_read_unlock();
ae3a7047 1872 return mr;
e890261f 1873}
f471a17e 1874
a8170e5e 1875static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
0e0df1e2 1876 uint64_t val, unsigned size)
9fa3e853 1877{
52159192 1878 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
0e0df1e2 1879 tb_invalidate_phys_page_fast(ram_addr, size);
3a7d929e 1880 }
0e0df1e2
AK
1881 switch (size) {
1882 case 1:
1883 stb_p(qemu_get_ram_ptr(ram_addr), val);
1884 break;
1885 case 2:
1886 stw_p(qemu_get_ram_ptr(ram_addr), val);
1887 break;
1888 case 4:
1889 stl_p(qemu_get_ram_ptr(ram_addr), val);
1890 break;
1891 default:
1892 abort();
3a7d929e 1893 }
58d2707e
PB
1894 /* Set both VGA and migration bits for simplicity and to remove
1895 * the notdirty callback faster.
1896 */
1897 cpu_physical_memory_set_dirty_range(ram_addr, size,
1898 DIRTY_CLIENTS_NOCODE);
f23db169
FB
1899 /* we remove the notdirty callback only if the code has been
1900 flushed */
a2cd8c85 1901 if (!cpu_physical_memory_is_clean(ram_addr)) {
4917cf44 1902 CPUArchState *env = current_cpu->env_ptr;
93afeade 1903 tlb_set_dirty(env, current_cpu->mem_io_vaddr);
4917cf44 1904 }
9fa3e853
FB
1905}
1906
b018ddf6
PB
1907static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1908 unsigned size, bool is_write)
1909{
1910 return is_write;
1911}
1912
0e0df1e2 1913static const MemoryRegionOps notdirty_mem_ops = {
0e0df1e2 1914 .write = notdirty_mem_write,
b018ddf6 1915 .valid.accepts = notdirty_mem_accepts,
0e0df1e2 1916 .endianness = DEVICE_NATIVE_ENDIAN,
1ccde1cb
FB
1917};
1918
0f459d16 1919/* Generate a debug exception if a watchpoint has been hit. */
66b9b43c 1920static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
0f459d16 1921{
93afeade
AF
1922 CPUState *cpu = current_cpu;
1923 CPUArchState *env = cpu->env_ptr;
06d55cc1 1924 target_ulong pc, cs_base;
0f459d16 1925 target_ulong vaddr;
a1d1bb31 1926 CPUWatchpoint *wp;
06d55cc1 1927 int cpu_flags;
0f459d16 1928
ff4700b0 1929 if (cpu->watchpoint_hit) {
06d55cc1
AL
1930 /* We re-entered the check after replacing the TB. Now raise
1931 * the debug interrupt so that is will trigger after the
1932 * current instruction. */
93afeade 1933 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
06d55cc1
AL
1934 return;
1935 }
93afeade 1936 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
ff4700b0 1937 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
05068c0d
PM
1938 if (cpu_watchpoint_address_matches(wp, vaddr, len)
1939 && (wp->flags & flags)) {
08225676
PM
1940 if (flags == BP_MEM_READ) {
1941 wp->flags |= BP_WATCHPOINT_HIT_READ;
1942 } else {
1943 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
1944 }
1945 wp->hitaddr = vaddr;
66b9b43c 1946 wp->hitattrs = attrs;
ff4700b0
AF
1947 if (!cpu->watchpoint_hit) {
1948 cpu->watchpoint_hit = wp;
239c51a5 1949 tb_check_watchpoint(cpu);
6e140f28 1950 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
27103424 1951 cpu->exception_index = EXCP_DEBUG;
5638d180 1952 cpu_loop_exit(cpu);
6e140f28
AL
1953 } else {
1954 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
648f034c 1955 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
0ea8cb88 1956 cpu_resume_from_signal(cpu, NULL);
6e140f28 1957 }
06d55cc1 1958 }
6e140f28
AL
1959 } else {
1960 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
1961 }
1962 }
1963}
1964
6658ffb8
PB
1965/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1966 so these check for a hit then pass through to the normal out-of-line
1967 phys routines. */
66b9b43c
PM
1968static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
1969 unsigned size, MemTxAttrs attrs)
6658ffb8 1970{
66b9b43c
PM
1971 MemTxResult res;
1972 uint64_t data;
1973
1974 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
1ec9b909 1975 switch (size) {
66b9b43c
PM
1976 case 1:
1977 data = address_space_ldub(&address_space_memory, addr, attrs, &res);
1978 break;
1979 case 2:
1980 data = address_space_lduw(&address_space_memory, addr, attrs, &res);
1981 break;
1982 case 4:
1983 data = address_space_ldl(&address_space_memory, addr, attrs, &res);
1984 break;
1ec9b909
AK
1985 default: abort();
1986 }
66b9b43c
PM
1987 *pdata = data;
1988 return res;
6658ffb8
PB
1989}
1990
66b9b43c
PM
1991static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
1992 uint64_t val, unsigned size,
1993 MemTxAttrs attrs)
6658ffb8 1994{
66b9b43c
PM
1995 MemTxResult res;
1996
1997 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
1ec9b909 1998 switch (size) {
67364150 1999 case 1:
66b9b43c 2000 address_space_stb(&address_space_memory, addr, val, attrs, &res);
67364150
MF
2001 break;
2002 case 2:
66b9b43c 2003 address_space_stw(&address_space_memory, addr, val, attrs, &res);
67364150
MF
2004 break;
2005 case 4:
66b9b43c 2006 address_space_stl(&address_space_memory, addr, val, attrs, &res);
67364150 2007 break;
1ec9b909
AK
2008 default: abort();
2009 }
66b9b43c 2010 return res;
6658ffb8
PB
2011}
2012
1ec9b909 2013static const MemoryRegionOps watch_mem_ops = {
66b9b43c
PM
2014 .read_with_attrs = watch_mem_read,
2015 .write_with_attrs = watch_mem_write,
1ec9b909 2016 .endianness = DEVICE_NATIVE_ENDIAN,
6658ffb8 2017};
6658ffb8 2018
f25a49e0
PM
2019static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
2020 unsigned len, MemTxAttrs attrs)
db7b5426 2021{
acc9d80b 2022 subpage_t *subpage = opaque;
ff6cff75 2023 uint8_t buf[8];
5c9eb028 2024 MemTxResult res;
791af8c8 2025
db7b5426 2026#if defined(DEBUG_SUBPAGE)
016e9d62 2027 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
acc9d80b 2028 subpage, len, addr);
db7b5426 2029#endif
5c9eb028
PM
2030 res = address_space_read(subpage->as, addr + subpage->base,
2031 attrs, buf, len);
2032 if (res) {
2033 return res;
f25a49e0 2034 }
acc9d80b
JK
2035 switch (len) {
2036 case 1:
f25a49e0
PM
2037 *data = ldub_p(buf);
2038 return MEMTX_OK;
acc9d80b 2039 case 2:
f25a49e0
PM
2040 *data = lduw_p(buf);
2041 return MEMTX_OK;
acc9d80b 2042 case 4:
f25a49e0
PM
2043 *data = ldl_p(buf);
2044 return MEMTX_OK;
ff6cff75 2045 case 8:
f25a49e0
PM
2046 *data = ldq_p(buf);
2047 return MEMTX_OK;
acc9d80b
JK
2048 default:
2049 abort();
2050 }
db7b5426
BS
2051}
2052
f25a49e0
PM
2053static MemTxResult subpage_write(void *opaque, hwaddr addr,
2054 uint64_t value, unsigned len, MemTxAttrs attrs)
db7b5426 2055{
acc9d80b 2056 subpage_t *subpage = opaque;
ff6cff75 2057 uint8_t buf[8];
acc9d80b 2058
db7b5426 2059#if defined(DEBUG_SUBPAGE)
016e9d62 2060 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
acc9d80b
JK
2061 " value %"PRIx64"\n",
2062 __func__, subpage, len, addr, value);
db7b5426 2063#endif
acc9d80b
JK
2064 switch (len) {
2065 case 1:
2066 stb_p(buf, value);
2067 break;
2068 case 2:
2069 stw_p(buf, value);
2070 break;
2071 case 4:
2072 stl_p(buf, value);
2073 break;
ff6cff75
PB
2074 case 8:
2075 stq_p(buf, value);
2076 break;
acc9d80b
JK
2077 default:
2078 abort();
2079 }
5c9eb028
PM
2080 return address_space_write(subpage->as, addr + subpage->base,
2081 attrs, buf, len);
db7b5426
BS
2082}
2083
c353e4cc 2084static bool subpage_accepts(void *opaque, hwaddr addr,
016e9d62 2085 unsigned len, bool is_write)
c353e4cc 2086{
acc9d80b 2087 subpage_t *subpage = opaque;
c353e4cc 2088#if defined(DEBUG_SUBPAGE)
016e9d62 2089 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
acc9d80b 2090 __func__, subpage, is_write ? 'w' : 'r', len, addr);
c353e4cc
PB
2091#endif
2092
acc9d80b 2093 return address_space_access_valid(subpage->as, addr + subpage->base,
016e9d62 2094 len, is_write);
c353e4cc
PB
2095}
2096
70c68e44 2097static const MemoryRegionOps subpage_ops = {
f25a49e0
PM
2098 .read_with_attrs = subpage_read,
2099 .write_with_attrs = subpage_write,
ff6cff75
PB
2100 .impl.min_access_size = 1,
2101 .impl.max_access_size = 8,
2102 .valid.min_access_size = 1,
2103 .valid.max_access_size = 8,
c353e4cc 2104 .valid.accepts = subpage_accepts,
70c68e44 2105 .endianness = DEVICE_NATIVE_ENDIAN,
db7b5426
BS
2106};
2107
c227f099 2108static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 2109 uint16_t section)
db7b5426
BS
2110{
2111 int idx, eidx;
2112
2113 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2114 return -1;
2115 idx = SUBPAGE_IDX(start);
2116 eidx = SUBPAGE_IDX(end);
2117#if defined(DEBUG_SUBPAGE)
016e9d62
AK
2118 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2119 __func__, mmio, start, end, idx, eidx, section);
db7b5426 2120#endif
db7b5426 2121 for (; idx <= eidx; idx++) {
5312bd8b 2122 mmio->sub_section[idx] = section;
db7b5426
BS
2123 }
2124
2125 return 0;
2126}
2127
acc9d80b 2128static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
db7b5426 2129{
c227f099 2130 subpage_t *mmio;
db7b5426 2131
7267c094 2132 mmio = g_malloc0(sizeof(subpage_t));
1eec614b 2133
acc9d80b 2134 mmio->as = as;
1eec614b 2135 mmio->base = base;
2c9b15ca 2136 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
b4fefef9 2137 NULL, TARGET_PAGE_SIZE);
b3b00c78 2138 mmio->iomem.subpage = true;
db7b5426 2139#if defined(DEBUG_SUBPAGE)
016e9d62
AK
2140 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2141 mmio, base, TARGET_PAGE_SIZE);
db7b5426 2142#endif
b41aac4f 2143 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
db7b5426
BS
2144
2145 return mmio;
2146}
2147
a656e22f
PC
2148static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2149 MemoryRegion *mr)
5312bd8b 2150{
a656e22f 2151 assert(as);
5312bd8b 2152 MemoryRegionSection section = {
a656e22f 2153 .address_space = as,
5312bd8b
AK
2154 .mr = mr,
2155 .offset_within_address_space = 0,
2156 .offset_within_region = 0,
052e87b0 2157 .size = int128_2_64(),
5312bd8b
AK
2158 };
2159
53cb28cb 2160 return phys_section_add(map, &section);
5312bd8b
AK
2161}
2162
9d82b5a7 2163MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index)
aa102231 2164{
79e2b9ae
PB
2165 AddressSpaceDispatch *d = atomic_rcu_read(&cpu->memory_dispatch);
2166 MemoryRegionSection *sections = d->map.sections;
9d82b5a7
PB
2167
2168 return sections[index & ~TARGET_PAGE_MASK].mr;
aa102231
AK
2169}
2170
e9179ce1
AK
2171static void io_mem_init(void)
2172{
1f6245e5 2173 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
2c9b15ca 2174 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
1f6245e5 2175 NULL, UINT64_MAX);
2c9b15ca 2176 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
1f6245e5 2177 NULL, UINT64_MAX);
2c9b15ca 2178 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
1f6245e5 2179 NULL, UINT64_MAX);
e9179ce1
AK
2180}
2181
ac1970fb 2182static void mem_begin(MemoryListener *listener)
00752703
PB
2183{
2184 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
53cb28cb
MA
2185 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2186 uint16_t n;
2187
a656e22f 2188 n = dummy_section(&d->map, as, &io_mem_unassigned);
53cb28cb 2189 assert(n == PHYS_SECTION_UNASSIGNED);
a656e22f 2190 n = dummy_section(&d->map, as, &io_mem_notdirty);
53cb28cb 2191 assert(n == PHYS_SECTION_NOTDIRTY);
a656e22f 2192 n = dummy_section(&d->map, as, &io_mem_rom);
53cb28cb 2193 assert(n == PHYS_SECTION_ROM);
a656e22f 2194 n = dummy_section(&d->map, as, &io_mem_watch);
53cb28cb 2195 assert(n == PHYS_SECTION_WATCH);
00752703 2196
9736e55b 2197 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
00752703
PB
2198 d->as = as;
2199 as->next_dispatch = d;
2200}
2201
79e2b9ae
PB
2202static void address_space_dispatch_free(AddressSpaceDispatch *d)
2203{
2204 phys_sections_free(&d->map);
2205 g_free(d);
2206}
2207
00752703 2208static void mem_commit(MemoryListener *listener)
ac1970fb 2209{
89ae337a 2210 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
0475d94f
PB
2211 AddressSpaceDispatch *cur = as->dispatch;
2212 AddressSpaceDispatch *next = as->next_dispatch;
2213
53cb28cb 2214 phys_page_compact_all(next, next->map.nodes_nb);
b35ba30f 2215
79e2b9ae 2216 atomic_rcu_set(&as->dispatch, next);
53cb28cb 2217 if (cur) {
79e2b9ae 2218 call_rcu(cur, address_space_dispatch_free, rcu);
53cb28cb 2219 }
9affd6fc
PB
2220}
2221
1d71148e 2222static void tcg_commit(MemoryListener *listener)
50c1e149 2223{
182735ef 2224 CPUState *cpu;
117712c3
AK
2225
2226 /* since each CPU stores ram addresses in its TLB cache, we must
2227 reset the modified entries */
2228 /* XXX: slow ! */
bdc44640 2229 CPU_FOREACH(cpu) {
33bde2e1
EI
2230 /* FIXME: Disentangle the cpu.h circular files deps so we can
2231 directly get the right CPU from listener. */
2232 if (cpu->tcg_as_listener != listener) {
2233 continue;
2234 }
76e5c76f 2235 cpu_reload_memory_map(cpu);
117712c3 2236 }
50c1e149
AK
2237}
2238
ac1970fb
AK
2239void address_space_init_dispatch(AddressSpace *as)
2240{
00752703 2241 as->dispatch = NULL;
89ae337a 2242 as->dispatch_listener = (MemoryListener) {
ac1970fb 2243 .begin = mem_begin,
00752703 2244 .commit = mem_commit,
ac1970fb
AK
2245 .region_add = mem_add,
2246 .region_nop = mem_add,
2247 .priority = 0,
2248 };
89ae337a 2249 memory_listener_register(&as->dispatch_listener, as);
ac1970fb
AK
2250}
2251
6e48e8f9
PB
2252void address_space_unregister(AddressSpace *as)
2253{
2254 memory_listener_unregister(&as->dispatch_listener);
2255}
2256
83f3c251
AK
2257void address_space_destroy_dispatch(AddressSpace *as)
2258{
2259 AddressSpaceDispatch *d = as->dispatch;
2260
79e2b9ae
PB
2261 atomic_rcu_set(&as->dispatch, NULL);
2262 if (d) {
2263 call_rcu(d, address_space_dispatch_free, rcu);
2264 }
83f3c251
AK
2265}
2266
62152b8a
AK
2267static void memory_map_init(void)
2268{
7267c094 2269 system_memory = g_malloc(sizeof(*system_memory));
03f49957 2270
57271d63 2271 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
7dca8043 2272 address_space_init(&address_space_memory, system_memory, "memory");
309cb471 2273
7267c094 2274 system_io = g_malloc(sizeof(*system_io));
3bb28b72
JK
2275 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2276 65536);
7dca8043 2277 address_space_init(&address_space_io, system_io, "I/O");
62152b8a
AK
2278}
2279
2280MemoryRegion *get_system_memory(void)
2281{
2282 return system_memory;
2283}
2284
309cb471
AK
2285MemoryRegion *get_system_io(void)
2286{
2287 return system_io;
2288}
2289
e2eef170
PB
2290#endif /* !defined(CONFIG_USER_ONLY) */
2291
13eb76e0
FB
2292/* physical memory access (slow version, mainly for debug) */
2293#if defined(CONFIG_USER_ONLY)
f17ec444 2294int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
a68fe89c 2295 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2296{
2297 int l, flags;
2298 target_ulong page;
53a5960a 2299 void * p;
13eb76e0
FB
2300
2301 while (len > 0) {
2302 page = addr & TARGET_PAGE_MASK;
2303 l = (page + TARGET_PAGE_SIZE) - addr;
2304 if (l > len)
2305 l = len;
2306 flags = page_get_flags(page);
2307 if (!(flags & PAGE_VALID))
a68fe89c 2308 return -1;
13eb76e0
FB
2309 if (is_write) {
2310 if (!(flags & PAGE_WRITE))
a68fe89c 2311 return -1;
579a97f7 2312 /* XXX: this code should not depend on lock_user */
72fb7daa 2313 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 2314 return -1;
72fb7daa
AJ
2315 memcpy(p, buf, l);
2316 unlock_user(p, addr, l);
13eb76e0
FB
2317 } else {
2318 if (!(flags & PAGE_READ))
a68fe89c 2319 return -1;
579a97f7 2320 /* XXX: this code should not depend on lock_user */
72fb7daa 2321 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 2322 return -1;
72fb7daa 2323 memcpy(buf, p, l);
5b257578 2324 unlock_user(p, addr, 0);
13eb76e0
FB
2325 }
2326 len -= l;
2327 buf += l;
2328 addr += l;
2329 }
a68fe89c 2330 return 0;
13eb76e0 2331}
8df1cd07 2332
13eb76e0 2333#else
51d7a9eb 2334
845b6214 2335static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
a8170e5e 2336 hwaddr length)
51d7a9eb 2337{
e87f7778
PB
2338 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
2339 /* No early return if dirty_log_mask is or becomes 0, because
2340 * cpu_physical_memory_set_dirty_range will still call
2341 * xen_modified_memory.
2342 */
2343 if (dirty_log_mask) {
2344 dirty_log_mask =
2345 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
2346 }
2347 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
2348 tb_invalidate_phys_range(addr, addr + length);
2349 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
51d7a9eb 2350 }
e87f7778 2351 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
51d7a9eb
AP
2352}
2353
23326164 2354static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
82f2563f 2355{
e1622f4b 2356 unsigned access_size_max = mr->ops->valid.max_access_size;
23326164
RH
2357
2358 /* Regions are assumed to support 1-4 byte accesses unless
2359 otherwise specified. */
23326164
RH
2360 if (access_size_max == 0) {
2361 access_size_max = 4;
2362 }
2363
2364 /* Bound the maximum access by the alignment of the address. */
2365 if (!mr->ops->impl.unaligned) {
2366 unsigned align_size_max = addr & -addr;
2367 if (align_size_max != 0 && align_size_max < access_size_max) {
2368 access_size_max = align_size_max;
2369 }
82f2563f 2370 }
23326164
RH
2371
2372 /* Don't attempt accesses larger than the maximum. */
2373 if (l > access_size_max) {
2374 l = access_size_max;
82f2563f 2375 }
098178f2
PB
2376 if (l & (l - 1)) {
2377 l = 1 << (qemu_fls(l) - 1);
2378 }
23326164
RH
2379
2380 return l;
82f2563f
PB
2381}
2382
4840f10e 2383static bool prepare_mmio_access(MemoryRegion *mr)
125b3806 2384{
4840f10e
JK
2385 bool unlocked = !qemu_mutex_iothread_locked();
2386 bool release_lock = false;
2387
2388 if (unlocked && mr->global_locking) {
2389 qemu_mutex_lock_iothread();
2390 unlocked = false;
2391 release_lock = true;
2392 }
125b3806 2393 if (mr->flush_coalesced_mmio) {
4840f10e
JK
2394 if (unlocked) {
2395 qemu_mutex_lock_iothread();
2396 }
125b3806 2397 qemu_flush_coalesced_mmio_buffer();
4840f10e
JK
2398 if (unlocked) {
2399 qemu_mutex_unlock_iothread();
2400 }
125b3806 2401 }
4840f10e
JK
2402
2403 return release_lock;
125b3806
PB
2404}
2405
5c9eb028
PM
2406MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2407 uint8_t *buf, int len, bool is_write)
13eb76e0 2408{
149f54b5 2409 hwaddr l;
13eb76e0 2410 uint8_t *ptr;
791af8c8 2411 uint64_t val;
149f54b5 2412 hwaddr addr1;
5c8a00ce 2413 MemoryRegion *mr;
3b643495 2414 MemTxResult result = MEMTX_OK;
4840f10e 2415 bool release_lock = false;
3b46e624 2416
41063e1e 2417 rcu_read_lock();
13eb76e0 2418 while (len > 0) {
149f54b5 2419 l = len;
5c8a00ce 2420 mr = address_space_translate(as, addr, &addr1, &l, is_write);
3b46e624 2421
13eb76e0 2422 if (is_write) {
5c8a00ce 2423 if (!memory_access_is_direct(mr, is_write)) {
4840f10e 2424 release_lock |= prepare_mmio_access(mr);
5c8a00ce 2425 l = memory_access_size(mr, l, addr1);
4917cf44 2426 /* XXX: could force current_cpu to NULL to avoid
6a00d601 2427 potential bugs */
23326164
RH
2428 switch (l) {
2429 case 8:
2430 /* 64 bit write access */
2431 val = ldq_p(buf);
3b643495
PM
2432 result |= memory_region_dispatch_write(mr, addr1, val, 8,
2433 attrs);
23326164
RH
2434 break;
2435 case 4:
1c213d19 2436 /* 32 bit write access */
c27004ec 2437 val = ldl_p(buf);
3b643495
PM
2438 result |= memory_region_dispatch_write(mr, addr1, val, 4,
2439 attrs);
23326164
RH
2440 break;
2441 case 2:
1c213d19 2442 /* 16 bit write access */
c27004ec 2443 val = lduw_p(buf);
3b643495
PM
2444 result |= memory_region_dispatch_write(mr, addr1, val, 2,
2445 attrs);
23326164
RH
2446 break;
2447 case 1:
1c213d19 2448 /* 8 bit write access */
c27004ec 2449 val = ldub_p(buf);
3b643495
PM
2450 result |= memory_region_dispatch_write(mr, addr1, val, 1,
2451 attrs);
23326164
RH
2452 break;
2453 default:
2454 abort();
13eb76e0 2455 }
2bbfa05d 2456 } else {
5c8a00ce 2457 addr1 += memory_region_get_ram_addr(mr);
13eb76e0 2458 /* RAM case */
5579c7f3 2459 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 2460 memcpy(ptr, buf, l);
845b6214 2461 invalidate_and_set_dirty(mr, addr1, l);
13eb76e0
FB
2462 }
2463 } else {
5c8a00ce 2464 if (!memory_access_is_direct(mr, is_write)) {
13eb76e0 2465 /* I/O case */
4840f10e 2466 release_lock |= prepare_mmio_access(mr);
5c8a00ce 2467 l = memory_access_size(mr, l, addr1);
23326164
RH
2468 switch (l) {
2469 case 8:
2470 /* 64 bit read access */
3b643495
PM
2471 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
2472 attrs);
23326164
RH
2473 stq_p(buf, val);
2474 break;
2475 case 4:
13eb76e0 2476 /* 32 bit read access */
3b643495
PM
2477 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
2478 attrs);
c27004ec 2479 stl_p(buf, val);
23326164
RH
2480 break;
2481 case 2:
13eb76e0 2482 /* 16 bit read access */
3b643495
PM
2483 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
2484 attrs);
c27004ec 2485 stw_p(buf, val);
23326164
RH
2486 break;
2487 case 1:
1c213d19 2488 /* 8 bit read access */
3b643495
PM
2489 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
2490 attrs);
c27004ec 2491 stb_p(buf, val);
23326164
RH
2492 break;
2493 default:
2494 abort();
13eb76e0
FB
2495 }
2496 } else {
2497 /* RAM case */
5c8a00ce 2498 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
f3705d53 2499 memcpy(buf, ptr, l);
13eb76e0
FB
2500 }
2501 }
4840f10e
JK
2502
2503 if (release_lock) {
2504 qemu_mutex_unlock_iothread();
2505 release_lock = false;
2506 }
2507
13eb76e0
FB
2508 len -= l;
2509 buf += l;
2510 addr += l;
2511 }
41063e1e 2512 rcu_read_unlock();
fd8aaa76 2513
3b643495 2514 return result;
13eb76e0 2515}
8df1cd07 2516
5c9eb028
PM
2517MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2518 const uint8_t *buf, int len)
ac1970fb 2519{
5c9eb028 2520 return address_space_rw(as, addr, attrs, (uint8_t *)buf, len, true);
ac1970fb
AK
2521}
2522
5c9eb028
PM
2523MemTxResult address_space_read(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2524 uint8_t *buf, int len)
ac1970fb 2525{
5c9eb028 2526 return address_space_rw(as, addr, attrs, buf, len, false);
ac1970fb
AK
2527}
2528
2529
a8170e5e 2530void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
ac1970fb
AK
2531 int len, int is_write)
2532{
5c9eb028
PM
2533 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
2534 buf, len, is_write);
ac1970fb
AK
2535}
2536
582b55a9
AG
2537enum write_rom_type {
2538 WRITE_DATA,
2539 FLUSH_CACHE,
2540};
2541
2a221651 2542static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
582b55a9 2543 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
d0ecd2aa 2544{
149f54b5 2545 hwaddr l;
d0ecd2aa 2546 uint8_t *ptr;
149f54b5 2547 hwaddr addr1;
5c8a00ce 2548 MemoryRegion *mr;
3b46e624 2549
41063e1e 2550 rcu_read_lock();
d0ecd2aa 2551 while (len > 0) {
149f54b5 2552 l = len;
2a221651 2553 mr = address_space_translate(as, addr, &addr1, &l, true);
3b46e624 2554
5c8a00ce
PB
2555 if (!(memory_region_is_ram(mr) ||
2556 memory_region_is_romd(mr))) {
b242e0e0 2557 l = memory_access_size(mr, l, addr1);
d0ecd2aa 2558 } else {
5c8a00ce 2559 addr1 += memory_region_get_ram_addr(mr);
d0ecd2aa 2560 /* ROM/RAM case */
5579c7f3 2561 ptr = qemu_get_ram_ptr(addr1);
582b55a9
AG
2562 switch (type) {
2563 case WRITE_DATA:
2564 memcpy(ptr, buf, l);
845b6214 2565 invalidate_and_set_dirty(mr, addr1, l);
582b55a9
AG
2566 break;
2567 case FLUSH_CACHE:
2568 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2569 break;
2570 }
d0ecd2aa
FB
2571 }
2572 len -= l;
2573 buf += l;
2574 addr += l;
2575 }
41063e1e 2576 rcu_read_unlock();
d0ecd2aa
FB
2577}
2578
582b55a9 2579/* used for ROM loading : can write in RAM and ROM */
2a221651 2580void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
582b55a9
AG
2581 const uint8_t *buf, int len)
2582{
2a221651 2583 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
582b55a9
AG
2584}
2585
2586void cpu_flush_icache_range(hwaddr start, int len)
2587{
2588 /*
2589 * This function should do the same thing as an icache flush that was
2590 * triggered from within the guest. For TCG we are always cache coherent,
2591 * so there is no need to flush anything. For KVM / Xen we need to flush
2592 * the host's instruction cache at least.
2593 */
2594 if (tcg_enabled()) {
2595 return;
2596 }
2597
2a221651
EI
2598 cpu_physical_memory_write_rom_internal(&address_space_memory,
2599 start, NULL, len, FLUSH_CACHE);
582b55a9
AG
2600}
2601
6d16c2f8 2602typedef struct {
d3e71559 2603 MemoryRegion *mr;
6d16c2f8 2604 void *buffer;
a8170e5e
AK
2605 hwaddr addr;
2606 hwaddr len;
c2cba0ff 2607 bool in_use;
6d16c2f8
AL
2608} BounceBuffer;
2609
2610static BounceBuffer bounce;
2611
ba223c29 2612typedef struct MapClient {
e95205e1 2613 QEMUBH *bh;
72cf2d4f 2614 QLIST_ENTRY(MapClient) link;
ba223c29
AL
2615} MapClient;
2616
38e047b5 2617QemuMutex map_client_list_lock;
72cf2d4f
BS
2618static QLIST_HEAD(map_client_list, MapClient) map_client_list
2619 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29 2620
e95205e1
FZ
2621static void cpu_unregister_map_client_do(MapClient *client)
2622{
2623 QLIST_REMOVE(client, link);
2624 g_free(client);
2625}
2626
33b6c2ed
FZ
2627static void cpu_notify_map_clients_locked(void)
2628{
2629 MapClient *client;
2630
2631 while (!QLIST_EMPTY(&map_client_list)) {
2632 client = QLIST_FIRST(&map_client_list);
e95205e1
FZ
2633 qemu_bh_schedule(client->bh);
2634 cpu_unregister_map_client_do(client);
33b6c2ed
FZ
2635 }
2636}
2637
e95205e1 2638void cpu_register_map_client(QEMUBH *bh)
ba223c29 2639{
7267c094 2640 MapClient *client = g_malloc(sizeof(*client));
ba223c29 2641
38e047b5 2642 qemu_mutex_lock(&map_client_list_lock);
e95205e1 2643 client->bh = bh;
72cf2d4f 2644 QLIST_INSERT_HEAD(&map_client_list, client, link);
33b6c2ed
FZ
2645 if (!atomic_read(&bounce.in_use)) {
2646 cpu_notify_map_clients_locked();
2647 }
38e047b5 2648 qemu_mutex_unlock(&map_client_list_lock);
ba223c29
AL
2649}
2650
38e047b5 2651void cpu_exec_init_all(void)
ba223c29 2652{
38e047b5
FZ
2653 qemu_mutex_init(&ram_list.mutex);
2654 memory_map_init();
2655 io_mem_init();
2656 qemu_mutex_init(&map_client_list_lock);
ba223c29
AL
2657}
2658
e95205e1 2659void cpu_unregister_map_client(QEMUBH *bh)
ba223c29
AL
2660{
2661 MapClient *client;
2662
e95205e1
FZ
2663 qemu_mutex_lock(&map_client_list_lock);
2664 QLIST_FOREACH(client, &map_client_list, link) {
2665 if (client->bh == bh) {
2666 cpu_unregister_map_client_do(client);
2667 break;
2668 }
ba223c29 2669 }
e95205e1 2670 qemu_mutex_unlock(&map_client_list_lock);
ba223c29
AL
2671}
2672
2673static void cpu_notify_map_clients(void)
2674{
38e047b5 2675 qemu_mutex_lock(&map_client_list_lock);
33b6c2ed 2676 cpu_notify_map_clients_locked();
38e047b5 2677 qemu_mutex_unlock(&map_client_list_lock);
ba223c29
AL
2678}
2679
51644ab7
PB
2680bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2681{
5c8a00ce 2682 MemoryRegion *mr;
51644ab7
PB
2683 hwaddr l, xlat;
2684
41063e1e 2685 rcu_read_lock();
51644ab7
PB
2686 while (len > 0) {
2687 l = len;
5c8a00ce
PB
2688 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2689 if (!memory_access_is_direct(mr, is_write)) {
2690 l = memory_access_size(mr, l, addr);
2691 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
51644ab7
PB
2692 return false;
2693 }
2694 }
2695
2696 len -= l;
2697 addr += l;
2698 }
41063e1e 2699 rcu_read_unlock();
51644ab7
PB
2700 return true;
2701}
2702
6d16c2f8
AL
2703/* Map a physical memory region into a host virtual address.
2704 * May map a subset of the requested range, given by and returned in *plen.
2705 * May return NULL if resources needed to perform the mapping are exhausted.
2706 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
2707 * Use cpu_register_map_client() to know when retrying the map operation is
2708 * likely to succeed.
6d16c2f8 2709 */
ac1970fb 2710void *address_space_map(AddressSpace *as,
a8170e5e
AK
2711 hwaddr addr,
2712 hwaddr *plen,
ac1970fb 2713 bool is_write)
6d16c2f8 2714{
a8170e5e 2715 hwaddr len = *plen;
e3127ae0
PB
2716 hwaddr done = 0;
2717 hwaddr l, xlat, base;
2718 MemoryRegion *mr, *this_mr;
2719 ram_addr_t raddr;
6d16c2f8 2720
e3127ae0
PB
2721 if (len == 0) {
2722 return NULL;
2723 }
38bee5dc 2724
e3127ae0 2725 l = len;
41063e1e 2726 rcu_read_lock();
e3127ae0 2727 mr = address_space_translate(as, addr, &xlat, &l, is_write);
41063e1e 2728
e3127ae0 2729 if (!memory_access_is_direct(mr, is_write)) {
c2cba0ff 2730 if (atomic_xchg(&bounce.in_use, true)) {
41063e1e 2731 rcu_read_unlock();
e3127ae0 2732 return NULL;
6d16c2f8 2733 }
e85d9db5
KW
2734 /* Avoid unbounded allocations */
2735 l = MIN(l, TARGET_PAGE_SIZE);
2736 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
e3127ae0
PB
2737 bounce.addr = addr;
2738 bounce.len = l;
d3e71559
PB
2739
2740 memory_region_ref(mr);
2741 bounce.mr = mr;
e3127ae0 2742 if (!is_write) {
5c9eb028
PM
2743 address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
2744 bounce.buffer, l);
8ab934f9 2745 }
6d16c2f8 2746
41063e1e 2747 rcu_read_unlock();
e3127ae0
PB
2748 *plen = l;
2749 return bounce.buffer;
2750 }
2751
2752 base = xlat;
2753 raddr = memory_region_get_ram_addr(mr);
2754
2755 for (;;) {
6d16c2f8
AL
2756 len -= l;
2757 addr += l;
e3127ae0
PB
2758 done += l;
2759 if (len == 0) {
2760 break;
2761 }
2762
2763 l = len;
2764 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2765 if (this_mr != mr || xlat != base + done) {
2766 break;
2767 }
6d16c2f8 2768 }
e3127ae0 2769
d3e71559 2770 memory_region_ref(mr);
41063e1e 2771 rcu_read_unlock();
e3127ae0
PB
2772 *plen = done;
2773 return qemu_ram_ptr_length(raddr + base, plen);
6d16c2f8
AL
2774}
2775
ac1970fb 2776/* Unmaps a memory region previously mapped by address_space_map().
6d16c2f8
AL
2777 * Will also mark the memory as dirty if is_write == 1. access_len gives
2778 * the amount of memory that was actually read or written by the caller.
2779 */
a8170e5e
AK
2780void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2781 int is_write, hwaddr access_len)
6d16c2f8
AL
2782{
2783 if (buffer != bounce.buffer) {
d3e71559
PB
2784 MemoryRegion *mr;
2785 ram_addr_t addr1;
2786
2787 mr = qemu_ram_addr_from_host(buffer, &addr1);
2788 assert(mr != NULL);
6d16c2f8 2789 if (is_write) {
845b6214 2790 invalidate_and_set_dirty(mr, addr1, access_len);
6d16c2f8 2791 }
868bb33f 2792 if (xen_enabled()) {
e41d7c69 2793 xen_invalidate_map_cache_entry(buffer);
050a0ddf 2794 }
d3e71559 2795 memory_region_unref(mr);
6d16c2f8
AL
2796 return;
2797 }
2798 if (is_write) {
5c9eb028
PM
2799 address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
2800 bounce.buffer, access_len);
6d16c2f8 2801 }
f8a83245 2802 qemu_vfree(bounce.buffer);
6d16c2f8 2803 bounce.buffer = NULL;
d3e71559 2804 memory_region_unref(bounce.mr);
c2cba0ff 2805 atomic_mb_set(&bounce.in_use, false);
ba223c29 2806 cpu_notify_map_clients();
6d16c2f8 2807}
d0ecd2aa 2808
a8170e5e
AK
2809void *cpu_physical_memory_map(hwaddr addr,
2810 hwaddr *plen,
ac1970fb
AK
2811 int is_write)
2812{
2813 return address_space_map(&address_space_memory, addr, plen, is_write);
2814}
2815
a8170e5e
AK
2816void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2817 int is_write, hwaddr access_len)
ac1970fb
AK
2818{
2819 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2820}
2821
8df1cd07 2822/* warning: addr must be aligned */
50013115
PM
2823static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
2824 MemTxAttrs attrs,
2825 MemTxResult *result,
2826 enum device_endian endian)
8df1cd07 2827{
8df1cd07 2828 uint8_t *ptr;
791af8c8 2829 uint64_t val;
5c8a00ce 2830 MemoryRegion *mr;
149f54b5
PB
2831 hwaddr l = 4;
2832 hwaddr addr1;
50013115 2833 MemTxResult r;
4840f10e 2834 bool release_lock = false;
8df1cd07 2835
41063e1e 2836 rcu_read_lock();
fdfba1a2 2837 mr = address_space_translate(as, addr, &addr1, &l, false);
5c8a00ce 2838 if (l < 4 || !memory_access_is_direct(mr, false)) {
4840f10e 2839 release_lock |= prepare_mmio_access(mr);
125b3806 2840
8df1cd07 2841 /* I/O case */
50013115 2842 r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
1e78bcc1
AG
2843#if defined(TARGET_WORDS_BIGENDIAN)
2844 if (endian == DEVICE_LITTLE_ENDIAN) {
2845 val = bswap32(val);
2846 }
2847#else
2848 if (endian == DEVICE_BIG_ENDIAN) {
2849 val = bswap32(val);
2850 }
2851#endif
8df1cd07
FB
2852 } else {
2853 /* RAM case */
5c8a00ce 2854 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2855 & TARGET_PAGE_MASK)
149f54b5 2856 + addr1);
1e78bcc1
AG
2857 switch (endian) {
2858 case DEVICE_LITTLE_ENDIAN:
2859 val = ldl_le_p(ptr);
2860 break;
2861 case DEVICE_BIG_ENDIAN:
2862 val = ldl_be_p(ptr);
2863 break;
2864 default:
2865 val = ldl_p(ptr);
2866 break;
2867 }
50013115
PM
2868 r = MEMTX_OK;
2869 }
2870 if (result) {
2871 *result = r;
8df1cd07 2872 }
4840f10e
JK
2873 if (release_lock) {
2874 qemu_mutex_unlock_iothread();
2875 }
41063e1e 2876 rcu_read_unlock();
8df1cd07
FB
2877 return val;
2878}
2879
50013115
PM
2880uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
2881 MemTxAttrs attrs, MemTxResult *result)
2882{
2883 return address_space_ldl_internal(as, addr, attrs, result,
2884 DEVICE_NATIVE_ENDIAN);
2885}
2886
2887uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
2888 MemTxAttrs attrs, MemTxResult *result)
2889{
2890 return address_space_ldl_internal(as, addr, attrs, result,
2891 DEVICE_LITTLE_ENDIAN);
2892}
2893
2894uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
2895 MemTxAttrs attrs, MemTxResult *result)
2896{
2897 return address_space_ldl_internal(as, addr, attrs, result,
2898 DEVICE_BIG_ENDIAN);
2899}
2900
fdfba1a2 2901uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2902{
50013115 2903 return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2904}
2905
fdfba1a2 2906uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2907{
50013115 2908 return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2909}
2910
fdfba1a2 2911uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2912{
50013115 2913 return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2914}
2915
84b7b8e7 2916/* warning: addr must be aligned */
50013115
PM
2917static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
2918 MemTxAttrs attrs,
2919 MemTxResult *result,
2920 enum device_endian endian)
84b7b8e7 2921{
84b7b8e7
FB
2922 uint8_t *ptr;
2923 uint64_t val;
5c8a00ce 2924 MemoryRegion *mr;
149f54b5
PB
2925 hwaddr l = 8;
2926 hwaddr addr1;
50013115 2927 MemTxResult r;
4840f10e 2928 bool release_lock = false;
84b7b8e7 2929
41063e1e 2930 rcu_read_lock();
2c17449b 2931 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2932 false);
2933 if (l < 8 || !memory_access_is_direct(mr, false)) {
4840f10e 2934 release_lock |= prepare_mmio_access(mr);
125b3806 2935
84b7b8e7 2936 /* I/O case */
50013115 2937 r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
968a5627
PB
2938#if defined(TARGET_WORDS_BIGENDIAN)
2939 if (endian == DEVICE_LITTLE_ENDIAN) {
2940 val = bswap64(val);
2941 }
2942#else
2943 if (endian == DEVICE_BIG_ENDIAN) {
2944 val = bswap64(val);
2945 }
84b7b8e7
FB
2946#endif
2947 } else {
2948 /* RAM case */
5c8a00ce 2949 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2950 & TARGET_PAGE_MASK)
149f54b5 2951 + addr1);
1e78bcc1
AG
2952 switch (endian) {
2953 case DEVICE_LITTLE_ENDIAN:
2954 val = ldq_le_p(ptr);
2955 break;
2956 case DEVICE_BIG_ENDIAN:
2957 val = ldq_be_p(ptr);
2958 break;
2959 default:
2960 val = ldq_p(ptr);
2961 break;
2962 }
50013115
PM
2963 r = MEMTX_OK;
2964 }
2965 if (result) {
2966 *result = r;
84b7b8e7 2967 }
4840f10e
JK
2968 if (release_lock) {
2969 qemu_mutex_unlock_iothread();
2970 }
41063e1e 2971 rcu_read_unlock();
84b7b8e7
FB
2972 return val;
2973}
2974
50013115
PM
2975uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
2976 MemTxAttrs attrs, MemTxResult *result)
2977{
2978 return address_space_ldq_internal(as, addr, attrs, result,
2979 DEVICE_NATIVE_ENDIAN);
2980}
2981
2982uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
2983 MemTxAttrs attrs, MemTxResult *result)
2984{
2985 return address_space_ldq_internal(as, addr, attrs, result,
2986 DEVICE_LITTLE_ENDIAN);
2987}
2988
2989uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
2990 MemTxAttrs attrs, MemTxResult *result)
2991{
2992 return address_space_ldq_internal(as, addr, attrs, result,
2993 DEVICE_BIG_ENDIAN);
2994}
2995
2c17449b 2996uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2997{
50013115 2998 return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
2999}
3000
2c17449b 3001uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 3002{
50013115 3003 return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3004}
3005
2c17449b 3006uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 3007{
50013115 3008 return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3009}
3010
aab33094 3011/* XXX: optimize */
50013115
PM
3012uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
3013 MemTxAttrs attrs, MemTxResult *result)
aab33094
FB
3014{
3015 uint8_t val;
50013115
PM
3016 MemTxResult r;
3017
3018 r = address_space_rw(as, addr, attrs, &val, 1, 0);
3019 if (result) {
3020 *result = r;
3021 }
aab33094
FB
3022 return val;
3023}
3024
50013115
PM
3025uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
3026{
3027 return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3028}
3029
733f0b02 3030/* warning: addr must be aligned */
50013115
PM
3031static inline uint32_t address_space_lduw_internal(AddressSpace *as,
3032 hwaddr addr,
3033 MemTxAttrs attrs,
3034 MemTxResult *result,
3035 enum device_endian endian)
aab33094 3036{
733f0b02
MT
3037 uint8_t *ptr;
3038 uint64_t val;
5c8a00ce 3039 MemoryRegion *mr;
149f54b5
PB
3040 hwaddr l = 2;
3041 hwaddr addr1;
50013115 3042 MemTxResult r;
4840f10e 3043 bool release_lock = false;
733f0b02 3044
41063e1e 3045 rcu_read_lock();
41701aa4 3046 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
3047 false);
3048 if (l < 2 || !memory_access_is_direct(mr, false)) {
4840f10e 3049 release_lock |= prepare_mmio_access(mr);
125b3806 3050
733f0b02 3051 /* I/O case */
50013115 3052 r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
1e78bcc1
AG
3053#if defined(TARGET_WORDS_BIGENDIAN)
3054 if (endian == DEVICE_LITTLE_ENDIAN) {
3055 val = bswap16(val);
3056 }
3057#else
3058 if (endian == DEVICE_BIG_ENDIAN) {
3059 val = bswap16(val);
3060 }
3061#endif
733f0b02
MT
3062 } else {
3063 /* RAM case */
5c8a00ce 3064 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 3065 & TARGET_PAGE_MASK)
149f54b5 3066 + addr1);
1e78bcc1
AG
3067 switch (endian) {
3068 case DEVICE_LITTLE_ENDIAN:
3069 val = lduw_le_p(ptr);
3070 break;
3071 case DEVICE_BIG_ENDIAN:
3072 val = lduw_be_p(ptr);
3073 break;
3074 default:
3075 val = lduw_p(ptr);
3076 break;
3077 }
50013115
PM
3078 r = MEMTX_OK;
3079 }
3080 if (result) {
3081 *result = r;
733f0b02 3082 }
4840f10e
JK
3083 if (release_lock) {
3084 qemu_mutex_unlock_iothread();
3085 }
41063e1e 3086 rcu_read_unlock();
733f0b02 3087 return val;
aab33094
FB
3088}
3089
50013115
PM
3090uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
3091 MemTxAttrs attrs, MemTxResult *result)
3092{
3093 return address_space_lduw_internal(as, addr, attrs, result,
3094 DEVICE_NATIVE_ENDIAN);
3095}
3096
3097uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
3098 MemTxAttrs attrs, MemTxResult *result)
3099{
3100 return address_space_lduw_internal(as, addr, attrs, result,
3101 DEVICE_LITTLE_ENDIAN);
3102}
3103
3104uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
3105 MemTxAttrs attrs, MemTxResult *result)
3106{
3107 return address_space_lduw_internal(as, addr, attrs, result,
3108 DEVICE_BIG_ENDIAN);
3109}
3110
41701aa4 3111uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 3112{
50013115 3113 return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3114}
3115
41701aa4 3116uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 3117{
50013115 3118 return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3119}
3120
41701aa4 3121uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 3122{
50013115 3123 return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3124}
3125
8df1cd07
FB
3126/* warning: addr must be aligned. The ram page is not masked as dirty
3127 and the code inside is not invalidated. It is useful if the dirty
3128 bits are used to track modified PTEs */
50013115
PM
3129void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
3130 MemTxAttrs attrs, MemTxResult *result)
8df1cd07 3131{
8df1cd07 3132 uint8_t *ptr;
5c8a00ce 3133 MemoryRegion *mr;
149f54b5
PB
3134 hwaddr l = 4;
3135 hwaddr addr1;
50013115 3136 MemTxResult r;
845b6214 3137 uint8_t dirty_log_mask;
4840f10e 3138 bool release_lock = false;
8df1cd07 3139
41063e1e 3140 rcu_read_lock();
2198a121 3141 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
3142 true);
3143 if (l < 4 || !memory_access_is_direct(mr, true)) {
4840f10e 3144 release_lock |= prepare_mmio_access(mr);
125b3806 3145
50013115 3146 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
8df1cd07 3147 } else {
5c8a00ce 3148 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 3149 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 3150 stl_p(ptr, val);
74576198 3151
845b6214
PB
3152 dirty_log_mask = memory_region_get_dirty_log_mask(mr);
3153 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
58d2707e 3154 cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
50013115
PM
3155 r = MEMTX_OK;
3156 }
3157 if (result) {
3158 *result = r;
8df1cd07 3159 }
4840f10e
JK
3160 if (release_lock) {
3161 qemu_mutex_unlock_iothread();
3162 }
41063e1e 3163 rcu_read_unlock();
8df1cd07
FB
3164}
3165
50013115
PM
3166void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
3167{
3168 address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3169}
3170
8df1cd07 3171/* warning: addr must be aligned */
50013115
PM
3172static inline void address_space_stl_internal(AddressSpace *as,
3173 hwaddr addr, uint32_t val,
3174 MemTxAttrs attrs,
3175 MemTxResult *result,
3176 enum device_endian endian)
8df1cd07 3177{
8df1cd07 3178 uint8_t *ptr;
5c8a00ce 3179 MemoryRegion *mr;
149f54b5
PB
3180 hwaddr l = 4;
3181 hwaddr addr1;
50013115 3182 MemTxResult r;
4840f10e 3183 bool release_lock = false;
8df1cd07 3184
41063e1e 3185 rcu_read_lock();
ab1da857 3186 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
3187 true);
3188 if (l < 4 || !memory_access_is_direct(mr, true)) {
4840f10e 3189 release_lock |= prepare_mmio_access(mr);
125b3806 3190
1e78bcc1
AG
3191#if defined(TARGET_WORDS_BIGENDIAN)
3192 if (endian == DEVICE_LITTLE_ENDIAN) {
3193 val = bswap32(val);
3194 }
3195#else
3196 if (endian == DEVICE_BIG_ENDIAN) {
3197 val = bswap32(val);
3198 }
3199#endif
50013115 3200 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
8df1cd07 3201 } else {
8df1cd07 3202 /* RAM case */
5c8a00ce 3203 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 3204 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
3205 switch (endian) {
3206 case DEVICE_LITTLE_ENDIAN:
3207 stl_le_p(ptr, val);
3208 break;
3209 case DEVICE_BIG_ENDIAN:
3210 stl_be_p(ptr, val);
3211 break;
3212 default:
3213 stl_p(ptr, val);
3214 break;
3215 }
845b6214 3216 invalidate_and_set_dirty(mr, addr1, 4);
50013115
PM
3217 r = MEMTX_OK;
3218 }
3219 if (result) {
3220 *result = r;
8df1cd07 3221 }
4840f10e
JK
3222 if (release_lock) {
3223 qemu_mutex_unlock_iothread();
3224 }
41063e1e 3225 rcu_read_unlock();
8df1cd07
FB
3226}
3227
50013115
PM
3228void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
3229 MemTxAttrs attrs, MemTxResult *result)
3230{
3231 address_space_stl_internal(as, addr, val, attrs, result,
3232 DEVICE_NATIVE_ENDIAN);
3233}
3234
3235void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
3236 MemTxAttrs attrs, MemTxResult *result)
3237{
3238 address_space_stl_internal(as, addr, val, attrs, result,
3239 DEVICE_LITTLE_ENDIAN);
3240}
3241
3242void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
3243 MemTxAttrs attrs, MemTxResult *result)
3244{
3245 address_space_stl_internal(as, addr, val, attrs, result,
3246 DEVICE_BIG_ENDIAN);
3247}
3248
ab1da857 3249void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3250{
50013115 3251 address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3252}
3253
ab1da857 3254void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3255{
50013115 3256 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3257}
3258
ab1da857 3259void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3260{
50013115 3261 address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3262}
3263
aab33094 3264/* XXX: optimize */
50013115
PM
3265void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
3266 MemTxAttrs attrs, MemTxResult *result)
aab33094
FB
3267{
3268 uint8_t v = val;
50013115
PM
3269 MemTxResult r;
3270
3271 r = address_space_rw(as, addr, attrs, &v, 1, 1);
3272 if (result) {
3273 *result = r;
3274 }
3275}
3276
3277void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3278{
3279 address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
aab33094
FB
3280}
3281
733f0b02 3282/* warning: addr must be aligned */
50013115
PM
3283static inline void address_space_stw_internal(AddressSpace *as,
3284 hwaddr addr, uint32_t val,
3285 MemTxAttrs attrs,
3286 MemTxResult *result,
3287 enum device_endian endian)
aab33094 3288{
733f0b02 3289 uint8_t *ptr;
5c8a00ce 3290 MemoryRegion *mr;
149f54b5
PB
3291 hwaddr l = 2;
3292 hwaddr addr1;
50013115 3293 MemTxResult r;
4840f10e 3294 bool release_lock = false;
733f0b02 3295
41063e1e 3296 rcu_read_lock();
5ce5944d 3297 mr = address_space_translate(as, addr, &addr1, &l, true);
5c8a00ce 3298 if (l < 2 || !memory_access_is_direct(mr, true)) {
4840f10e 3299 release_lock |= prepare_mmio_access(mr);
125b3806 3300
1e78bcc1
AG
3301#if defined(TARGET_WORDS_BIGENDIAN)
3302 if (endian == DEVICE_LITTLE_ENDIAN) {
3303 val = bswap16(val);
3304 }
3305#else
3306 if (endian == DEVICE_BIG_ENDIAN) {
3307 val = bswap16(val);
3308 }
3309#endif
50013115 3310 r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
733f0b02 3311 } else {
733f0b02 3312 /* RAM case */
5c8a00ce 3313 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
733f0b02 3314 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
3315 switch (endian) {
3316 case DEVICE_LITTLE_ENDIAN:
3317 stw_le_p(ptr, val);
3318 break;
3319 case DEVICE_BIG_ENDIAN:
3320 stw_be_p(ptr, val);
3321 break;
3322 default:
3323 stw_p(ptr, val);
3324 break;
3325 }
845b6214 3326 invalidate_and_set_dirty(mr, addr1, 2);
50013115
PM
3327 r = MEMTX_OK;
3328 }
3329 if (result) {
3330 *result = r;
733f0b02 3331 }
4840f10e
JK
3332 if (release_lock) {
3333 qemu_mutex_unlock_iothread();
3334 }
41063e1e 3335 rcu_read_unlock();
aab33094
FB
3336}
3337
50013115
PM
3338void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
3339 MemTxAttrs attrs, MemTxResult *result)
3340{
3341 address_space_stw_internal(as, addr, val, attrs, result,
3342 DEVICE_NATIVE_ENDIAN);
3343}
3344
3345void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
3346 MemTxAttrs attrs, MemTxResult *result)
3347{
3348 address_space_stw_internal(as, addr, val, attrs, result,
3349 DEVICE_LITTLE_ENDIAN);
3350}
3351
3352void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
3353 MemTxAttrs attrs, MemTxResult *result)
3354{
3355 address_space_stw_internal(as, addr, val, attrs, result,
3356 DEVICE_BIG_ENDIAN);
3357}
3358
5ce5944d 3359void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3360{
50013115 3361 address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3362}
3363
5ce5944d 3364void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3365{
50013115 3366 address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3367}
3368
5ce5944d 3369void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 3370{
50013115 3371 address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3372}
3373
aab33094 3374/* XXX: optimize */
50013115
PM
3375void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
3376 MemTxAttrs attrs, MemTxResult *result)
aab33094 3377{
50013115 3378 MemTxResult r;
aab33094 3379 val = tswap64(val);
50013115
PM
3380 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3381 if (result) {
3382 *result = r;
3383 }
aab33094
FB
3384}
3385
50013115
PM
3386void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
3387 MemTxAttrs attrs, MemTxResult *result)
1e78bcc1 3388{
50013115 3389 MemTxResult r;
1e78bcc1 3390 val = cpu_to_le64(val);
50013115
PM
3391 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3392 if (result) {
3393 *result = r;
3394 }
3395}
3396void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
3397 MemTxAttrs attrs, MemTxResult *result)
3398{
3399 MemTxResult r;
3400 val = cpu_to_be64(val);
3401 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3402 if (result) {
3403 *result = r;
3404 }
3405}
3406
3407void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
3408{
3409 address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3410}
3411
3412void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
3413{
3414 address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3415}
3416
f606604f 3417void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
1e78bcc1 3418{
50013115 3419 address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
1e78bcc1
AG
3420}
3421
5e2972fd 3422/* virtual memory access for debug (includes writing to ROM) */
f17ec444 3423int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
b448f2f3 3424 uint8_t *buf, int len, int is_write)
13eb76e0
FB
3425{
3426 int l;
a8170e5e 3427 hwaddr phys_addr;
9b3c35e0 3428 target_ulong page;
13eb76e0
FB
3429
3430 while (len > 0) {
3431 page = addr & TARGET_PAGE_MASK;
f17ec444 3432 phys_addr = cpu_get_phys_page_debug(cpu, page);
13eb76e0
FB
3433 /* if no physical page mapped, return an error */
3434 if (phys_addr == -1)
3435 return -1;
3436 l = (page + TARGET_PAGE_SIZE) - addr;
3437 if (l > len)
3438 l = len;
5e2972fd 3439 phys_addr += (addr & ~TARGET_PAGE_MASK);
2e38847b
EI
3440 if (is_write) {
3441 cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
3442 } else {
5c9eb028
PM
3443 address_space_rw(cpu->as, phys_addr, MEMTXATTRS_UNSPECIFIED,
3444 buf, l, 0);
2e38847b 3445 }
13eb76e0
FB
3446 len -= l;
3447 buf += l;
3448 addr += l;
3449 }
3450 return 0;
3451}
a68fe89c 3452#endif
13eb76e0 3453
8e4a424b
BS
3454/*
3455 * A helper function for the _utterly broken_ virtio device model to find out if
3456 * it's running on a big endian machine. Don't do this at home kids!
3457 */
98ed8ecf
GK
3458bool target_words_bigendian(void);
3459bool target_words_bigendian(void)
8e4a424b
BS
3460{
3461#if defined(TARGET_WORDS_BIGENDIAN)
3462 return true;
3463#else
3464 return false;
3465#endif
3466}
3467
76f35538 3468#ifndef CONFIG_USER_ONLY
a8170e5e 3469bool cpu_physical_memory_is_io(hwaddr phys_addr)
76f35538 3470{
5c8a00ce 3471 MemoryRegion*mr;
149f54b5 3472 hwaddr l = 1;
41063e1e 3473 bool res;
76f35538 3474
41063e1e 3475 rcu_read_lock();
5c8a00ce
PB
3476 mr = address_space_translate(&address_space_memory,
3477 phys_addr, &phys_addr, &l, false);
76f35538 3478
41063e1e
PB
3479 res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
3480 rcu_read_unlock();
3481 return res;
76f35538 3482}
bd2fa51f 3483
e3807054 3484int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
bd2fa51f
MH
3485{
3486 RAMBlock *block;
e3807054 3487 int ret = 0;
bd2fa51f 3488
0dc3f44a
MD
3489 rcu_read_lock();
3490 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
e3807054
DDAG
3491 ret = func(block->idstr, block->host, block->offset,
3492 block->used_length, opaque);
3493 if (ret) {
3494 break;
3495 }
bd2fa51f 3496 }
0dc3f44a 3497 rcu_read_unlock();
e3807054 3498 return ret;
bd2fa51f 3499}
ec3f8c99 3500#endif