]> git.ipfire.org Git - thirdparty/qemu.git/blame - exec.c
Fix cross compilation (nm command)
[thirdparty/qemu.git] / exec.c
CommitLineData
54936004 1/*
5b6dd868 2 * Virtual page mapping
5fafdf24 3 *
54936004
FB
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
54936004 18 */
67b915a5 19#include "config.h"
777872e5 20#ifndef _WIN32
a98d49b1 21#include <sys/types.h>
d5a8f07c
FB
22#include <sys/mman.h>
23#endif
54936004 24
055403b2 25#include "qemu-common.h"
6180a181 26#include "cpu.h"
b67d9a52 27#include "tcg.h"
b3c7724c 28#include "hw/hw.h"
cc9e98cb 29#include "hw/qdev.h"
1de7afc9 30#include "qemu/osdep.h"
9c17d615 31#include "sysemu/kvm.h"
2ff3de68 32#include "sysemu/sysemu.h"
0d09e41a 33#include "hw/xen/xen.h"
1de7afc9
PB
34#include "qemu/timer.h"
35#include "qemu/config-file.h"
75a34036 36#include "qemu/error-report.h"
022c62cb 37#include "exec/memory.h"
9c17d615 38#include "sysemu/dma.h"
022c62cb 39#include "exec/address-spaces.h"
53a5960a
PB
40#if defined(CONFIG_USER_ONLY)
41#include <qemu.h>
432d268c 42#else /* !CONFIG_USER_ONLY */
9c17d615 43#include "sysemu/xen-mapcache.h"
6506e4f9 44#include "trace.h"
53a5960a 45#endif
0d6d3c87 46#include "exec/cpu-all.h"
54936004 47
022c62cb 48#include "exec/cputlb.h"
5b6dd868 49#include "translate-all.h"
0cac1b66 50
022c62cb 51#include "exec/memory-internal.h"
220c3ebd 52#include "exec/ram_addr.h"
67d95c15 53
b35ba30f
MT
54#include "qemu/range.h"
55
db7b5426 56//#define DEBUG_SUBPAGE
1196be37 57
e2eef170 58#if !defined(CONFIG_USER_ONLY)
981fdf23 59static bool in_migration;
94a6b54f 60
a3161038 61RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };
62152b8a
AK
62
63static MemoryRegion *system_memory;
309cb471 64static MemoryRegion *system_io;
62152b8a 65
f6790af6
AK
66AddressSpace address_space_io;
67AddressSpace address_space_memory;
2673a5da 68
0844e007 69MemoryRegion io_mem_rom, io_mem_notdirty;
acc9d80b 70static MemoryRegion io_mem_unassigned;
0e0df1e2 71
7bd4f430
PB
72/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
73#define RAM_PREALLOC (1 << 0)
74
dbcb8981
PB
75/* RAM is mmap-ed with MAP_SHARED */
76#define RAM_SHARED (1 << 1)
77
e2eef170 78#endif
9fa3e853 79
bdc44640 80struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
6a00d601
FB
81/* current CPU in the current thread. It is only valid inside
82 cpu_exec() */
4917cf44 83DEFINE_TLS(CPUState *, current_cpu);
2e70f6ef 84/* 0 = Do not count executed instructions.
bf20dc07 85 1 = Precise instruction counting.
2e70f6ef 86 2 = Adaptive rate instruction counting. */
5708fc66 87int use_icount;
6a00d601 88
e2eef170 89#if !defined(CONFIG_USER_ONLY)
4346ae3e 90
1db8abb1
PB
91typedef struct PhysPageEntry PhysPageEntry;
92
93struct PhysPageEntry {
9736e55b 94 /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
8b795765 95 uint32_t skip : 6;
9736e55b 96 /* index into phys_sections (!skip) or phys_map_nodes (skip) */
8b795765 97 uint32_t ptr : 26;
1db8abb1
PB
98};
99
8b795765
MT
100#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
101
03f49957 102/* Size of the L2 (and L3, etc) page tables. */
57271d63 103#define ADDR_SPACE_BITS 64
03f49957 104
026736ce 105#define P_L2_BITS 9
03f49957
PB
106#define P_L2_SIZE (1 << P_L2_BITS)
107
108#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
109
110typedef PhysPageEntry Node[P_L2_SIZE];
0475d94f 111
53cb28cb
MA
112typedef struct PhysPageMap {
113 unsigned sections_nb;
114 unsigned sections_nb_alloc;
115 unsigned nodes_nb;
116 unsigned nodes_nb_alloc;
117 Node *nodes;
118 MemoryRegionSection *sections;
119} PhysPageMap;
120
1db8abb1
PB
121struct AddressSpaceDispatch {
122 /* This is a multi-level map on the physical address space.
123 * The bottom level has pointers to MemoryRegionSections.
124 */
125 PhysPageEntry phys_map;
53cb28cb 126 PhysPageMap map;
acc9d80b 127 AddressSpace *as;
1db8abb1
PB
128};
129
90260c6c
JK
130#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
131typedef struct subpage_t {
132 MemoryRegion iomem;
acc9d80b 133 AddressSpace *as;
90260c6c
JK
134 hwaddr base;
135 uint16_t sub_section[TARGET_PAGE_SIZE];
136} subpage_t;
137
b41aac4f
LPF
138#define PHYS_SECTION_UNASSIGNED 0
139#define PHYS_SECTION_NOTDIRTY 1
140#define PHYS_SECTION_ROM 2
141#define PHYS_SECTION_WATCH 3
5312bd8b 142
e2eef170 143static void io_mem_init(void);
62152b8a 144static void memory_map_init(void);
09daed84 145static void tcg_commit(MemoryListener *listener);
e2eef170 146
1ec9b909 147static MemoryRegion io_mem_watch;
6658ffb8 148#endif
fd6ce8f6 149
6d9a1304 150#if !defined(CONFIG_USER_ONLY)
d6f2ea22 151
53cb28cb 152static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
d6f2ea22 153{
53cb28cb
MA
154 if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
155 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
156 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
157 map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
d6f2ea22 158 }
f7bf5461
AK
159}
160
53cb28cb 161static uint32_t phys_map_node_alloc(PhysPageMap *map)
f7bf5461
AK
162{
163 unsigned i;
8b795765 164 uint32_t ret;
f7bf5461 165
53cb28cb 166 ret = map->nodes_nb++;
f7bf5461 167 assert(ret != PHYS_MAP_NODE_NIL);
53cb28cb 168 assert(ret != map->nodes_nb_alloc);
03f49957 169 for (i = 0; i < P_L2_SIZE; ++i) {
53cb28cb
MA
170 map->nodes[ret][i].skip = 1;
171 map->nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
d6f2ea22 172 }
f7bf5461 173 return ret;
d6f2ea22
AK
174}
175
53cb28cb
MA
176static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
177 hwaddr *index, hwaddr *nb, uint16_t leaf,
2999097b 178 int level)
f7bf5461
AK
179{
180 PhysPageEntry *p;
181 int i;
03f49957 182 hwaddr step = (hwaddr)1 << (level * P_L2_BITS);
108c49b8 183
9736e55b 184 if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
53cb28cb
MA
185 lp->ptr = phys_map_node_alloc(map);
186 p = map->nodes[lp->ptr];
f7bf5461 187 if (level == 0) {
03f49957 188 for (i = 0; i < P_L2_SIZE; i++) {
9736e55b 189 p[i].skip = 0;
b41aac4f 190 p[i].ptr = PHYS_SECTION_UNASSIGNED;
4346ae3e 191 }
67c4d23c 192 }
f7bf5461 193 } else {
53cb28cb 194 p = map->nodes[lp->ptr];
92e873b9 195 }
03f49957 196 lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];
f7bf5461 197
03f49957 198 while (*nb && lp < &p[P_L2_SIZE]) {
07f07b31 199 if ((*index & (step - 1)) == 0 && *nb >= step) {
9736e55b 200 lp->skip = 0;
c19e8800 201 lp->ptr = leaf;
07f07b31
AK
202 *index += step;
203 *nb -= step;
2999097b 204 } else {
53cb28cb 205 phys_page_set_level(map, lp, index, nb, leaf, level - 1);
2999097b
AK
206 }
207 ++lp;
f7bf5461
AK
208 }
209}
210
ac1970fb 211static void phys_page_set(AddressSpaceDispatch *d,
a8170e5e 212 hwaddr index, hwaddr nb,
2999097b 213 uint16_t leaf)
f7bf5461 214{
2999097b 215 /* Wildly overreserve - it doesn't matter much. */
53cb28cb 216 phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);
5cd2c5b6 217
53cb28cb 218 phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
92e873b9
FB
219}
220
b35ba30f
MT
221/* Compact a non leaf page entry. Simply detect that the entry has a single child,
222 * and update our entry so we can skip it and go directly to the destination.
223 */
224static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
225{
226 unsigned valid_ptr = P_L2_SIZE;
227 int valid = 0;
228 PhysPageEntry *p;
229 int i;
230
231 if (lp->ptr == PHYS_MAP_NODE_NIL) {
232 return;
233 }
234
235 p = nodes[lp->ptr];
236 for (i = 0; i < P_L2_SIZE; i++) {
237 if (p[i].ptr == PHYS_MAP_NODE_NIL) {
238 continue;
239 }
240
241 valid_ptr = i;
242 valid++;
243 if (p[i].skip) {
244 phys_page_compact(&p[i], nodes, compacted);
245 }
246 }
247
248 /* We can only compress if there's only one child. */
249 if (valid != 1) {
250 return;
251 }
252
253 assert(valid_ptr < P_L2_SIZE);
254
255 /* Don't compress if it won't fit in the # of bits we have. */
256 if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
257 return;
258 }
259
260 lp->ptr = p[valid_ptr].ptr;
261 if (!p[valid_ptr].skip) {
262 /* If our only child is a leaf, make this a leaf. */
263 /* By design, we should have made this node a leaf to begin with so we
264 * should never reach here.
265 * But since it's so simple to handle this, let's do it just in case we
266 * change this rule.
267 */
268 lp->skip = 0;
269 } else {
270 lp->skip += p[valid_ptr].skip;
271 }
272}
273
274static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
275{
276 DECLARE_BITMAP(compacted, nodes_nb);
277
278 if (d->phys_map.skip) {
53cb28cb 279 phys_page_compact(&d->phys_map, d->map.nodes, compacted);
b35ba30f
MT
280 }
281}
282
97115a8d 283static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
9affd6fc 284 Node *nodes, MemoryRegionSection *sections)
92e873b9 285{
31ab2b4a 286 PhysPageEntry *p;
97115a8d 287 hwaddr index = addr >> TARGET_PAGE_BITS;
31ab2b4a 288 int i;
f1f6e3b8 289
9736e55b 290 for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
c19e8800 291 if (lp.ptr == PHYS_MAP_NODE_NIL) {
9affd6fc 292 return &sections[PHYS_SECTION_UNASSIGNED];
31ab2b4a 293 }
9affd6fc 294 p = nodes[lp.ptr];
03f49957 295 lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
5312bd8b 296 }
b35ba30f
MT
297
298 if (sections[lp.ptr].size.hi ||
299 range_covers_byte(sections[lp.ptr].offset_within_address_space,
300 sections[lp.ptr].size.lo, addr)) {
301 return &sections[lp.ptr];
302 } else {
303 return &sections[PHYS_SECTION_UNASSIGNED];
304 }
f3705d53
AK
305}
306
e5548617
BS
307bool memory_region_is_unassigned(MemoryRegion *mr)
308{
2a8e7499 309 return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
5b6dd868 310 && mr != &io_mem_watch;
fd6ce8f6 311}
149f54b5 312
c7086b4a 313static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
90260c6c
JK
314 hwaddr addr,
315 bool resolve_subpage)
9f029603 316{
90260c6c
JK
317 MemoryRegionSection *section;
318 subpage_t *subpage;
319
53cb28cb 320 section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
90260c6c
JK
321 if (resolve_subpage && section->mr->subpage) {
322 subpage = container_of(section->mr, subpage_t, iomem);
53cb28cb 323 section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
90260c6c
JK
324 }
325 return section;
9f029603
JK
326}
327
90260c6c 328static MemoryRegionSection *
c7086b4a 329address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
90260c6c 330 hwaddr *plen, bool resolve_subpage)
149f54b5
PB
331{
332 MemoryRegionSection *section;
a87f3954 333 Int128 diff;
149f54b5 334
c7086b4a 335 section = address_space_lookup_region(d, addr, resolve_subpage);
149f54b5
PB
336 /* Compute offset within MemoryRegionSection */
337 addr -= section->offset_within_address_space;
338
339 /* Compute offset within MemoryRegion */
340 *xlat = addr + section->offset_within_region;
341
342 diff = int128_sub(section->mr->size, int128_make64(addr));
3752a036 343 *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
149f54b5
PB
344 return section;
345}
90260c6c 346
a87f3954
PB
347static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
348{
349 if (memory_region_is_ram(mr)) {
350 return !(is_write && mr->readonly);
351 }
352 if (memory_region_is_romd(mr)) {
353 return !is_write;
354 }
355
356 return false;
357}
358
5c8a00ce
PB
359MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
360 hwaddr *xlat, hwaddr *plen,
361 bool is_write)
90260c6c 362{
30951157
AK
363 IOMMUTLBEntry iotlb;
364 MemoryRegionSection *section;
365 MemoryRegion *mr;
366 hwaddr len = *plen;
367
368 for (;;) {
a87f3954 369 section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
30951157
AK
370 mr = section->mr;
371
372 if (!mr->iommu_ops) {
373 break;
374 }
375
8d7b8cb9 376 iotlb = mr->iommu_ops->translate(mr, addr, is_write);
30951157
AK
377 addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
378 | (addr & iotlb.addr_mask));
379 len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
380 if (!(iotlb.perm & (1 << is_write))) {
381 mr = &io_mem_unassigned;
382 break;
383 }
384
385 as = iotlb.target_as;
386 }
387
fe680d0d 388 if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
a87f3954
PB
389 hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
390 len = MIN(page, len);
391 }
392
30951157
AK
393 *plen = len;
394 *xlat = addr;
395 return mr;
90260c6c
JK
396}
397
398MemoryRegionSection *
399address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
400 hwaddr *plen)
401{
30951157 402 MemoryRegionSection *section;
c7086b4a 403 section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);
30951157
AK
404
405 assert(!section->mr->iommu_ops);
406 return section;
90260c6c 407}
5b6dd868 408#endif
fd6ce8f6 409
5b6dd868 410void cpu_exec_init_all(void)
fdbb84d1 411{
5b6dd868 412#if !defined(CONFIG_USER_ONLY)
b2a8658e 413 qemu_mutex_init(&ram_list.mutex);
5b6dd868
BS
414 memory_map_init();
415 io_mem_init();
fdbb84d1 416#endif
5b6dd868 417}
fdbb84d1 418
b170fce3 419#if !defined(CONFIG_USER_ONLY)
5b6dd868
BS
420
421static int cpu_common_post_load(void *opaque, int version_id)
fd6ce8f6 422{
259186a7 423 CPUState *cpu = opaque;
a513fe19 424
5b6dd868
BS
425 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
426 version_id is increased. */
259186a7 427 cpu->interrupt_request &= ~0x01;
c01a71c1 428 tlb_flush(cpu, 1);
5b6dd868
BS
429
430 return 0;
a513fe19 431}
7501267e 432
6c3bff0e
PD
433static int cpu_common_pre_load(void *opaque)
434{
435 CPUState *cpu = opaque;
436
437 cpu->exception_index = 0;
438
439 return 0;
440}
441
442static bool cpu_common_exception_index_needed(void *opaque)
443{
444 CPUState *cpu = opaque;
445
446 return cpu->exception_index != 0;
447}
448
449static const VMStateDescription vmstate_cpu_common_exception_index = {
450 .name = "cpu_common/exception_index",
451 .version_id = 1,
452 .minimum_version_id = 1,
453 .fields = (VMStateField[]) {
454 VMSTATE_INT32(exception_index, CPUState),
455 VMSTATE_END_OF_LIST()
456 }
457};
458
1a1562f5 459const VMStateDescription vmstate_cpu_common = {
5b6dd868
BS
460 .name = "cpu_common",
461 .version_id = 1,
462 .minimum_version_id = 1,
6c3bff0e 463 .pre_load = cpu_common_pre_load,
5b6dd868 464 .post_load = cpu_common_post_load,
35d08458 465 .fields = (VMStateField[]) {
259186a7
AF
466 VMSTATE_UINT32(halted, CPUState),
467 VMSTATE_UINT32(interrupt_request, CPUState),
5b6dd868 468 VMSTATE_END_OF_LIST()
6c3bff0e
PD
469 },
470 .subsections = (VMStateSubsection[]) {
471 {
472 .vmsd = &vmstate_cpu_common_exception_index,
473 .needed = cpu_common_exception_index_needed,
474 } , {
475 /* empty */
476 }
5b6dd868
BS
477 }
478};
1a1562f5 479
5b6dd868 480#endif
ea041c0e 481
38d8f5c8 482CPUState *qemu_get_cpu(int index)
ea041c0e 483{
bdc44640 484 CPUState *cpu;
ea041c0e 485
bdc44640 486 CPU_FOREACH(cpu) {
55e5c285 487 if (cpu->cpu_index == index) {
bdc44640 488 return cpu;
55e5c285 489 }
ea041c0e 490 }
5b6dd868 491
bdc44640 492 return NULL;
ea041c0e
FB
493}
494
09daed84
EI
495#if !defined(CONFIG_USER_ONLY)
496void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
497{
498 /* We only support one address space per cpu at the moment. */
499 assert(cpu->as == as);
500
501 if (cpu->tcg_as_listener) {
502 memory_listener_unregister(cpu->tcg_as_listener);
503 } else {
504 cpu->tcg_as_listener = g_new0(MemoryListener, 1);
505 }
506 cpu->tcg_as_listener->commit = tcg_commit;
507 memory_listener_register(cpu->tcg_as_listener, as);
508}
509#endif
510
5b6dd868 511void cpu_exec_init(CPUArchState *env)
ea041c0e 512{
5b6dd868 513 CPUState *cpu = ENV_GET_CPU(env);
b170fce3 514 CPUClass *cc = CPU_GET_CLASS(cpu);
bdc44640 515 CPUState *some_cpu;
5b6dd868
BS
516 int cpu_index;
517
518#if defined(CONFIG_USER_ONLY)
519 cpu_list_lock();
520#endif
5b6dd868 521 cpu_index = 0;
bdc44640 522 CPU_FOREACH(some_cpu) {
5b6dd868
BS
523 cpu_index++;
524 }
55e5c285 525 cpu->cpu_index = cpu_index;
1b1ed8dc 526 cpu->numa_node = 0;
f0c3c505 527 QTAILQ_INIT(&cpu->breakpoints);
ff4700b0 528 QTAILQ_INIT(&cpu->watchpoints);
5b6dd868 529#ifndef CONFIG_USER_ONLY
09daed84 530 cpu->as = &address_space_memory;
5b6dd868
BS
531 cpu->thread_id = qemu_get_thread_id();
532#endif
bdc44640 533 QTAILQ_INSERT_TAIL(&cpus, cpu, node);
5b6dd868
BS
534#if defined(CONFIG_USER_ONLY)
535 cpu_list_unlock();
536#endif
e0d47944
AF
537 if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
538 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
539 }
5b6dd868 540#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
5b6dd868
BS
541 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
542 cpu_save, cpu_load, env);
b170fce3 543 assert(cc->vmsd == NULL);
e0d47944 544 assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
5b6dd868 545#endif
b170fce3
AF
546 if (cc->vmsd != NULL) {
547 vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
548 }
ea041c0e
FB
549}
550
1fddef4b 551#if defined(TARGET_HAS_ICE)
94df27fd 552#if defined(CONFIG_USER_ONLY)
00b941e5 553static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
94df27fd
PB
554{
555 tb_invalidate_phys_page_range(pc, pc + 1, 0);
556}
557#else
00b941e5 558static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
1e7855a5 559{
e8262a1b
MF
560 hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
561 if (phys != -1) {
09daed84 562 tb_invalidate_phys_addr(cpu->as,
29d8ec7b 563 phys | (pc & ~TARGET_PAGE_MASK));
e8262a1b 564 }
1e7855a5 565}
c27004ec 566#endif
94df27fd 567#endif /* TARGET_HAS_ICE */
d720b93d 568
c527ee8f 569#if defined(CONFIG_USER_ONLY)
75a34036 570void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
c527ee8f
PB
571
572{
573}
574
3ee887e8
PM
575int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
576 int flags)
577{
578 return -ENOSYS;
579}
580
581void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
582{
583}
584
75a34036 585int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
c527ee8f
PB
586 int flags, CPUWatchpoint **watchpoint)
587{
588 return -ENOSYS;
589}
590#else
6658ffb8 591/* Add a watchpoint. */
75a34036 592int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
a1d1bb31 593 int flags, CPUWatchpoint **watchpoint)
6658ffb8 594{
c0ce998e 595 CPUWatchpoint *wp;
6658ffb8 596
05068c0d
PM
597 /* forbid ranges which are empty or run off the end of the address space */
598 if (len == 0 || (addr + len - 1) <= addr) {
75a34036
AF
599 error_report("tried to set invalid watchpoint at %"
600 VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
b4051334
AL
601 return -EINVAL;
602 }
7267c094 603 wp = g_malloc(sizeof(*wp));
a1d1bb31
AL
604
605 wp->vaddr = addr;
05068c0d 606 wp->len = len;
a1d1bb31
AL
607 wp->flags = flags;
608
2dc9f411 609 /* keep all GDB-injected watchpoints in front */
ff4700b0
AF
610 if (flags & BP_GDB) {
611 QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
612 } else {
613 QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
614 }
6658ffb8 615
31b030d4 616 tlb_flush_page(cpu, addr);
a1d1bb31
AL
617
618 if (watchpoint)
619 *watchpoint = wp;
620 return 0;
6658ffb8
PB
621}
622
a1d1bb31 623/* Remove a specific watchpoint. */
75a34036 624int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
a1d1bb31 625 int flags)
6658ffb8 626{
a1d1bb31 627 CPUWatchpoint *wp;
6658ffb8 628
ff4700b0 629 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
05068c0d 630 if (addr == wp->vaddr && len == wp->len
6e140f28 631 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
75a34036 632 cpu_watchpoint_remove_by_ref(cpu, wp);
6658ffb8
PB
633 return 0;
634 }
635 }
a1d1bb31 636 return -ENOENT;
6658ffb8
PB
637}
638
a1d1bb31 639/* Remove a specific watchpoint by reference. */
75a34036 640void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
a1d1bb31 641{
ff4700b0 642 QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);
7d03f82f 643
31b030d4 644 tlb_flush_page(cpu, watchpoint->vaddr);
a1d1bb31 645
7267c094 646 g_free(watchpoint);
a1d1bb31
AL
647}
648
649/* Remove all matching watchpoints. */
75a34036 650void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
a1d1bb31 651{
c0ce998e 652 CPUWatchpoint *wp, *next;
a1d1bb31 653
ff4700b0 654 QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
75a34036
AF
655 if (wp->flags & mask) {
656 cpu_watchpoint_remove_by_ref(cpu, wp);
657 }
c0ce998e 658 }
7d03f82f 659}
05068c0d
PM
660
661/* Return true if this watchpoint address matches the specified
662 * access (ie the address range covered by the watchpoint overlaps
663 * partially or completely with the address range covered by the
664 * access).
665 */
666static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
667 vaddr addr,
668 vaddr len)
669{
670 /* We know the lengths are non-zero, but a little caution is
671 * required to avoid errors in the case where the range ends
672 * exactly at the top of the address space and so addr + len
673 * wraps round to zero.
674 */
675 vaddr wpend = wp->vaddr + wp->len - 1;
676 vaddr addrend = addr + len - 1;
677
678 return !(addr > wpend || wp->vaddr > addrend);
679}
680
c527ee8f 681#endif
7d03f82f 682
a1d1bb31 683/* Add a breakpoint. */
b3310ab3 684int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
a1d1bb31 685 CPUBreakpoint **breakpoint)
4c3a88a2 686{
1fddef4b 687#if defined(TARGET_HAS_ICE)
c0ce998e 688 CPUBreakpoint *bp;
3b46e624 689
7267c094 690 bp = g_malloc(sizeof(*bp));
4c3a88a2 691
a1d1bb31
AL
692 bp->pc = pc;
693 bp->flags = flags;
694
2dc9f411 695 /* keep all GDB-injected breakpoints in front */
00b941e5 696 if (flags & BP_GDB) {
f0c3c505 697 QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
00b941e5 698 } else {
f0c3c505 699 QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
00b941e5 700 }
3b46e624 701
f0c3c505 702 breakpoint_invalidate(cpu, pc);
a1d1bb31 703
00b941e5 704 if (breakpoint) {
a1d1bb31 705 *breakpoint = bp;
00b941e5 706 }
4c3a88a2
FB
707 return 0;
708#else
a1d1bb31 709 return -ENOSYS;
4c3a88a2
FB
710#endif
711}
712
a1d1bb31 713/* Remove a specific breakpoint. */
b3310ab3 714int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
a1d1bb31 715{
7d03f82f 716#if defined(TARGET_HAS_ICE)
a1d1bb31
AL
717 CPUBreakpoint *bp;
718
f0c3c505 719 QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
a1d1bb31 720 if (bp->pc == pc && bp->flags == flags) {
b3310ab3 721 cpu_breakpoint_remove_by_ref(cpu, bp);
a1d1bb31
AL
722 return 0;
723 }
7d03f82f 724 }
a1d1bb31
AL
725 return -ENOENT;
726#else
727 return -ENOSYS;
7d03f82f
EI
728#endif
729}
730
a1d1bb31 731/* Remove a specific breakpoint by reference. */
b3310ab3 732void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
4c3a88a2 733{
1fddef4b 734#if defined(TARGET_HAS_ICE)
f0c3c505
AF
735 QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);
736
737 breakpoint_invalidate(cpu, breakpoint->pc);
a1d1bb31 738
7267c094 739 g_free(breakpoint);
a1d1bb31
AL
740#endif
741}
742
743/* Remove all matching breakpoints. */
b3310ab3 744void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
a1d1bb31
AL
745{
746#if defined(TARGET_HAS_ICE)
c0ce998e 747 CPUBreakpoint *bp, *next;
a1d1bb31 748
f0c3c505 749 QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
b3310ab3
AF
750 if (bp->flags & mask) {
751 cpu_breakpoint_remove_by_ref(cpu, bp);
752 }
c0ce998e 753 }
4c3a88a2
FB
754#endif
755}
756
c33a346e
FB
757/* enable or disable single step mode. EXCP_DEBUG is returned by the
758 CPU loop after each instruction */
3825b28f 759void cpu_single_step(CPUState *cpu, int enabled)
c33a346e 760{
1fddef4b 761#if defined(TARGET_HAS_ICE)
ed2803da
AF
762 if (cpu->singlestep_enabled != enabled) {
763 cpu->singlestep_enabled = enabled;
764 if (kvm_enabled()) {
38e478ec 765 kvm_update_guest_debug(cpu, 0);
ed2803da 766 } else {
ccbb4d44 767 /* must flush all the translated code to avoid inconsistencies */
e22a25c9 768 /* XXX: only flush what is necessary */
38e478ec 769 CPUArchState *env = cpu->env_ptr;
e22a25c9
AL
770 tb_flush(env);
771 }
c33a346e
FB
772 }
773#endif
774}
775
a47dddd7 776void cpu_abort(CPUState *cpu, const char *fmt, ...)
7501267e
FB
777{
778 va_list ap;
493ae1f0 779 va_list ap2;
7501267e
FB
780
781 va_start(ap, fmt);
493ae1f0 782 va_copy(ap2, ap);
7501267e
FB
783 fprintf(stderr, "qemu: fatal: ");
784 vfprintf(stderr, fmt, ap);
785 fprintf(stderr, "\n");
878096ee 786 cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
93fcfe39
AL
787 if (qemu_log_enabled()) {
788 qemu_log("qemu: fatal: ");
789 qemu_log_vprintf(fmt, ap2);
790 qemu_log("\n");
a0762859 791 log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
31b1a7b4 792 qemu_log_flush();
93fcfe39 793 qemu_log_close();
924edcae 794 }
493ae1f0 795 va_end(ap2);
f9373291 796 va_end(ap);
fd052bf6
RV
797#if defined(CONFIG_USER_ONLY)
798 {
799 struct sigaction act;
800 sigfillset(&act.sa_mask);
801 act.sa_handler = SIG_DFL;
802 sigaction(SIGABRT, &act, NULL);
803 }
804#endif
7501267e
FB
805 abort();
806}
807
0124311e 808#if !defined(CONFIG_USER_ONLY)
041603fe
PB
809static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
810{
811 RAMBlock *block;
812
813 /* The list is protected by the iothread lock here. */
814 block = ram_list.mru_block;
815 if (block && addr - block->offset < block->length) {
816 goto found;
817 }
818 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
819 if (addr - block->offset < block->length) {
820 goto found;
821 }
822 }
823
824 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
825 abort();
826
827found:
828 ram_list.mru_block = block;
829 return block;
830}
831
a2f4d5be 832static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
d24981d3 833{
041603fe 834 ram_addr_t start1;
a2f4d5be
JQ
835 RAMBlock *block;
836 ram_addr_t end;
837
838 end = TARGET_PAGE_ALIGN(start + length);
839 start &= TARGET_PAGE_MASK;
d24981d3 840
041603fe
PB
841 block = qemu_get_ram_block(start);
842 assert(block == qemu_get_ram_block(end - 1));
843 start1 = (uintptr_t)block->host + (start - block->offset);
844 cpu_tlb_reset_dirty_all(start1, length);
d24981d3
JQ
845}
846
5579c7f3 847/* Note: start and end must be within the same ram block. */
a2f4d5be 848void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
52159192 849 unsigned client)
1ccde1cb 850{
1ccde1cb
FB
851 if (length == 0)
852 return;
ace694cc 853 cpu_physical_memory_clear_dirty_range(start, length, client);
f23db169 854
d24981d3 855 if (tcg_enabled()) {
a2f4d5be 856 tlb_reset_dirty_range_all(start, length);
5579c7f3 857 }
1ccde1cb
FB
858}
859
981fdf23 860static void cpu_physical_memory_set_dirty_tracking(bool enable)
74576198
AL
861{
862 in_migration = enable;
74576198
AL
863}
864
bb0e627a 865hwaddr memory_region_section_get_iotlb(CPUState *cpu,
149f54b5
PB
866 MemoryRegionSection *section,
867 target_ulong vaddr,
868 hwaddr paddr, hwaddr xlat,
869 int prot,
870 target_ulong *address)
e5548617 871{
a8170e5e 872 hwaddr iotlb;
e5548617
BS
873 CPUWatchpoint *wp;
874
cc5bea60 875 if (memory_region_is_ram(section->mr)) {
e5548617
BS
876 /* Normal RAM. */
877 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
149f54b5 878 + xlat;
e5548617 879 if (!section->readonly) {
b41aac4f 880 iotlb |= PHYS_SECTION_NOTDIRTY;
e5548617 881 } else {
b41aac4f 882 iotlb |= PHYS_SECTION_ROM;
e5548617
BS
883 }
884 } else {
1b3fb98f 885 iotlb = section - section->address_space->dispatch->map.sections;
149f54b5 886 iotlb += xlat;
e5548617
BS
887 }
888
889 /* Make accesses to pages with watchpoints go via the
890 watchpoint trap routines. */
ff4700b0 891 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
05068c0d 892 if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
e5548617
BS
893 /* Avoid trapping reads of pages with a write breakpoint. */
894 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
b41aac4f 895 iotlb = PHYS_SECTION_WATCH + paddr;
e5548617
BS
896 *address |= TLB_MMIO;
897 break;
898 }
899 }
900 }
901
902 return iotlb;
903}
9fa3e853
FB
904#endif /* defined(CONFIG_USER_ONLY) */
905
e2eef170 906#if !defined(CONFIG_USER_ONLY)
8da3ff18 907
c227f099 908static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 909 uint16_t section);
acc9d80b 910static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
54688b1e 911
575ddeb4 912static void *(*phys_mem_alloc)(size_t size) = qemu_anon_ram_alloc;
91138037
MA
913
914/*
915 * Set a custom physical guest memory alloator.
916 * Accelerators with unusual needs may need this. Hopefully, we can
917 * get rid of it eventually.
918 */
575ddeb4 919void phys_mem_set_alloc(void *(*alloc)(size_t))
91138037
MA
920{
921 phys_mem_alloc = alloc;
922}
923
53cb28cb
MA
924static uint16_t phys_section_add(PhysPageMap *map,
925 MemoryRegionSection *section)
5312bd8b 926{
68f3f65b
PB
927 /* The physical section number is ORed with a page-aligned
928 * pointer to produce the iotlb entries. Thus it should
929 * never overflow into the page-aligned value.
930 */
53cb28cb 931 assert(map->sections_nb < TARGET_PAGE_SIZE);
68f3f65b 932
53cb28cb
MA
933 if (map->sections_nb == map->sections_nb_alloc) {
934 map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
935 map->sections = g_renew(MemoryRegionSection, map->sections,
936 map->sections_nb_alloc);
5312bd8b 937 }
53cb28cb 938 map->sections[map->sections_nb] = *section;
dfde4e6e 939 memory_region_ref(section->mr);
53cb28cb 940 return map->sections_nb++;
5312bd8b
AK
941}
942
058bc4b5
PB
943static void phys_section_destroy(MemoryRegion *mr)
944{
dfde4e6e
PB
945 memory_region_unref(mr);
946
058bc4b5
PB
947 if (mr->subpage) {
948 subpage_t *subpage = container_of(mr, subpage_t, iomem);
b4fefef9 949 object_unref(OBJECT(&subpage->iomem));
058bc4b5
PB
950 g_free(subpage);
951 }
952}
953
6092666e 954static void phys_sections_free(PhysPageMap *map)
5312bd8b 955{
9affd6fc
PB
956 while (map->sections_nb > 0) {
957 MemoryRegionSection *section = &map->sections[--map->sections_nb];
058bc4b5
PB
958 phys_section_destroy(section->mr);
959 }
9affd6fc
PB
960 g_free(map->sections);
961 g_free(map->nodes);
5312bd8b
AK
962}
963
ac1970fb 964static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
0f0cb164
AK
965{
966 subpage_t *subpage;
a8170e5e 967 hwaddr base = section->offset_within_address_space
0f0cb164 968 & TARGET_PAGE_MASK;
97115a8d 969 MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
53cb28cb 970 d->map.nodes, d->map.sections);
0f0cb164
AK
971 MemoryRegionSection subsection = {
972 .offset_within_address_space = base,
052e87b0 973 .size = int128_make64(TARGET_PAGE_SIZE),
0f0cb164 974 };
a8170e5e 975 hwaddr start, end;
0f0cb164 976
f3705d53 977 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
0f0cb164 978
f3705d53 979 if (!(existing->mr->subpage)) {
acc9d80b 980 subpage = subpage_init(d->as, base);
3be91e86 981 subsection.address_space = d->as;
0f0cb164 982 subsection.mr = &subpage->iomem;
ac1970fb 983 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
53cb28cb 984 phys_section_add(&d->map, &subsection));
0f0cb164 985 } else {
f3705d53 986 subpage = container_of(existing->mr, subpage_t, iomem);
0f0cb164
AK
987 }
988 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
052e87b0 989 end = start + int128_get64(section->size) - 1;
53cb28cb
MA
990 subpage_register(subpage, start, end,
991 phys_section_add(&d->map, section));
0f0cb164
AK
992}
993
994
052e87b0
PB
995static void register_multipage(AddressSpaceDispatch *d,
996 MemoryRegionSection *section)
33417e70 997{
a8170e5e 998 hwaddr start_addr = section->offset_within_address_space;
53cb28cb 999 uint16_t section_index = phys_section_add(&d->map, section);
052e87b0
PB
1000 uint64_t num_pages = int128_get64(int128_rshift(section->size,
1001 TARGET_PAGE_BITS));
dd81124b 1002
733d5ef5
PB
1003 assert(num_pages);
1004 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
33417e70
FB
1005}
1006
ac1970fb 1007static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
0f0cb164 1008{
89ae337a 1009 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
00752703 1010 AddressSpaceDispatch *d = as->next_dispatch;
99b9cc06 1011 MemoryRegionSection now = *section, remain = *section;
052e87b0 1012 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
0f0cb164 1013
733d5ef5
PB
1014 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1015 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1016 - now.offset_within_address_space;
1017
052e87b0 1018 now.size = int128_min(int128_make64(left), now.size);
ac1970fb 1019 register_subpage(d, &now);
733d5ef5 1020 } else {
052e87b0 1021 now.size = int128_zero();
733d5ef5 1022 }
052e87b0
PB
1023 while (int128_ne(remain.size, now.size)) {
1024 remain.size = int128_sub(remain.size, now.size);
1025 remain.offset_within_address_space += int128_get64(now.size);
1026 remain.offset_within_region += int128_get64(now.size);
69b67646 1027 now = remain;
052e87b0 1028 if (int128_lt(remain.size, page_size)) {
733d5ef5 1029 register_subpage(d, &now);
88266249 1030 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
052e87b0 1031 now.size = page_size;
ac1970fb 1032 register_subpage(d, &now);
69b67646 1033 } else {
052e87b0 1034 now.size = int128_and(now.size, int128_neg(page_size));
ac1970fb 1035 register_multipage(d, &now);
69b67646 1036 }
0f0cb164
AK
1037 }
1038}
1039
62a2744c
SY
1040void qemu_flush_coalesced_mmio_buffer(void)
1041{
1042 if (kvm_enabled())
1043 kvm_flush_coalesced_mmio_buffer();
1044}
1045
b2a8658e
UD
1046void qemu_mutex_lock_ramlist(void)
1047{
1048 qemu_mutex_lock(&ram_list.mutex);
1049}
1050
1051void qemu_mutex_unlock_ramlist(void)
1052{
1053 qemu_mutex_unlock(&ram_list.mutex);
1054}
1055
e1e84ba0 1056#ifdef __linux__
c902760f
MT
1057
1058#include <sys/vfs.h>
1059
1060#define HUGETLBFS_MAGIC 0x958458f6
1061
fc7a5800 1062static long gethugepagesize(const char *path, Error **errp)
c902760f
MT
1063{
1064 struct statfs fs;
1065 int ret;
1066
1067 do {
9742bf26 1068 ret = statfs(path, &fs);
c902760f
MT
1069 } while (ret != 0 && errno == EINTR);
1070
1071 if (ret != 0) {
fc7a5800
HT
1072 error_setg_errno(errp, errno, "failed to get page size of file %s",
1073 path);
9742bf26 1074 return 0;
c902760f
MT
1075 }
1076
1077 if (fs.f_type != HUGETLBFS_MAGIC)
9742bf26 1078 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
c902760f
MT
1079
1080 return fs.f_bsize;
1081}
1082
04b16653
AW
1083static void *file_ram_alloc(RAMBlock *block,
1084 ram_addr_t memory,
7f56e740
PB
1085 const char *path,
1086 Error **errp)
c902760f
MT
1087{
1088 char *filename;
8ca761f6
PF
1089 char *sanitized_name;
1090 char *c;
557529dd 1091 void *area = NULL;
c902760f 1092 int fd;
557529dd 1093 uint64_t hpagesize;
fc7a5800 1094 Error *local_err = NULL;
c902760f 1095
fc7a5800
HT
1096 hpagesize = gethugepagesize(path, &local_err);
1097 if (local_err) {
1098 error_propagate(errp, local_err);
f9a49dfa 1099 goto error;
c902760f
MT
1100 }
1101
1102 if (memory < hpagesize) {
557529dd
HT
1103 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1104 "or larger than huge page size 0x%" PRIx64,
1105 memory, hpagesize);
1106 goto error;
c902760f
MT
1107 }
1108
1109 if (kvm_enabled() && !kvm_has_sync_mmu()) {
7f56e740
PB
1110 error_setg(errp,
1111 "host lacks kvm mmu notifiers, -mem-path unsupported");
f9a49dfa 1112 goto error;
c902760f
MT
1113 }
1114
8ca761f6 1115 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
83234bf2 1116 sanitized_name = g_strdup(memory_region_name(block->mr));
8ca761f6
PF
1117 for (c = sanitized_name; *c != '\0'; c++) {
1118 if (*c == '/')
1119 *c = '_';
1120 }
1121
1122 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1123 sanitized_name);
1124 g_free(sanitized_name);
c902760f
MT
1125
1126 fd = mkstemp(filename);
1127 if (fd < 0) {
7f56e740
PB
1128 error_setg_errno(errp, errno,
1129 "unable to create backing store for hugepages");
e4ada482 1130 g_free(filename);
f9a49dfa 1131 goto error;
c902760f
MT
1132 }
1133 unlink(filename);
e4ada482 1134 g_free(filename);
c902760f
MT
1135
1136 memory = (memory+hpagesize-1) & ~(hpagesize-1);
1137
1138 /*
1139 * ftruncate is not supported by hugetlbfs in older
1140 * hosts, so don't bother bailing out on errors.
1141 * If anything goes wrong with it under other filesystems,
1142 * mmap will fail.
1143 */
7f56e740 1144 if (ftruncate(fd, memory)) {
9742bf26 1145 perror("ftruncate");
7f56e740 1146 }
c902760f 1147
dbcb8981
PB
1148 area = mmap(0, memory, PROT_READ | PROT_WRITE,
1149 (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE),
1150 fd, 0);
c902760f 1151 if (area == MAP_FAILED) {
7f56e740
PB
1152 error_setg_errno(errp, errno,
1153 "unable to map backing store for hugepages");
9742bf26 1154 close(fd);
f9a49dfa 1155 goto error;
c902760f 1156 }
ef36fa14
MT
1157
1158 if (mem_prealloc) {
38183310 1159 os_mem_prealloc(fd, area, memory);
ef36fa14
MT
1160 }
1161
04b16653 1162 block->fd = fd;
c902760f 1163 return area;
f9a49dfa
MT
1164
1165error:
1166 if (mem_prealloc) {
e4d9df4f 1167 error_report("%s\n", error_get_pretty(*errp));
f9a49dfa
MT
1168 exit(1);
1169 }
1170 return NULL;
c902760f
MT
1171}
1172#endif
1173
d17b5288 1174static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653
AW
1175{
1176 RAMBlock *block, *next_block;
3e837b2c 1177 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
04b16653 1178
49cd9ac6
SH
1179 assert(size != 0); /* it would hand out same offset multiple times */
1180
a3161038 1181 if (QTAILQ_EMPTY(&ram_list.blocks))
04b16653
AW
1182 return 0;
1183
a3161038 1184 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
f15fbc4b 1185 ram_addr_t end, next = RAM_ADDR_MAX;
04b16653
AW
1186
1187 end = block->offset + block->length;
1188
a3161038 1189 QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
04b16653
AW
1190 if (next_block->offset >= end) {
1191 next = MIN(next, next_block->offset);
1192 }
1193 }
1194 if (next - end >= size && next - end < mingap) {
3e837b2c 1195 offset = end;
04b16653
AW
1196 mingap = next - end;
1197 }
1198 }
3e837b2c
AW
1199
1200 if (offset == RAM_ADDR_MAX) {
1201 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1202 (uint64_t)size);
1203 abort();
1204 }
1205
04b16653
AW
1206 return offset;
1207}
1208
652d7ec2 1209ram_addr_t last_ram_offset(void)
d17b5288
AW
1210{
1211 RAMBlock *block;
1212 ram_addr_t last = 0;
1213
a3161038 1214 QTAILQ_FOREACH(block, &ram_list.blocks, next)
d17b5288
AW
1215 last = MAX(last, block->offset + block->length);
1216
1217 return last;
1218}
1219
ddb97f1d
JB
1220static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1221{
1222 int ret;
ddb97f1d
JB
1223
1224 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
2ff3de68
MA
1225 if (!qemu_opt_get_bool(qemu_get_machine_opts(),
1226 "dump-guest-core", true)) {
ddb97f1d
JB
1227 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1228 if (ret) {
1229 perror("qemu_madvise");
1230 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1231 "but dump_guest_core=off specified\n");
1232 }
1233 }
1234}
1235
20cfe881 1236static RAMBlock *find_ram_block(ram_addr_t addr)
84b89d78 1237{
20cfe881 1238 RAMBlock *block;
84b89d78 1239
a3161038 1240 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
c5705a77 1241 if (block->offset == addr) {
20cfe881 1242 return block;
c5705a77
AK
1243 }
1244 }
20cfe881
HT
1245
1246 return NULL;
1247}
1248
1249void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1250{
1251 RAMBlock *new_block = find_ram_block(addr);
1252 RAMBlock *block;
1253
c5705a77
AK
1254 assert(new_block);
1255 assert(!new_block->idstr[0]);
84b89d78 1256
09e5ab63
AL
1257 if (dev) {
1258 char *id = qdev_get_dev_path(dev);
84b89d78
CM
1259 if (id) {
1260 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
7267c094 1261 g_free(id);
84b89d78
CM
1262 }
1263 }
1264 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1265
b2a8658e
UD
1266 /* This assumes the iothread lock is taken here too. */
1267 qemu_mutex_lock_ramlist();
a3161038 1268 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
c5705a77 1269 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
84b89d78
CM
1270 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1271 new_block->idstr);
1272 abort();
1273 }
1274 }
b2a8658e 1275 qemu_mutex_unlock_ramlist();
c5705a77
AK
1276}
1277
20cfe881
HT
1278void qemu_ram_unset_idstr(ram_addr_t addr)
1279{
1280 RAMBlock *block = find_ram_block(addr);
1281
1282 if (block) {
1283 memset(block->idstr, 0, sizeof(block->idstr));
1284 }
1285}
1286
8490fc78
LC
1287static int memory_try_enable_merging(void *addr, size_t len)
1288{
2ff3de68 1289 if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
8490fc78
LC
1290 /* disabled by the user */
1291 return 0;
1292 }
1293
1294 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1295}
1296
ef701d7b 1297static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
c5705a77 1298{
e1c57ab8 1299 RAMBlock *block;
2152f5ca
JQ
1300 ram_addr_t old_ram_size, new_ram_size;
1301
1302 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
c5705a77 1303
b2a8658e
UD
1304 /* This assumes the iothread lock is taken here too. */
1305 qemu_mutex_lock_ramlist();
e1c57ab8
PB
1306 new_block->offset = find_ram_offset(new_block->length);
1307
1308 if (!new_block->host) {
1309 if (xen_enabled()) {
1310 xen_ram_alloc(new_block->offset, new_block->length, new_block->mr);
1311 } else {
1312 new_block->host = phys_mem_alloc(new_block->length);
39228250 1313 if (!new_block->host) {
ef701d7b
HT
1314 error_setg_errno(errp, errno,
1315 "cannot set up guest memory '%s'",
1316 memory_region_name(new_block->mr));
1317 qemu_mutex_unlock_ramlist();
1318 return -1;
39228250 1319 }
e1c57ab8 1320 memory_try_enable_merging(new_block->host, new_block->length);
6977dfe6 1321 }
c902760f 1322 }
94a6b54f 1323
abb26d63
PB
1324 /* Keep the list sorted from biggest to smallest block. */
1325 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1326 if (block->length < new_block->length) {
1327 break;
1328 }
1329 }
1330 if (block) {
1331 QTAILQ_INSERT_BEFORE(block, new_block, next);
1332 } else {
1333 QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1334 }
0d6d3c87 1335 ram_list.mru_block = NULL;
94a6b54f 1336
f798b07f 1337 ram_list.version++;
b2a8658e 1338 qemu_mutex_unlock_ramlist();
f798b07f 1339
2152f5ca
JQ
1340 new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1341
1342 if (new_ram_size > old_ram_size) {
1ab4c8ce
JQ
1343 int i;
1344 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1345 ram_list.dirty_memory[i] =
1346 bitmap_zero_extend(ram_list.dirty_memory[i],
1347 old_ram_size, new_ram_size);
1348 }
2152f5ca 1349 }
e1c57ab8 1350 cpu_physical_memory_set_dirty_range(new_block->offset, new_block->length);
94a6b54f 1351
e1c57ab8
PB
1352 qemu_ram_setup_dump(new_block->host, new_block->length);
1353 qemu_madvise(new_block->host, new_block->length, QEMU_MADV_HUGEPAGE);
1354 qemu_madvise(new_block->host, new_block->length, QEMU_MADV_DONTFORK);
ddb97f1d 1355
e1c57ab8
PB
1356 if (kvm_enabled()) {
1357 kvm_setup_guest_memory(new_block->host, new_block->length);
1358 }
6f0437e8 1359
94a6b54f
PB
1360 return new_block->offset;
1361}
e9a1ab19 1362
0b183fc8 1363#ifdef __linux__
e1c57ab8 1364ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
dbcb8981 1365 bool share, const char *mem_path,
7f56e740 1366 Error **errp)
e1c57ab8
PB
1367{
1368 RAMBlock *new_block;
ef701d7b
HT
1369 ram_addr_t addr;
1370 Error *local_err = NULL;
e1c57ab8
PB
1371
1372 if (xen_enabled()) {
7f56e740
PB
1373 error_setg(errp, "-mem-path not supported with Xen");
1374 return -1;
e1c57ab8
PB
1375 }
1376
1377 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1378 /*
1379 * file_ram_alloc() needs to allocate just like
1380 * phys_mem_alloc, but we haven't bothered to provide
1381 * a hook there.
1382 */
7f56e740
PB
1383 error_setg(errp,
1384 "-mem-path not supported with this accelerator");
1385 return -1;
e1c57ab8
PB
1386 }
1387
1388 size = TARGET_PAGE_ALIGN(size);
1389 new_block = g_malloc0(sizeof(*new_block));
1390 new_block->mr = mr;
1391 new_block->length = size;
dbcb8981 1392 new_block->flags = share ? RAM_SHARED : 0;
7f56e740
PB
1393 new_block->host = file_ram_alloc(new_block, size,
1394 mem_path, errp);
1395 if (!new_block->host) {
1396 g_free(new_block);
1397 return -1;
1398 }
1399
ef701d7b
HT
1400 addr = ram_block_add(new_block, &local_err);
1401 if (local_err) {
1402 g_free(new_block);
1403 error_propagate(errp, local_err);
1404 return -1;
1405 }
1406 return addr;
e1c57ab8 1407}
0b183fc8 1408#endif
e1c57ab8
PB
1409
1410ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
ef701d7b 1411 MemoryRegion *mr, Error **errp)
e1c57ab8
PB
1412{
1413 RAMBlock *new_block;
ef701d7b
HT
1414 ram_addr_t addr;
1415 Error *local_err = NULL;
e1c57ab8
PB
1416
1417 size = TARGET_PAGE_ALIGN(size);
1418 new_block = g_malloc0(sizeof(*new_block));
1419 new_block->mr = mr;
1420 new_block->length = size;
1421 new_block->fd = -1;
1422 new_block->host = host;
1423 if (host) {
7bd4f430 1424 new_block->flags |= RAM_PREALLOC;
e1c57ab8 1425 }
ef701d7b
HT
1426 addr = ram_block_add(new_block, &local_err);
1427 if (local_err) {
1428 g_free(new_block);
1429 error_propagate(errp, local_err);
1430 return -1;
1431 }
1432 return addr;
e1c57ab8
PB
1433}
1434
ef701d7b 1435ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
6977dfe6 1436{
ef701d7b 1437 return qemu_ram_alloc_from_ptr(size, NULL, mr, errp);
6977dfe6
YT
1438}
1439
1f2e98b6
AW
1440void qemu_ram_free_from_ptr(ram_addr_t addr)
1441{
1442 RAMBlock *block;
1443
b2a8658e
UD
1444 /* This assumes the iothread lock is taken here too. */
1445 qemu_mutex_lock_ramlist();
a3161038 1446 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1f2e98b6 1447 if (addr == block->offset) {
a3161038 1448 QTAILQ_REMOVE(&ram_list.blocks, block, next);
0d6d3c87 1449 ram_list.mru_block = NULL;
f798b07f 1450 ram_list.version++;
7267c094 1451 g_free(block);
b2a8658e 1452 break;
1f2e98b6
AW
1453 }
1454 }
b2a8658e 1455 qemu_mutex_unlock_ramlist();
1f2e98b6
AW
1456}
1457
c227f099 1458void qemu_ram_free(ram_addr_t addr)
e9a1ab19 1459{
04b16653
AW
1460 RAMBlock *block;
1461
b2a8658e
UD
1462 /* This assumes the iothread lock is taken here too. */
1463 qemu_mutex_lock_ramlist();
a3161038 1464 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
04b16653 1465 if (addr == block->offset) {
a3161038 1466 QTAILQ_REMOVE(&ram_list.blocks, block, next);
0d6d3c87 1467 ram_list.mru_block = NULL;
f798b07f 1468 ram_list.version++;
7bd4f430 1469 if (block->flags & RAM_PREALLOC) {
cd19cfa2 1470 ;
dfeaf2ab
MA
1471 } else if (xen_enabled()) {
1472 xen_invalidate_map_cache_entry(block->host);
089f3f76 1473#ifndef _WIN32
3435f395
MA
1474 } else if (block->fd >= 0) {
1475 munmap(block->host, block->length);
1476 close(block->fd);
089f3f76 1477#endif
04b16653 1478 } else {
dfeaf2ab 1479 qemu_anon_ram_free(block->host, block->length);
04b16653 1480 }
7267c094 1481 g_free(block);
b2a8658e 1482 break;
04b16653
AW
1483 }
1484 }
b2a8658e 1485 qemu_mutex_unlock_ramlist();
04b16653 1486
e9a1ab19
FB
1487}
1488
cd19cfa2
HY
1489#ifndef _WIN32
1490void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1491{
1492 RAMBlock *block;
1493 ram_addr_t offset;
1494 int flags;
1495 void *area, *vaddr;
1496
a3161038 1497 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
cd19cfa2
HY
1498 offset = addr - block->offset;
1499 if (offset < block->length) {
1500 vaddr = block->host + offset;
7bd4f430 1501 if (block->flags & RAM_PREALLOC) {
cd19cfa2 1502 ;
dfeaf2ab
MA
1503 } else if (xen_enabled()) {
1504 abort();
cd19cfa2
HY
1505 } else {
1506 flags = MAP_FIXED;
1507 munmap(vaddr, length);
3435f395 1508 if (block->fd >= 0) {
dbcb8981
PB
1509 flags |= (block->flags & RAM_SHARED ?
1510 MAP_SHARED : MAP_PRIVATE);
3435f395
MA
1511 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1512 flags, block->fd, offset);
cd19cfa2 1513 } else {
2eb9fbaa
MA
1514 /*
1515 * Remap needs to match alloc. Accelerators that
1516 * set phys_mem_alloc never remap. If they did,
1517 * we'd need a remap hook here.
1518 */
1519 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1520
cd19cfa2
HY
1521 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1522 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1523 flags, -1, 0);
cd19cfa2
HY
1524 }
1525 if (area != vaddr) {
f15fbc4b
AP
1526 fprintf(stderr, "Could not remap addr: "
1527 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
cd19cfa2
HY
1528 length, addr);
1529 exit(1);
1530 }
8490fc78 1531 memory_try_enable_merging(vaddr, length);
ddb97f1d 1532 qemu_ram_setup_dump(vaddr, length);
cd19cfa2
HY
1533 }
1534 return;
1535 }
1536 }
1537}
1538#endif /* !_WIN32 */
1539
a35ba7be
PB
1540int qemu_get_ram_fd(ram_addr_t addr)
1541{
1542 RAMBlock *block = qemu_get_ram_block(addr);
1543
1544 return block->fd;
1545}
1546
3fd74b84
DM
1547void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
1548{
1549 RAMBlock *block = qemu_get_ram_block(addr);
1550
1551 return block->host;
1552}
1553
1b5ec234
PB
1554/* Return a host pointer to ram allocated with qemu_ram_alloc.
1555 With the exception of the softmmu code in this file, this should
1556 only be used for local memory (e.g. video ram) that the device owns,
1557 and knows it isn't going to access beyond the end of the block.
1558
1559 It should not be used for general purpose DMA.
1560 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1561 */
1562void *qemu_get_ram_ptr(ram_addr_t addr)
1563{
1564 RAMBlock *block = qemu_get_ram_block(addr);
1565
0d6d3c87
PB
1566 if (xen_enabled()) {
1567 /* We need to check if the requested address is in the RAM
1568 * because we don't want to map the entire memory in QEMU.
1569 * In that case just map until the end of the page.
1570 */
1571 if (block->offset == 0) {
1572 return xen_map_cache(addr, 0, 0);
1573 } else if (block->host == NULL) {
1574 block->host =
1575 xen_map_cache(block->offset, block->length, 1);
1576 }
1577 }
1578 return block->host + (addr - block->offset);
dc828ca1
PB
1579}
1580
38bee5dc
SS
1581/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1582 * but takes a size argument */
cb85f7ab 1583static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
38bee5dc 1584{
8ab934f9
SS
1585 if (*size == 0) {
1586 return NULL;
1587 }
868bb33f 1588 if (xen_enabled()) {
e41d7c69 1589 return xen_map_cache(addr, *size, 1);
868bb33f 1590 } else {
38bee5dc
SS
1591 RAMBlock *block;
1592
a3161038 1593 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
38bee5dc
SS
1594 if (addr - block->offset < block->length) {
1595 if (addr - block->offset + *size > block->length)
1596 *size = block->length - addr + block->offset;
1597 return block->host + (addr - block->offset);
1598 }
1599 }
1600
1601 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1602 abort();
38bee5dc
SS
1603 }
1604}
1605
7443b437
PB
1606/* Some of the softmmu routines need to translate from a host pointer
1607 (typically a TLB entry) back to a ram offset. */
1b5ec234 1608MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 1609{
94a6b54f
PB
1610 RAMBlock *block;
1611 uint8_t *host = ptr;
1612
868bb33f 1613 if (xen_enabled()) {
e41d7c69 1614 *ram_addr = xen_ram_addr_from_mapcache(ptr);
1b5ec234 1615 return qemu_get_ram_block(*ram_addr)->mr;
712c2b41
SS
1616 }
1617
23887b79
PB
1618 block = ram_list.mru_block;
1619 if (block && block->host && host - block->host < block->length) {
1620 goto found;
1621 }
1622
a3161038 1623 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
432d268c
JN
1624 /* This case append when the block is not mapped. */
1625 if (block->host == NULL) {
1626 continue;
1627 }
f471a17e 1628 if (host - block->host < block->length) {
23887b79 1629 goto found;
f471a17e 1630 }
94a6b54f 1631 }
432d268c 1632
1b5ec234 1633 return NULL;
23887b79
PB
1634
1635found:
1636 *ram_addr = block->offset + (host - block->host);
1b5ec234 1637 return block->mr;
e890261f 1638}
f471a17e 1639
a8170e5e 1640static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
0e0df1e2 1641 uint64_t val, unsigned size)
9fa3e853 1642{
52159192 1643 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
0e0df1e2 1644 tb_invalidate_phys_page_fast(ram_addr, size);
3a7d929e 1645 }
0e0df1e2
AK
1646 switch (size) {
1647 case 1:
1648 stb_p(qemu_get_ram_ptr(ram_addr), val);
1649 break;
1650 case 2:
1651 stw_p(qemu_get_ram_ptr(ram_addr), val);
1652 break;
1653 case 4:
1654 stl_p(qemu_get_ram_ptr(ram_addr), val);
1655 break;
1656 default:
1657 abort();
3a7d929e 1658 }
6886867e 1659 cpu_physical_memory_set_dirty_range_nocode(ram_addr, size);
f23db169
FB
1660 /* we remove the notdirty callback only if the code has been
1661 flushed */
a2cd8c85 1662 if (!cpu_physical_memory_is_clean(ram_addr)) {
4917cf44 1663 CPUArchState *env = current_cpu->env_ptr;
93afeade 1664 tlb_set_dirty(env, current_cpu->mem_io_vaddr);
4917cf44 1665 }
9fa3e853
FB
1666}
1667
b018ddf6
PB
1668static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1669 unsigned size, bool is_write)
1670{
1671 return is_write;
1672}
1673
0e0df1e2 1674static const MemoryRegionOps notdirty_mem_ops = {
0e0df1e2 1675 .write = notdirty_mem_write,
b018ddf6 1676 .valid.accepts = notdirty_mem_accepts,
0e0df1e2 1677 .endianness = DEVICE_NATIVE_ENDIAN,
1ccde1cb
FB
1678};
1679
0f459d16 1680/* Generate a debug exception if a watchpoint has been hit. */
05068c0d 1681static void check_watchpoint(int offset, int len, int flags)
0f459d16 1682{
93afeade
AF
1683 CPUState *cpu = current_cpu;
1684 CPUArchState *env = cpu->env_ptr;
06d55cc1 1685 target_ulong pc, cs_base;
0f459d16 1686 target_ulong vaddr;
a1d1bb31 1687 CPUWatchpoint *wp;
06d55cc1 1688 int cpu_flags;
0f459d16 1689
ff4700b0 1690 if (cpu->watchpoint_hit) {
06d55cc1
AL
1691 /* We re-entered the check after replacing the TB. Now raise
1692 * the debug interrupt so that is will trigger after the
1693 * current instruction. */
93afeade 1694 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
06d55cc1
AL
1695 return;
1696 }
93afeade 1697 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
ff4700b0 1698 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
05068c0d
PM
1699 if (cpu_watchpoint_address_matches(wp, vaddr, len)
1700 && (wp->flags & flags)) {
08225676
PM
1701 if (flags == BP_MEM_READ) {
1702 wp->flags |= BP_WATCHPOINT_HIT_READ;
1703 } else {
1704 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
1705 }
1706 wp->hitaddr = vaddr;
ff4700b0
AF
1707 if (!cpu->watchpoint_hit) {
1708 cpu->watchpoint_hit = wp;
239c51a5 1709 tb_check_watchpoint(cpu);
6e140f28 1710 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
27103424 1711 cpu->exception_index = EXCP_DEBUG;
5638d180 1712 cpu_loop_exit(cpu);
6e140f28
AL
1713 } else {
1714 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
648f034c 1715 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
0ea8cb88 1716 cpu_resume_from_signal(cpu, NULL);
6e140f28 1717 }
06d55cc1 1718 }
6e140f28
AL
1719 } else {
1720 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
1721 }
1722 }
1723}
1724
6658ffb8
PB
1725/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1726 so these check for a hit then pass through to the normal out-of-line
1727 phys routines. */
a8170e5e 1728static uint64_t watch_mem_read(void *opaque, hwaddr addr,
1ec9b909 1729 unsigned size)
6658ffb8 1730{
05068c0d 1731 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, BP_MEM_READ);
1ec9b909 1732 switch (size) {
2c17449b 1733 case 1: return ldub_phys(&address_space_memory, addr);
41701aa4 1734 case 2: return lduw_phys(&address_space_memory, addr);
fdfba1a2 1735 case 4: return ldl_phys(&address_space_memory, addr);
1ec9b909
AK
1736 default: abort();
1737 }
6658ffb8
PB
1738}
1739
a8170e5e 1740static void watch_mem_write(void *opaque, hwaddr addr,
1ec9b909 1741 uint64_t val, unsigned size)
6658ffb8 1742{
05068c0d 1743 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, BP_MEM_WRITE);
1ec9b909 1744 switch (size) {
67364150 1745 case 1:
db3be60d 1746 stb_phys(&address_space_memory, addr, val);
67364150
MF
1747 break;
1748 case 2:
5ce5944d 1749 stw_phys(&address_space_memory, addr, val);
67364150
MF
1750 break;
1751 case 4:
ab1da857 1752 stl_phys(&address_space_memory, addr, val);
67364150 1753 break;
1ec9b909
AK
1754 default: abort();
1755 }
6658ffb8
PB
1756}
1757
1ec9b909
AK
1758static const MemoryRegionOps watch_mem_ops = {
1759 .read = watch_mem_read,
1760 .write = watch_mem_write,
1761 .endianness = DEVICE_NATIVE_ENDIAN,
6658ffb8 1762};
6658ffb8 1763
a8170e5e 1764static uint64_t subpage_read(void *opaque, hwaddr addr,
70c68e44 1765 unsigned len)
db7b5426 1766{
acc9d80b
JK
1767 subpage_t *subpage = opaque;
1768 uint8_t buf[4];
791af8c8 1769
db7b5426 1770#if defined(DEBUG_SUBPAGE)
016e9d62 1771 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
acc9d80b 1772 subpage, len, addr);
db7b5426 1773#endif
acc9d80b
JK
1774 address_space_read(subpage->as, addr + subpage->base, buf, len);
1775 switch (len) {
1776 case 1:
1777 return ldub_p(buf);
1778 case 2:
1779 return lduw_p(buf);
1780 case 4:
1781 return ldl_p(buf);
1782 default:
1783 abort();
1784 }
db7b5426
BS
1785}
1786
a8170e5e 1787static void subpage_write(void *opaque, hwaddr addr,
70c68e44 1788 uint64_t value, unsigned len)
db7b5426 1789{
acc9d80b
JK
1790 subpage_t *subpage = opaque;
1791 uint8_t buf[4];
1792
db7b5426 1793#if defined(DEBUG_SUBPAGE)
016e9d62 1794 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
acc9d80b
JK
1795 " value %"PRIx64"\n",
1796 __func__, subpage, len, addr, value);
db7b5426 1797#endif
acc9d80b
JK
1798 switch (len) {
1799 case 1:
1800 stb_p(buf, value);
1801 break;
1802 case 2:
1803 stw_p(buf, value);
1804 break;
1805 case 4:
1806 stl_p(buf, value);
1807 break;
1808 default:
1809 abort();
1810 }
1811 address_space_write(subpage->as, addr + subpage->base, buf, len);
db7b5426
BS
1812}
1813
c353e4cc 1814static bool subpage_accepts(void *opaque, hwaddr addr,
016e9d62 1815 unsigned len, bool is_write)
c353e4cc 1816{
acc9d80b 1817 subpage_t *subpage = opaque;
c353e4cc 1818#if defined(DEBUG_SUBPAGE)
016e9d62 1819 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
acc9d80b 1820 __func__, subpage, is_write ? 'w' : 'r', len, addr);
c353e4cc
PB
1821#endif
1822
acc9d80b 1823 return address_space_access_valid(subpage->as, addr + subpage->base,
016e9d62 1824 len, is_write);
c353e4cc
PB
1825}
1826
70c68e44
AK
1827static const MemoryRegionOps subpage_ops = {
1828 .read = subpage_read,
1829 .write = subpage_write,
c353e4cc 1830 .valid.accepts = subpage_accepts,
70c68e44 1831 .endianness = DEVICE_NATIVE_ENDIAN,
db7b5426
BS
1832};
1833
c227f099 1834static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 1835 uint16_t section)
db7b5426
BS
1836{
1837 int idx, eidx;
1838
1839 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1840 return -1;
1841 idx = SUBPAGE_IDX(start);
1842 eidx = SUBPAGE_IDX(end);
1843#if defined(DEBUG_SUBPAGE)
016e9d62
AK
1844 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
1845 __func__, mmio, start, end, idx, eidx, section);
db7b5426 1846#endif
db7b5426 1847 for (; idx <= eidx; idx++) {
5312bd8b 1848 mmio->sub_section[idx] = section;
db7b5426
BS
1849 }
1850
1851 return 0;
1852}
1853
acc9d80b 1854static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
db7b5426 1855{
c227f099 1856 subpage_t *mmio;
db7b5426 1857
7267c094 1858 mmio = g_malloc0(sizeof(subpage_t));
1eec614b 1859
acc9d80b 1860 mmio->as = as;
1eec614b 1861 mmio->base = base;
2c9b15ca 1862 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
b4fefef9 1863 NULL, TARGET_PAGE_SIZE);
b3b00c78 1864 mmio->iomem.subpage = true;
db7b5426 1865#if defined(DEBUG_SUBPAGE)
016e9d62
AK
1866 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
1867 mmio, base, TARGET_PAGE_SIZE);
db7b5426 1868#endif
b41aac4f 1869 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
db7b5426
BS
1870
1871 return mmio;
1872}
1873
a656e22f
PC
1874static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
1875 MemoryRegion *mr)
5312bd8b 1876{
a656e22f 1877 assert(as);
5312bd8b 1878 MemoryRegionSection section = {
a656e22f 1879 .address_space = as,
5312bd8b
AK
1880 .mr = mr,
1881 .offset_within_address_space = 0,
1882 .offset_within_region = 0,
052e87b0 1883 .size = int128_2_64(),
5312bd8b
AK
1884 };
1885
53cb28cb 1886 return phys_section_add(map, &section);
5312bd8b
AK
1887}
1888
77717094 1889MemoryRegion *iotlb_to_region(AddressSpace *as, hwaddr index)
aa102231 1890{
77717094 1891 return as->dispatch->map.sections[index & ~TARGET_PAGE_MASK].mr;
aa102231
AK
1892}
1893
e9179ce1
AK
1894static void io_mem_init(void)
1895{
1f6245e5 1896 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
2c9b15ca 1897 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
1f6245e5 1898 NULL, UINT64_MAX);
2c9b15ca 1899 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
1f6245e5 1900 NULL, UINT64_MAX);
2c9b15ca 1901 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
1f6245e5 1902 NULL, UINT64_MAX);
e9179ce1
AK
1903}
1904
ac1970fb 1905static void mem_begin(MemoryListener *listener)
00752703
PB
1906{
1907 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
53cb28cb
MA
1908 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
1909 uint16_t n;
1910
a656e22f 1911 n = dummy_section(&d->map, as, &io_mem_unassigned);
53cb28cb 1912 assert(n == PHYS_SECTION_UNASSIGNED);
a656e22f 1913 n = dummy_section(&d->map, as, &io_mem_notdirty);
53cb28cb 1914 assert(n == PHYS_SECTION_NOTDIRTY);
a656e22f 1915 n = dummy_section(&d->map, as, &io_mem_rom);
53cb28cb 1916 assert(n == PHYS_SECTION_ROM);
a656e22f 1917 n = dummy_section(&d->map, as, &io_mem_watch);
53cb28cb 1918 assert(n == PHYS_SECTION_WATCH);
00752703 1919
9736e55b 1920 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
00752703
PB
1921 d->as = as;
1922 as->next_dispatch = d;
1923}
1924
1925static void mem_commit(MemoryListener *listener)
ac1970fb 1926{
89ae337a 1927 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
0475d94f
PB
1928 AddressSpaceDispatch *cur = as->dispatch;
1929 AddressSpaceDispatch *next = as->next_dispatch;
1930
53cb28cb 1931 phys_page_compact_all(next, next->map.nodes_nb);
b35ba30f 1932
0475d94f 1933 as->dispatch = next;
b41aac4f 1934
53cb28cb
MA
1935 if (cur) {
1936 phys_sections_free(&cur->map);
1937 g_free(cur);
1938 }
9affd6fc
PB
1939}
1940
1d71148e 1941static void tcg_commit(MemoryListener *listener)
50c1e149 1942{
182735ef 1943 CPUState *cpu;
117712c3
AK
1944
1945 /* since each CPU stores ram addresses in its TLB cache, we must
1946 reset the modified entries */
1947 /* XXX: slow ! */
bdc44640 1948 CPU_FOREACH(cpu) {
33bde2e1
EI
1949 /* FIXME: Disentangle the cpu.h circular files deps so we can
1950 directly get the right CPU from listener. */
1951 if (cpu->tcg_as_listener != listener) {
1952 continue;
1953 }
00c8cb0a 1954 tlb_flush(cpu, 1);
117712c3 1955 }
50c1e149
AK
1956}
1957
93632747
AK
1958static void core_log_global_start(MemoryListener *listener)
1959{
981fdf23 1960 cpu_physical_memory_set_dirty_tracking(true);
93632747
AK
1961}
1962
1963static void core_log_global_stop(MemoryListener *listener)
1964{
981fdf23 1965 cpu_physical_memory_set_dirty_tracking(false);
93632747
AK
1966}
1967
93632747 1968static MemoryListener core_memory_listener = {
93632747
AK
1969 .log_global_start = core_log_global_start,
1970 .log_global_stop = core_log_global_stop,
ac1970fb 1971 .priority = 1,
93632747
AK
1972};
1973
ac1970fb
AK
1974void address_space_init_dispatch(AddressSpace *as)
1975{
00752703 1976 as->dispatch = NULL;
89ae337a 1977 as->dispatch_listener = (MemoryListener) {
ac1970fb 1978 .begin = mem_begin,
00752703 1979 .commit = mem_commit,
ac1970fb
AK
1980 .region_add = mem_add,
1981 .region_nop = mem_add,
1982 .priority = 0,
1983 };
89ae337a 1984 memory_listener_register(&as->dispatch_listener, as);
ac1970fb
AK
1985}
1986
83f3c251
AK
1987void address_space_destroy_dispatch(AddressSpace *as)
1988{
1989 AddressSpaceDispatch *d = as->dispatch;
1990
89ae337a 1991 memory_listener_unregister(&as->dispatch_listener);
83f3c251
AK
1992 g_free(d);
1993 as->dispatch = NULL;
1994}
1995
62152b8a
AK
1996static void memory_map_init(void)
1997{
7267c094 1998 system_memory = g_malloc(sizeof(*system_memory));
03f49957 1999
57271d63 2000 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
7dca8043 2001 address_space_init(&address_space_memory, system_memory, "memory");
309cb471 2002
7267c094 2003 system_io = g_malloc(sizeof(*system_io));
3bb28b72
JK
2004 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2005 65536);
7dca8043 2006 address_space_init(&address_space_io, system_io, "I/O");
93632747 2007
f6790af6 2008 memory_listener_register(&core_memory_listener, &address_space_memory);
62152b8a
AK
2009}
2010
2011MemoryRegion *get_system_memory(void)
2012{
2013 return system_memory;
2014}
2015
309cb471
AK
2016MemoryRegion *get_system_io(void)
2017{
2018 return system_io;
2019}
2020
e2eef170
PB
2021#endif /* !defined(CONFIG_USER_ONLY) */
2022
13eb76e0
FB
2023/* physical memory access (slow version, mainly for debug) */
2024#if defined(CONFIG_USER_ONLY)
f17ec444 2025int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
a68fe89c 2026 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2027{
2028 int l, flags;
2029 target_ulong page;
53a5960a 2030 void * p;
13eb76e0
FB
2031
2032 while (len > 0) {
2033 page = addr & TARGET_PAGE_MASK;
2034 l = (page + TARGET_PAGE_SIZE) - addr;
2035 if (l > len)
2036 l = len;
2037 flags = page_get_flags(page);
2038 if (!(flags & PAGE_VALID))
a68fe89c 2039 return -1;
13eb76e0
FB
2040 if (is_write) {
2041 if (!(flags & PAGE_WRITE))
a68fe89c 2042 return -1;
579a97f7 2043 /* XXX: this code should not depend on lock_user */
72fb7daa 2044 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 2045 return -1;
72fb7daa
AJ
2046 memcpy(p, buf, l);
2047 unlock_user(p, addr, l);
13eb76e0
FB
2048 } else {
2049 if (!(flags & PAGE_READ))
a68fe89c 2050 return -1;
579a97f7 2051 /* XXX: this code should not depend on lock_user */
72fb7daa 2052 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 2053 return -1;
72fb7daa 2054 memcpy(buf, p, l);
5b257578 2055 unlock_user(p, addr, 0);
13eb76e0
FB
2056 }
2057 len -= l;
2058 buf += l;
2059 addr += l;
2060 }
a68fe89c 2061 return 0;
13eb76e0 2062}
8df1cd07 2063
13eb76e0 2064#else
51d7a9eb 2065
a8170e5e
AK
2066static void invalidate_and_set_dirty(hwaddr addr,
2067 hwaddr length)
51d7a9eb 2068{
a2cd8c85 2069 if (cpu_physical_memory_is_clean(addr)) {
51d7a9eb
AP
2070 /* invalidate code */
2071 tb_invalidate_phys_page_range(addr, addr + length, 0);
2072 /* set dirty bit */
6886867e 2073 cpu_physical_memory_set_dirty_range_nocode(addr, length);
51d7a9eb 2074 }
e226939d 2075 xen_modified_memory(addr, length);
51d7a9eb
AP
2076}
2077
23326164 2078static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
82f2563f 2079{
e1622f4b 2080 unsigned access_size_max = mr->ops->valid.max_access_size;
23326164
RH
2081
2082 /* Regions are assumed to support 1-4 byte accesses unless
2083 otherwise specified. */
23326164
RH
2084 if (access_size_max == 0) {
2085 access_size_max = 4;
2086 }
2087
2088 /* Bound the maximum access by the alignment of the address. */
2089 if (!mr->ops->impl.unaligned) {
2090 unsigned align_size_max = addr & -addr;
2091 if (align_size_max != 0 && align_size_max < access_size_max) {
2092 access_size_max = align_size_max;
2093 }
82f2563f 2094 }
23326164
RH
2095
2096 /* Don't attempt accesses larger than the maximum. */
2097 if (l > access_size_max) {
2098 l = access_size_max;
82f2563f 2099 }
098178f2
PB
2100 if (l & (l - 1)) {
2101 l = 1 << (qemu_fls(l) - 1);
2102 }
23326164
RH
2103
2104 return l;
82f2563f
PB
2105}
2106
fd8aaa76 2107bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
ac1970fb 2108 int len, bool is_write)
13eb76e0 2109{
149f54b5 2110 hwaddr l;
13eb76e0 2111 uint8_t *ptr;
791af8c8 2112 uint64_t val;
149f54b5 2113 hwaddr addr1;
5c8a00ce 2114 MemoryRegion *mr;
fd8aaa76 2115 bool error = false;
3b46e624 2116
13eb76e0 2117 while (len > 0) {
149f54b5 2118 l = len;
5c8a00ce 2119 mr = address_space_translate(as, addr, &addr1, &l, is_write);
3b46e624 2120
13eb76e0 2121 if (is_write) {
5c8a00ce
PB
2122 if (!memory_access_is_direct(mr, is_write)) {
2123 l = memory_access_size(mr, l, addr1);
4917cf44 2124 /* XXX: could force current_cpu to NULL to avoid
6a00d601 2125 potential bugs */
23326164
RH
2126 switch (l) {
2127 case 8:
2128 /* 64 bit write access */
2129 val = ldq_p(buf);
2130 error |= io_mem_write(mr, addr1, val, 8);
2131 break;
2132 case 4:
1c213d19 2133 /* 32 bit write access */
c27004ec 2134 val = ldl_p(buf);
5c8a00ce 2135 error |= io_mem_write(mr, addr1, val, 4);
23326164
RH
2136 break;
2137 case 2:
1c213d19 2138 /* 16 bit write access */
c27004ec 2139 val = lduw_p(buf);
5c8a00ce 2140 error |= io_mem_write(mr, addr1, val, 2);
23326164
RH
2141 break;
2142 case 1:
1c213d19 2143 /* 8 bit write access */
c27004ec 2144 val = ldub_p(buf);
5c8a00ce 2145 error |= io_mem_write(mr, addr1, val, 1);
23326164
RH
2146 break;
2147 default:
2148 abort();
13eb76e0 2149 }
2bbfa05d 2150 } else {
5c8a00ce 2151 addr1 += memory_region_get_ram_addr(mr);
13eb76e0 2152 /* RAM case */
5579c7f3 2153 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 2154 memcpy(ptr, buf, l);
51d7a9eb 2155 invalidate_and_set_dirty(addr1, l);
13eb76e0
FB
2156 }
2157 } else {
5c8a00ce 2158 if (!memory_access_is_direct(mr, is_write)) {
13eb76e0 2159 /* I/O case */
5c8a00ce 2160 l = memory_access_size(mr, l, addr1);
23326164
RH
2161 switch (l) {
2162 case 8:
2163 /* 64 bit read access */
2164 error |= io_mem_read(mr, addr1, &val, 8);
2165 stq_p(buf, val);
2166 break;
2167 case 4:
13eb76e0 2168 /* 32 bit read access */
5c8a00ce 2169 error |= io_mem_read(mr, addr1, &val, 4);
c27004ec 2170 stl_p(buf, val);
23326164
RH
2171 break;
2172 case 2:
13eb76e0 2173 /* 16 bit read access */
5c8a00ce 2174 error |= io_mem_read(mr, addr1, &val, 2);
c27004ec 2175 stw_p(buf, val);
23326164
RH
2176 break;
2177 case 1:
1c213d19 2178 /* 8 bit read access */
5c8a00ce 2179 error |= io_mem_read(mr, addr1, &val, 1);
c27004ec 2180 stb_p(buf, val);
23326164
RH
2181 break;
2182 default:
2183 abort();
13eb76e0
FB
2184 }
2185 } else {
2186 /* RAM case */
5c8a00ce 2187 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
f3705d53 2188 memcpy(buf, ptr, l);
13eb76e0
FB
2189 }
2190 }
2191 len -= l;
2192 buf += l;
2193 addr += l;
2194 }
fd8aaa76
PB
2195
2196 return error;
13eb76e0 2197}
8df1cd07 2198
fd8aaa76 2199bool address_space_write(AddressSpace *as, hwaddr addr,
ac1970fb
AK
2200 const uint8_t *buf, int len)
2201{
fd8aaa76 2202 return address_space_rw(as, addr, (uint8_t *)buf, len, true);
ac1970fb
AK
2203}
2204
fd8aaa76 2205bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
ac1970fb 2206{
fd8aaa76 2207 return address_space_rw(as, addr, buf, len, false);
ac1970fb
AK
2208}
2209
2210
a8170e5e 2211void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
ac1970fb
AK
2212 int len, int is_write)
2213{
fd8aaa76 2214 address_space_rw(&address_space_memory, addr, buf, len, is_write);
ac1970fb
AK
2215}
2216
582b55a9
AG
2217enum write_rom_type {
2218 WRITE_DATA,
2219 FLUSH_CACHE,
2220};
2221
2a221651 2222static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
582b55a9 2223 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
d0ecd2aa 2224{
149f54b5 2225 hwaddr l;
d0ecd2aa 2226 uint8_t *ptr;
149f54b5 2227 hwaddr addr1;
5c8a00ce 2228 MemoryRegion *mr;
3b46e624 2229
d0ecd2aa 2230 while (len > 0) {
149f54b5 2231 l = len;
2a221651 2232 mr = address_space_translate(as, addr, &addr1, &l, true);
3b46e624 2233
5c8a00ce
PB
2234 if (!(memory_region_is_ram(mr) ||
2235 memory_region_is_romd(mr))) {
d0ecd2aa
FB
2236 /* do nothing */
2237 } else {
5c8a00ce 2238 addr1 += memory_region_get_ram_addr(mr);
d0ecd2aa 2239 /* ROM/RAM case */
5579c7f3 2240 ptr = qemu_get_ram_ptr(addr1);
582b55a9
AG
2241 switch (type) {
2242 case WRITE_DATA:
2243 memcpy(ptr, buf, l);
2244 invalidate_and_set_dirty(addr1, l);
2245 break;
2246 case FLUSH_CACHE:
2247 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2248 break;
2249 }
d0ecd2aa
FB
2250 }
2251 len -= l;
2252 buf += l;
2253 addr += l;
2254 }
2255}
2256
582b55a9 2257/* used for ROM loading : can write in RAM and ROM */
2a221651 2258void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
582b55a9
AG
2259 const uint8_t *buf, int len)
2260{
2a221651 2261 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
582b55a9
AG
2262}
2263
2264void cpu_flush_icache_range(hwaddr start, int len)
2265{
2266 /*
2267 * This function should do the same thing as an icache flush that was
2268 * triggered from within the guest. For TCG we are always cache coherent,
2269 * so there is no need to flush anything. For KVM / Xen we need to flush
2270 * the host's instruction cache at least.
2271 */
2272 if (tcg_enabled()) {
2273 return;
2274 }
2275
2a221651
EI
2276 cpu_physical_memory_write_rom_internal(&address_space_memory,
2277 start, NULL, len, FLUSH_CACHE);
582b55a9
AG
2278}
2279
6d16c2f8 2280typedef struct {
d3e71559 2281 MemoryRegion *mr;
6d16c2f8 2282 void *buffer;
a8170e5e
AK
2283 hwaddr addr;
2284 hwaddr len;
6d16c2f8
AL
2285} BounceBuffer;
2286
2287static BounceBuffer bounce;
2288
ba223c29
AL
2289typedef struct MapClient {
2290 void *opaque;
2291 void (*callback)(void *opaque);
72cf2d4f 2292 QLIST_ENTRY(MapClient) link;
ba223c29
AL
2293} MapClient;
2294
72cf2d4f
BS
2295static QLIST_HEAD(map_client_list, MapClient) map_client_list
2296 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
2297
2298void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2299{
7267c094 2300 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
2301
2302 client->opaque = opaque;
2303 client->callback = callback;
72cf2d4f 2304 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
2305 return client;
2306}
2307
8b9c99d9 2308static void cpu_unregister_map_client(void *_client)
ba223c29
AL
2309{
2310 MapClient *client = (MapClient *)_client;
2311
72cf2d4f 2312 QLIST_REMOVE(client, link);
7267c094 2313 g_free(client);
ba223c29
AL
2314}
2315
2316static void cpu_notify_map_clients(void)
2317{
2318 MapClient *client;
2319
72cf2d4f
BS
2320 while (!QLIST_EMPTY(&map_client_list)) {
2321 client = QLIST_FIRST(&map_client_list);
ba223c29 2322 client->callback(client->opaque);
34d5e948 2323 cpu_unregister_map_client(client);
ba223c29
AL
2324 }
2325}
2326
51644ab7
PB
2327bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2328{
5c8a00ce 2329 MemoryRegion *mr;
51644ab7
PB
2330 hwaddr l, xlat;
2331
2332 while (len > 0) {
2333 l = len;
5c8a00ce
PB
2334 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2335 if (!memory_access_is_direct(mr, is_write)) {
2336 l = memory_access_size(mr, l, addr);
2337 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
51644ab7
PB
2338 return false;
2339 }
2340 }
2341
2342 len -= l;
2343 addr += l;
2344 }
2345 return true;
2346}
2347
6d16c2f8
AL
2348/* Map a physical memory region into a host virtual address.
2349 * May map a subset of the requested range, given by and returned in *plen.
2350 * May return NULL if resources needed to perform the mapping are exhausted.
2351 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
2352 * Use cpu_register_map_client() to know when retrying the map operation is
2353 * likely to succeed.
6d16c2f8 2354 */
ac1970fb 2355void *address_space_map(AddressSpace *as,
a8170e5e
AK
2356 hwaddr addr,
2357 hwaddr *plen,
ac1970fb 2358 bool is_write)
6d16c2f8 2359{
a8170e5e 2360 hwaddr len = *plen;
e3127ae0
PB
2361 hwaddr done = 0;
2362 hwaddr l, xlat, base;
2363 MemoryRegion *mr, *this_mr;
2364 ram_addr_t raddr;
6d16c2f8 2365
e3127ae0
PB
2366 if (len == 0) {
2367 return NULL;
2368 }
38bee5dc 2369
e3127ae0
PB
2370 l = len;
2371 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2372 if (!memory_access_is_direct(mr, is_write)) {
2373 if (bounce.buffer) {
2374 return NULL;
6d16c2f8 2375 }
e85d9db5
KW
2376 /* Avoid unbounded allocations */
2377 l = MIN(l, TARGET_PAGE_SIZE);
2378 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
e3127ae0
PB
2379 bounce.addr = addr;
2380 bounce.len = l;
d3e71559
PB
2381
2382 memory_region_ref(mr);
2383 bounce.mr = mr;
e3127ae0
PB
2384 if (!is_write) {
2385 address_space_read(as, addr, bounce.buffer, l);
8ab934f9 2386 }
6d16c2f8 2387
e3127ae0
PB
2388 *plen = l;
2389 return bounce.buffer;
2390 }
2391
2392 base = xlat;
2393 raddr = memory_region_get_ram_addr(mr);
2394
2395 for (;;) {
6d16c2f8
AL
2396 len -= l;
2397 addr += l;
e3127ae0
PB
2398 done += l;
2399 if (len == 0) {
2400 break;
2401 }
2402
2403 l = len;
2404 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2405 if (this_mr != mr || xlat != base + done) {
2406 break;
2407 }
6d16c2f8 2408 }
e3127ae0 2409
d3e71559 2410 memory_region_ref(mr);
e3127ae0
PB
2411 *plen = done;
2412 return qemu_ram_ptr_length(raddr + base, plen);
6d16c2f8
AL
2413}
2414
ac1970fb 2415/* Unmaps a memory region previously mapped by address_space_map().
6d16c2f8
AL
2416 * Will also mark the memory as dirty if is_write == 1. access_len gives
2417 * the amount of memory that was actually read or written by the caller.
2418 */
a8170e5e
AK
2419void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2420 int is_write, hwaddr access_len)
6d16c2f8
AL
2421{
2422 if (buffer != bounce.buffer) {
d3e71559
PB
2423 MemoryRegion *mr;
2424 ram_addr_t addr1;
2425
2426 mr = qemu_ram_addr_from_host(buffer, &addr1);
2427 assert(mr != NULL);
6d16c2f8 2428 if (is_write) {
6886867e 2429 invalidate_and_set_dirty(addr1, access_len);
6d16c2f8 2430 }
868bb33f 2431 if (xen_enabled()) {
e41d7c69 2432 xen_invalidate_map_cache_entry(buffer);
050a0ddf 2433 }
d3e71559 2434 memory_region_unref(mr);
6d16c2f8
AL
2435 return;
2436 }
2437 if (is_write) {
ac1970fb 2438 address_space_write(as, bounce.addr, bounce.buffer, access_len);
6d16c2f8 2439 }
f8a83245 2440 qemu_vfree(bounce.buffer);
6d16c2f8 2441 bounce.buffer = NULL;
d3e71559 2442 memory_region_unref(bounce.mr);
ba223c29 2443 cpu_notify_map_clients();
6d16c2f8 2444}
d0ecd2aa 2445
a8170e5e
AK
2446void *cpu_physical_memory_map(hwaddr addr,
2447 hwaddr *plen,
ac1970fb
AK
2448 int is_write)
2449{
2450 return address_space_map(&address_space_memory, addr, plen, is_write);
2451}
2452
a8170e5e
AK
2453void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2454 int is_write, hwaddr access_len)
ac1970fb
AK
2455{
2456 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2457}
2458
8df1cd07 2459/* warning: addr must be aligned */
fdfba1a2 2460static inline uint32_t ldl_phys_internal(AddressSpace *as, hwaddr addr,
1e78bcc1 2461 enum device_endian endian)
8df1cd07 2462{
8df1cd07 2463 uint8_t *ptr;
791af8c8 2464 uint64_t val;
5c8a00ce 2465 MemoryRegion *mr;
149f54b5
PB
2466 hwaddr l = 4;
2467 hwaddr addr1;
8df1cd07 2468
fdfba1a2 2469 mr = address_space_translate(as, addr, &addr1, &l, false);
5c8a00ce 2470 if (l < 4 || !memory_access_is_direct(mr, false)) {
8df1cd07 2471 /* I/O case */
5c8a00ce 2472 io_mem_read(mr, addr1, &val, 4);
1e78bcc1
AG
2473#if defined(TARGET_WORDS_BIGENDIAN)
2474 if (endian == DEVICE_LITTLE_ENDIAN) {
2475 val = bswap32(val);
2476 }
2477#else
2478 if (endian == DEVICE_BIG_ENDIAN) {
2479 val = bswap32(val);
2480 }
2481#endif
8df1cd07
FB
2482 } else {
2483 /* RAM case */
5c8a00ce 2484 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2485 & TARGET_PAGE_MASK)
149f54b5 2486 + addr1);
1e78bcc1
AG
2487 switch (endian) {
2488 case DEVICE_LITTLE_ENDIAN:
2489 val = ldl_le_p(ptr);
2490 break;
2491 case DEVICE_BIG_ENDIAN:
2492 val = ldl_be_p(ptr);
2493 break;
2494 default:
2495 val = ldl_p(ptr);
2496 break;
2497 }
8df1cd07
FB
2498 }
2499 return val;
2500}
2501
fdfba1a2 2502uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2503{
fdfba1a2 2504 return ldl_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2505}
2506
fdfba1a2 2507uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2508{
fdfba1a2 2509 return ldl_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2510}
2511
fdfba1a2 2512uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2513{
fdfba1a2 2514 return ldl_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2515}
2516
84b7b8e7 2517/* warning: addr must be aligned */
2c17449b 2518static inline uint64_t ldq_phys_internal(AddressSpace *as, hwaddr addr,
1e78bcc1 2519 enum device_endian endian)
84b7b8e7 2520{
84b7b8e7
FB
2521 uint8_t *ptr;
2522 uint64_t val;
5c8a00ce 2523 MemoryRegion *mr;
149f54b5
PB
2524 hwaddr l = 8;
2525 hwaddr addr1;
84b7b8e7 2526
2c17449b 2527 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2528 false);
2529 if (l < 8 || !memory_access_is_direct(mr, false)) {
84b7b8e7 2530 /* I/O case */
5c8a00ce 2531 io_mem_read(mr, addr1, &val, 8);
968a5627
PB
2532#if defined(TARGET_WORDS_BIGENDIAN)
2533 if (endian == DEVICE_LITTLE_ENDIAN) {
2534 val = bswap64(val);
2535 }
2536#else
2537 if (endian == DEVICE_BIG_ENDIAN) {
2538 val = bswap64(val);
2539 }
84b7b8e7
FB
2540#endif
2541 } else {
2542 /* RAM case */
5c8a00ce 2543 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2544 & TARGET_PAGE_MASK)
149f54b5 2545 + addr1);
1e78bcc1
AG
2546 switch (endian) {
2547 case DEVICE_LITTLE_ENDIAN:
2548 val = ldq_le_p(ptr);
2549 break;
2550 case DEVICE_BIG_ENDIAN:
2551 val = ldq_be_p(ptr);
2552 break;
2553 default:
2554 val = ldq_p(ptr);
2555 break;
2556 }
84b7b8e7
FB
2557 }
2558 return val;
2559}
2560
2c17449b 2561uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2562{
2c17449b 2563 return ldq_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2564}
2565
2c17449b 2566uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2567{
2c17449b 2568 return ldq_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2569}
2570
2c17449b 2571uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2572{
2c17449b 2573 return ldq_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2574}
2575
aab33094 2576/* XXX: optimize */
2c17449b 2577uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
aab33094
FB
2578{
2579 uint8_t val;
2c17449b 2580 address_space_rw(as, addr, &val, 1, 0);
aab33094
FB
2581 return val;
2582}
2583
733f0b02 2584/* warning: addr must be aligned */
41701aa4 2585static inline uint32_t lduw_phys_internal(AddressSpace *as, hwaddr addr,
1e78bcc1 2586 enum device_endian endian)
aab33094 2587{
733f0b02
MT
2588 uint8_t *ptr;
2589 uint64_t val;
5c8a00ce 2590 MemoryRegion *mr;
149f54b5
PB
2591 hwaddr l = 2;
2592 hwaddr addr1;
733f0b02 2593
41701aa4 2594 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2595 false);
2596 if (l < 2 || !memory_access_is_direct(mr, false)) {
733f0b02 2597 /* I/O case */
5c8a00ce 2598 io_mem_read(mr, addr1, &val, 2);
1e78bcc1
AG
2599#if defined(TARGET_WORDS_BIGENDIAN)
2600 if (endian == DEVICE_LITTLE_ENDIAN) {
2601 val = bswap16(val);
2602 }
2603#else
2604 if (endian == DEVICE_BIG_ENDIAN) {
2605 val = bswap16(val);
2606 }
2607#endif
733f0b02
MT
2608 } else {
2609 /* RAM case */
5c8a00ce 2610 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2611 & TARGET_PAGE_MASK)
149f54b5 2612 + addr1);
1e78bcc1
AG
2613 switch (endian) {
2614 case DEVICE_LITTLE_ENDIAN:
2615 val = lduw_le_p(ptr);
2616 break;
2617 case DEVICE_BIG_ENDIAN:
2618 val = lduw_be_p(ptr);
2619 break;
2620 default:
2621 val = lduw_p(ptr);
2622 break;
2623 }
733f0b02
MT
2624 }
2625 return val;
aab33094
FB
2626}
2627
41701aa4 2628uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2629{
41701aa4 2630 return lduw_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2631}
2632
41701aa4 2633uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2634{
41701aa4 2635 return lduw_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2636}
2637
41701aa4 2638uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2639{
41701aa4 2640 return lduw_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2641}
2642
8df1cd07
FB
2643/* warning: addr must be aligned. The ram page is not masked as dirty
2644 and the code inside is not invalidated. It is useful if the dirty
2645 bits are used to track modified PTEs */
2198a121 2646void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
8df1cd07 2647{
8df1cd07 2648 uint8_t *ptr;
5c8a00ce 2649 MemoryRegion *mr;
149f54b5
PB
2650 hwaddr l = 4;
2651 hwaddr addr1;
8df1cd07 2652
2198a121 2653 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2654 true);
2655 if (l < 4 || !memory_access_is_direct(mr, true)) {
2656 io_mem_write(mr, addr1, val, 4);
8df1cd07 2657 } else {
5c8a00ce 2658 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 2659 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 2660 stl_p(ptr, val);
74576198
AL
2661
2662 if (unlikely(in_migration)) {
a2cd8c85 2663 if (cpu_physical_memory_is_clean(addr1)) {
74576198
AL
2664 /* invalidate code */
2665 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2666 /* set dirty bit */
6886867e 2667 cpu_physical_memory_set_dirty_range_nocode(addr1, 4);
74576198
AL
2668 }
2669 }
8df1cd07
FB
2670 }
2671}
2672
2673/* warning: addr must be aligned */
ab1da857
EI
2674static inline void stl_phys_internal(AddressSpace *as,
2675 hwaddr addr, uint32_t val,
1e78bcc1 2676 enum device_endian endian)
8df1cd07 2677{
8df1cd07 2678 uint8_t *ptr;
5c8a00ce 2679 MemoryRegion *mr;
149f54b5
PB
2680 hwaddr l = 4;
2681 hwaddr addr1;
8df1cd07 2682
ab1da857 2683 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2684 true);
2685 if (l < 4 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
2686#if defined(TARGET_WORDS_BIGENDIAN)
2687 if (endian == DEVICE_LITTLE_ENDIAN) {
2688 val = bswap32(val);
2689 }
2690#else
2691 if (endian == DEVICE_BIG_ENDIAN) {
2692 val = bswap32(val);
2693 }
2694#endif
5c8a00ce 2695 io_mem_write(mr, addr1, val, 4);
8df1cd07 2696 } else {
8df1cd07 2697 /* RAM case */
5c8a00ce 2698 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 2699 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2700 switch (endian) {
2701 case DEVICE_LITTLE_ENDIAN:
2702 stl_le_p(ptr, val);
2703 break;
2704 case DEVICE_BIG_ENDIAN:
2705 stl_be_p(ptr, val);
2706 break;
2707 default:
2708 stl_p(ptr, val);
2709 break;
2710 }
51d7a9eb 2711 invalidate_and_set_dirty(addr1, 4);
8df1cd07
FB
2712 }
2713}
2714
ab1da857 2715void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2716{
ab1da857 2717 stl_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2718}
2719
ab1da857 2720void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2721{
ab1da857 2722 stl_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2723}
2724
ab1da857 2725void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2726{
ab1da857 2727 stl_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2728}
2729
aab33094 2730/* XXX: optimize */
db3be60d 2731void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
aab33094
FB
2732{
2733 uint8_t v = val;
db3be60d 2734 address_space_rw(as, addr, &v, 1, 1);
aab33094
FB
2735}
2736
733f0b02 2737/* warning: addr must be aligned */
5ce5944d
EI
2738static inline void stw_phys_internal(AddressSpace *as,
2739 hwaddr addr, uint32_t val,
1e78bcc1 2740 enum device_endian endian)
aab33094 2741{
733f0b02 2742 uint8_t *ptr;
5c8a00ce 2743 MemoryRegion *mr;
149f54b5
PB
2744 hwaddr l = 2;
2745 hwaddr addr1;
733f0b02 2746
5ce5944d 2747 mr = address_space_translate(as, addr, &addr1, &l, true);
5c8a00ce 2748 if (l < 2 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
2749#if defined(TARGET_WORDS_BIGENDIAN)
2750 if (endian == DEVICE_LITTLE_ENDIAN) {
2751 val = bswap16(val);
2752 }
2753#else
2754 if (endian == DEVICE_BIG_ENDIAN) {
2755 val = bswap16(val);
2756 }
2757#endif
5c8a00ce 2758 io_mem_write(mr, addr1, val, 2);
733f0b02 2759 } else {
733f0b02 2760 /* RAM case */
5c8a00ce 2761 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
733f0b02 2762 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2763 switch (endian) {
2764 case DEVICE_LITTLE_ENDIAN:
2765 stw_le_p(ptr, val);
2766 break;
2767 case DEVICE_BIG_ENDIAN:
2768 stw_be_p(ptr, val);
2769 break;
2770 default:
2771 stw_p(ptr, val);
2772 break;
2773 }
51d7a9eb 2774 invalidate_and_set_dirty(addr1, 2);
733f0b02 2775 }
aab33094
FB
2776}
2777
5ce5944d 2778void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2779{
5ce5944d 2780 stw_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2781}
2782
5ce5944d 2783void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2784{
5ce5944d 2785 stw_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2786}
2787
5ce5944d 2788void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2789{
5ce5944d 2790 stw_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2791}
2792
aab33094 2793/* XXX: optimize */
f606604f 2794void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
aab33094
FB
2795{
2796 val = tswap64(val);
f606604f 2797 address_space_rw(as, addr, (void *) &val, 8, 1);
aab33094
FB
2798}
2799
f606604f 2800void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
1e78bcc1
AG
2801{
2802 val = cpu_to_le64(val);
f606604f 2803 address_space_rw(as, addr, (void *) &val, 8, 1);
1e78bcc1
AG
2804}
2805
f606604f 2806void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
1e78bcc1
AG
2807{
2808 val = cpu_to_be64(val);
f606604f 2809 address_space_rw(as, addr, (void *) &val, 8, 1);
1e78bcc1
AG
2810}
2811
5e2972fd 2812/* virtual memory access for debug (includes writing to ROM) */
f17ec444 2813int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
b448f2f3 2814 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2815{
2816 int l;
a8170e5e 2817 hwaddr phys_addr;
9b3c35e0 2818 target_ulong page;
13eb76e0
FB
2819
2820 while (len > 0) {
2821 page = addr & TARGET_PAGE_MASK;
f17ec444 2822 phys_addr = cpu_get_phys_page_debug(cpu, page);
13eb76e0
FB
2823 /* if no physical page mapped, return an error */
2824 if (phys_addr == -1)
2825 return -1;
2826 l = (page + TARGET_PAGE_SIZE) - addr;
2827 if (l > len)
2828 l = len;
5e2972fd 2829 phys_addr += (addr & ~TARGET_PAGE_MASK);
2e38847b
EI
2830 if (is_write) {
2831 cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
2832 } else {
2833 address_space_rw(cpu->as, phys_addr, buf, l, 0);
2834 }
13eb76e0
FB
2835 len -= l;
2836 buf += l;
2837 addr += l;
2838 }
2839 return 0;
2840}
a68fe89c 2841#endif
13eb76e0 2842
8e4a424b
BS
2843/*
2844 * A helper function for the _utterly broken_ virtio device model to find out if
2845 * it's running on a big endian machine. Don't do this at home kids!
2846 */
98ed8ecf
GK
2847bool target_words_bigendian(void);
2848bool target_words_bigendian(void)
8e4a424b
BS
2849{
2850#if defined(TARGET_WORDS_BIGENDIAN)
2851 return true;
2852#else
2853 return false;
2854#endif
2855}
2856
76f35538 2857#ifndef CONFIG_USER_ONLY
a8170e5e 2858bool cpu_physical_memory_is_io(hwaddr phys_addr)
76f35538 2859{
5c8a00ce 2860 MemoryRegion*mr;
149f54b5 2861 hwaddr l = 1;
76f35538 2862
5c8a00ce
PB
2863 mr = address_space_translate(&address_space_memory,
2864 phys_addr, &phys_addr, &l, false);
76f35538 2865
5c8a00ce
PB
2866 return !(memory_region_is_ram(mr) ||
2867 memory_region_is_romd(mr));
76f35538 2868}
bd2fa51f
MH
2869
2870void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
2871{
2872 RAMBlock *block;
2873
2874 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
2875 func(block->host, block->offset, block->length, opaque);
2876 }
2877}
ec3f8c99 2878#endif