/* exec.c (thirdparty/qemu.git), as of commit "cpu: initialize cpu->exception_index on reset" */
/*
 *  Virtual page mapping
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

#include "qemu/range.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
static bool in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED (1 << 1)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];

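/*
 * Editor's note: PhysPageMap backs the radix tree rooted at
 * AddressSpaceDispatch.phys_map.  Interior PhysPageEntry values index into
 * 'nodes'; leaves (skip == 0) index into 'sections'.  For example, with
 * ADDR_SPACE_BITS == 64 and a target using 4 KiB pages
 * (TARGET_PAGE_BITS == 12), P_L2_LEVELS evaluates to
 * ((64 - 12 - 1) / 9) + 1 == 6 levels of 512-entry nodes.
 */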
typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map)
{
    unsigned i;
    uint32_t ret;

    ret = map->nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);
    for (i = 0; i < P_L2_SIZE; ++i) {
        map->nodes[ret][i].skip = 1;
        map->nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map);
        p = map->nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < P_L2_SIZE; i++) {
                p[i].skip = 0;
                p[i].ptr = PHYS_SECTION_UNASSIGNED;
            }
        }
    } else {
        p = map->nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

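/*
 * Editor's note: phys_page_find() below visits at most P_L2_LEVELS nodes.
 * After phys_page_compact() has run, an entry's 'skip' field can be greater
 * than one, letting the walk drop several levels of the tree in a single step.
 */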
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}

static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}

MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    hwaddr len = *plen;

    for (;;) {
        section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        len = MIN(page, len);
    }

    *plen = len;
    *xlat = addr;
    return mr;
}

MemoryRegionSection *
address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                  hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = 0;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->exception_index != 0;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (VMStateSubsection[]) {
        {
            .vmsd = &vmstate_cpu_common_exception_index,
            .needed = cpu_common_exception_index_needed,
        } , {
            /* empty */
        }
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
{
    /* We only support one address space per cpu at the moment.  */
    assert(cpu->as == as);

    if (cpu->tcg_as_listener) {
        memory_listener_unregister(cpu->tcg_as_listener);
    } else {
        cpu->tcg_as_listener = g_new0(MemoryListener, 1);
    }
    cpu->tcg_as_listener->commit = tcg_commit;
    memory_listener_register(cpu->tcg_as_listener, as);
}
#endif

void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUState *some_cpu;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = 0;
    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&cpu->breakpoints);
    QTAILQ_INIT(&cpu->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->as = &address_space_memory;
    cpu->thread_id = qemu_get_thread_id();
#endif
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)

{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}

#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            CPUArchState *env = cpu->env_ptr;
            tb_flush(env);
        }
    }
#endif
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
                                     unsigned client)
{
    if (length == 0)
        return;
    cpu_physical_memory_clear_dirty_range(start, length, client);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }
}

static void cpu_physical_memory_set_dirty_tracking(bool enable)
{
    in_migration = enable;
}

hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        iotlb = section - section->address_space->dispatch->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}


static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#ifdef __linux__

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC 0x958458f6

static long gethugepagesize(const char *path, Error **errp)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        error_setg_errno(errp, errno, "failed to get page size of file %s",
                         path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path,
                            Error **errp)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area = NULL;
    int fd;
    uint64_t hpagesize;
    Error *local_err = NULL;

    hpagesize = gethugepagesize(path, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto error;
    }
    block->mr->align = hpagesize;

    if (memory < hpagesize) {
        error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
                   "or larger than huge page size 0x%" PRIx64,
                   memory, hpagesize);
        goto error;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        error_setg(errp,
                   "host lacks kvm mmu notifiers, -mem-path unsupported");
        goto error;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(memory_region_name(block->mr));
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        error_setg_errno(errp, errno,
                         "unable to create backing store for hugepages");
        g_free(filename);
        goto error;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory)) {
        perror("ftruncate");
    }

    area = mmap(0, memory, PROT_READ | PROT_WRITE,
                (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE),
                fd, 0);
    if (area == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "unable to map backing store for hugepages");
        close(fd);
        goto error;
    }

    if (mem_prealloc) {
        os_mem_prealloc(fd, area, memory);
    }

    block->fd = fd;
    return area;

error:
    if (mem_prealloc) {
        error_report("%s\n", error_get_pretty(*errp));
        exit(1);
    }
    return NULL;
}
#endif

static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QTAILQ_EMPTY(&ram_list.blocks))
        return 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    if (!qemu_opt_get_bool(qemu_get_machine_opts(),
                           "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                    "but dump_guest_core=off specified\n");
        }
    }
}

static RAMBlock *find_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            return block;
        }
    }

    return NULL;
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block = find_ram_block(addr);
    RAMBlock *block;

    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}

void qemu_ram_unset_idstr(ram_addr_t addr)
{
    RAMBlock *block = find_ram_block(addr);

    if (block) {
        memset(block->idstr, 0, sizeof(block->idstr));
    }
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
{
    RAMBlock *block;
    ram_addr_t old_ram_size, new_ram_size;

    old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    new_block->offset = find_ram_offset(new_block->length);

    if (!new_block->host) {
        if (xen_enabled()) {
            xen_ram_alloc(new_block->offset, new_block->length, new_block->mr);
        } else {
            new_block->host = phys_mem_alloc(new_block->length,
                                             &new_block->mr->align);
            if (!new_block->host) {
                error_setg_errno(errp, errno,
                                 "cannot set up guest memory '%s'",
                                 memory_region_name(new_block->mr));
                qemu_mutex_unlock_ramlist();
                return -1;
            }
            memory_try_enable_merging(new_block->host, new_block->length);
        }
    }

    /* Keep the list sorted from biggest to smallest block.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
            break;
        }
    }
    if (block) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
    } else {
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    if (new_ram_size > old_ram_size) {
        int i;
        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
            ram_list.dirty_memory[i] =
                bitmap_zero_extend(ram_list.dirty_memory[i],
                                   old_ram_size, new_ram_size);
        }
    }
    cpu_physical_memory_set_dirty_range(new_block->offset, new_block->length);

    qemu_ram_setup_dump(new_block->host, new_block->length);
    qemu_madvise(new_block->host, new_block->length, QEMU_MADV_HUGEPAGE);
    qemu_madvise(new_block->host, new_block->length, QEMU_MADV_DONTFORK);

    if (kvm_enabled()) {
        kvm_setup_guest_memory(new_block->host, new_block->length);
    }

    return new_block->offset;
}

#ifdef __linux__
ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                    bool share, const char *mem_path,
                                    Error **errp)
{
    RAMBlock *new_block;
    ram_addr_t addr;
    Error *local_err = NULL;

    if (xen_enabled()) {
        error_setg(errp, "-mem-path not supported with Xen");
        return -1;
    }

    if (phys_mem_alloc != qemu_anon_ram_alloc) {
        /*
         * file_ram_alloc() needs to allocate just like
         * phys_mem_alloc, but we haven't bothered to provide
         * a hook there.
         */
        error_setg(errp,
                   "-mem-path not supported with this accelerator");
        return -1;
    }

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->length = size;
    new_block->flags = share ? RAM_SHARED : 0;
    new_block->host = file_ram_alloc(new_block, size,
                                     mem_path, errp);
    if (!new_block->host) {
        g_free(new_block);
        return -1;
    }

    addr = ram_block_add(new_block, &local_err);
    if (local_err) {
        g_free(new_block);
        error_propagate(errp, local_err);
        return -1;
    }
    return addr;
}
#endif

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr, Error **errp)
{
    RAMBlock *new_block;
    ram_addr_t addr;
    Error *local_err = NULL;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->length = size;
    new_block->fd = -1;
    new_block->host = host;
    if (host) {
        new_block->flags |= RAM_PREALLOC;
    }
    addr = ram_block_add(new_block, &local_err);
    if (local_err) {
        g_free(new_block);
        error_propagate(errp, local_err);
        return -1;
    }
    return addr;
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr, errp);
}

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            if (block->flags & RAM_PREALLOC) {
                ;
            } else if (xen_enabled()) {
                xen_invalidate_map_cache_entry(block->host);
#ifndef _WIN32
            } else if (block->fd >= 0) {
                munmap(block->host, block->length);
                close(block->fd);
#endif
            } else {
                qemu_anon_ram_free(block->host, block->length);
            }
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();

}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = ramblock_ptr(block, offset);
            if (block->flags & RAM_PREALLOC) {
                ;
            } else if (xen_enabled()) {
                abort();
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (block->fd >= 0) {
                    flags |= (block->flags & RAM_SHARED ?
                              MAP_SHARED : MAP_PRIVATE);
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, block->fd, offset);
                } else {
                    /*
                     * Remap needs to match alloc.  Accelerators that
                     * set phys_mem_alloc never remap.  If they did,
                     * we'd need a remap hook here.
                     */
                    assert(phys_mem_alloc == qemu_anon_ram_alloc);

                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */

int qemu_get_ram_fd(ram_addr_t addr)
{
    RAMBlock *block = qemu_get_ram_block(addr);

    return block->fd;
}

void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
{
    RAMBlock *block = qemu_get_ram_block(addr);

    return ramblock_ptr(block, 0);
}

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block = qemu_get_ram_block(addr);

    if (xen_enabled()) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        } else if (block->host == NULL) {
            block->host =
                xen_map_cache(block->offset, block->length, 1);
        }
    }
    return ramblock_ptr(block, addr - block->offset);
}

/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QTAILQ_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return ramblock_ptr(block, addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return qemu_get_ram_block(*ram_addr)->mr;
    }

    block = ram_list.mru_block;
    if (block && block->host && host - block->host < block->length) {
        goto found;
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            goto found;
        }
    }

    return NULL;

found:
    *ram_addr = block->offset + (host - block->host);
    return block->mr;
}

static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_page_fast(ram_addr, size);
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    cpu_physical_memory_set_dirty_range_nocode(ram_addr, size);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (!cpu_physical_memory_is_clean(ram_addr)) {
        CPUArchState *env = current_cpu->env_ptr;
        tlb_set_dirty(env, current_cpu->mem_io_vaddr);
    }
}

static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)
{
    return is_write;
}

static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len, int flags)
{
    CPUState *cpu = current_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (cpu->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, len)
            && (wp->flags & flags)) {
            if (flags == BP_MEM_READ) {
                wp->flags |= BP_WATCHPOINT_HIT_READ;
            } else {
                wp->flags |= BP_WATCHPOINT_HIT_WRITE;
            }
            wp->hitaddr = vaddr;
            if (!cpu->watchpoint_hit) {
                cpu->watchpoint_hit = wp;
                tb_check_watchpoint(cpu);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    cpu->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(cpu);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(cpu, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(&address_space_memory, addr);
    case 2: return lduw_phys(&address_space_memory, addr);
    case 4: return ldl_phys(&address_space_memory, addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, hwaddr addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(&address_space_memory, addr, val);
        break;
    case 2:
        stw_phys(&address_space_memory, addr, val);
        break;
    case 4:
        stl_phys(&address_space_memory, addr, val);
        break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t subpage_read(void *opaque, hwaddr addr,
                             unsigned len)
{
    subpage_t *subpage = opaque;
    uint8_t buf[4];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
#endif
    address_space_read(subpage->as, addr + subpage->base, buf, len);
    switch (len) {
    case 1:
        return ldub_p(buf);
    case 2:
        return lduw_p(buf);
    case 4:
        return ldl_p(buf);
    default:
        abort();
    }
}

static void subpage_write(void *opaque, hwaddr addr,
                          uint64_t value, unsigned len)
{
    subpage_t *subpage = opaque;
    uint8_t buf[4];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
#endif
    switch (len) {
    case 1:
        stb_p(buf, value);
        break;
    case 2:
        stw_p(buf, value);
        break;
    case 4:
        stl_p(buf, value);
        break;
    default:
        abort();
    }
    address_space_write(subpage->as, addr + subpage->base, buf, len);
}

static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned len, bool is_write)
{
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', len, addr);
#endif

    return address_space_access_valid(subpage->as, addr + subpage->base,
                                      len, is_write);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->as = as;
    mmio->base = base;
    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
b4fefef9 1866 NULL, TARGET_PAGE_SIZE);
b3b00c78 1867 mmio->iomem.subpage = true;
db7b5426 1868#if defined(DEBUG_SUBPAGE)
016e9d62
AK
1869 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
1870 mmio, base, TARGET_PAGE_SIZE);
db7b5426 1871#endif
b41aac4f 1872 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
db7b5426
BS
1873
1874 return mmio;
1875}
1876
a656e22f
PC
1877static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
1878 MemoryRegion *mr)
5312bd8b 1879{
a656e22f 1880 assert(as);
5312bd8b 1881 MemoryRegionSection section = {
a656e22f 1882 .address_space = as,
5312bd8b
AK
1883 .mr = mr,
1884 .offset_within_address_space = 0,
1885 .offset_within_region = 0,
052e87b0 1886 .size = int128_2_64(),
5312bd8b
AK
1887 };
1888
53cb28cb 1889 return phys_section_add(map, &section);
5312bd8b
AK
1890}
1891
77717094 1892MemoryRegion *iotlb_to_region(AddressSpace *as, hwaddr index)
aa102231 1893{
77717094 1894 return as->dispatch->map.sections[index & ~TARGET_PAGE_MASK].mr;
aa102231
AK
1895}
1896
e9179ce1
AK
1897static void io_mem_init(void)
1898{
1f6245e5 1899 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
2c9b15ca 1900 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
1f6245e5 1901 NULL, UINT64_MAX);
2c9b15ca 1902 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
1f6245e5 1903 NULL, UINT64_MAX);
2c9b15ca 1904 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
1f6245e5 1905 NULL, UINT64_MAX);
e9179ce1
AK
1906}
1907
ac1970fb 1908static void mem_begin(MemoryListener *listener)
00752703
PB
1909{
1910 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
53cb28cb
MA
1911 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
1912 uint16_t n;
1913
a656e22f 1914 n = dummy_section(&d->map, as, &io_mem_unassigned);
53cb28cb 1915 assert(n == PHYS_SECTION_UNASSIGNED);
a656e22f 1916 n = dummy_section(&d->map, as, &io_mem_notdirty);
53cb28cb 1917 assert(n == PHYS_SECTION_NOTDIRTY);
a656e22f 1918 n = dummy_section(&d->map, as, &io_mem_rom);
53cb28cb 1919 assert(n == PHYS_SECTION_ROM);
a656e22f 1920 n = dummy_section(&d->map, as, &io_mem_watch);
53cb28cb 1921 assert(n == PHYS_SECTION_WATCH);
00752703 1922
9736e55b 1923 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
00752703
PB
1924 d->as = as;
1925 as->next_dispatch = d;
1926}
1927
1928static void mem_commit(MemoryListener *listener)
ac1970fb 1929{
89ae337a 1930 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
0475d94f
PB
1931 AddressSpaceDispatch *cur = as->dispatch;
1932 AddressSpaceDispatch *next = as->next_dispatch;
1933
53cb28cb 1934 phys_page_compact_all(next, next->map.nodes_nb);
b35ba30f 1935
0475d94f 1936 as->dispatch = next;
b41aac4f 1937
53cb28cb
MA
1938 if (cur) {
1939 phys_sections_free(&cur->map);
1940 g_free(cur);
1941 }
9affd6fc
PB
1942}
1943
1d71148e 1944static void tcg_commit(MemoryListener *listener)
50c1e149 1945{
182735ef 1946 CPUState *cpu;
117712c3
AK
1947
1948 /* since each CPU stores ram addresses in its TLB cache, we must
1949 reset the modified entries */
1950 /* XXX: slow ! */
bdc44640 1951 CPU_FOREACH(cpu) {
33bde2e1
EI
1952 /* FIXME: Disentangle the cpu.h circular file deps so we can
1953 directly get the right CPU from the listener. */
1954 if (cpu->tcg_as_listener != listener) {
1955 continue;
1956 }
00c8cb0a 1957 tlb_flush(cpu, 1);
117712c3 1958 }
50c1e149
AK
1959}
1960
93632747
AK
1961static void core_log_global_start(MemoryListener *listener)
1962{
981fdf23 1963 cpu_physical_memory_set_dirty_tracking(true);
93632747
AK
1964}
1965
1966static void core_log_global_stop(MemoryListener *listener)
1967{
981fdf23 1968 cpu_physical_memory_set_dirty_tracking(false);
93632747
AK
1969}
1970
93632747 1971static MemoryListener core_memory_listener = {
93632747
AK
1972 .log_global_start = core_log_global_start,
1973 .log_global_stop = core_log_global_stop,
ac1970fb 1974 .priority = 1,
93632747
AK
1975};
1976
ac1970fb
AK
1977void address_space_init_dispatch(AddressSpace *as)
1978{
00752703 1979 as->dispatch = NULL;
89ae337a 1980 as->dispatch_listener = (MemoryListener) {
ac1970fb 1981 .begin = mem_begin,
00752703 1982 .commit = mem_commit,
ac1970fb
AK
1983 .region_add = mem_add,
1984 .region_nop = mem_add,
1985 .priority = 0,
1986 };
89ae337a 1987 memory_listener_register(&as->dispatch_listener, as);
ac1970fb
AK
1988}
1989
83f3c251
AK
1990void address_space_destroy_dispatch(AddressSpace *as)
1991{
1992 AddressSpaceDispatch *d = as->dispatch;
1993
89ae337a 1994 memory_listener_unregister(&as->dispatch_listener);
83f3c251
AK
1995 g_free(d);
1996 as->dispatch = NULL;
1997}
1998
62152b8a
AK
1999static void memory_map_init(void)
2000{
7267c094 2001 system_memory = g_malloc(sizeof(*system_memory));
03f49957 2002
57271d63 2003 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
7dca8043 2004 address_space_init(&address_space_memory, system_memory, "memory");
309cb471 2005
7267c094 2006 system_io = g_malloc(sizeof(*system_io));
3bb28b72
JK
2007 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2008 65536);
7dca8043 2009 address_space_init(&address_space_io, system_io, "I/O");
93632747 2010
f6790af6 2011 memory_listener_register(&core_memory_listener, &address_space_memory);
62152b8a
AK
2012}
2013
2014MemoryRegion *get_system_memory(void)
2015{
2016 return system_memory;
2017}
2018
309cb471
AK
2019MemoryRegion *get_system_io(void)
2020{
2021 return system_io;
2022}
2023
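
As a usage sketch for the two accessors above: a device model typically creates its own MemoryRegion and attaches it as a subregion of the system memory (or I/O) region. The device type, ops table and base address below are placeholders; memory_region_add_subregion() is assumed from include/exec/memory.h, and memory_region_init_io() is used exactly as in io_mem_init() above.

/* Sketch only, not part of exec.c: map a 4 KiB MMIO window for a
 * hypothetical device at guest-physical 0xfeed0000. */
#include "exec/memory.h"
#include "exec/address-spaces.h"

typedef struct ExampleDev {
    MemoryRegion mmio;                          /* hypothetical device state */
} ExampleDev;

extern const MemoryRegionOps example_mmio_ops;  /* .read/.write callbacks, defined elsewhere */

static void example_dev_map(ExampleDev *dev)
{
    memory_region_init_io(&dev->mmio, NULL, &example_mmio_ops, dev,
                          "example-mmio", 0x1000);
    memory_region_add_subregion(get_system_memory(), 0xfeed0000, &dev->mmio);
}
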
e2eef170
PB
2024#endif /* !defined(CONFIG_USER_ONLY) */
2025
13eb76e0
FB
2026/* physical memory access (slow version, mainly for debug) */
2027#if defined(CONFIG_USER_ONLY)
f17ec444 2028int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
a68fe89c 2029 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2030{
2031 int l, flags;
2032 target_ulong page;
53a5960a 2033 void * p;
13eb76e0
FB
2034
2035 while (len > 0) {
2036 page = addr & TARGET_PAGE_MASK;
2037 l = (page + TARGET_PAGE_SIZE) - addr;
2038 if (l > len)
2039 l = len;
2040 flags = page_get_flags(page);
2041 if (!(flags & PAGE_VALID))
a68fe89c 2042 return -1;
13eb76e0
FB
2043 if (is_write) {
2044 if (!(flags & PAGE_WRITE))
a68fe89c 2045 return -1;
579a97f7 2046 /* XXX: this code should not depend on lock_user */
72fb7daa 2047 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 2048 return -1;
72fb7daa
AJ
2049 memcpy(p, buf, l);
2050 unlock_user(p, addr, l);
13eb76e0
FB
2051 } else {
2052 if (!(flags & PAGE_READ))
a68fe89c 2053 return -1;
579a97f7 2054 /* XXX: this code should not depend on lock_user */
72fb7daa 2055 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 2056 return -1;
72fb7daa 2057 memcpy(buf, p, l);
5b257578 2058 unlock_user(p, addr, 0);
13eb76e0
FB
2059 }
2060 len -= l;
2061 buf += l;
2062 addr += l;
2063 }
a68fe89c 2064 return 0;
13eb76e0 2065}
8df1cd07 2066
13eb76e0 2067#else
51d7a9eb 2068
a8170e5e
AK
2069static void invalidate_and_set_dirty(hwaddr addr,
2070 hwaddr length)
51d7a9eb 2071{
f874bf90
PM
2072 if (cpu_physical_memory_range_includes_clean(addr, length)) {
2073 tb_invalidate_phys_range(addr, addr + length, 0);
6886867e 2074 cpu_physical_memory_set_dirty_range_nocode(addr, length);
51d7a9eb 2075 }
e226939d 2076 xen_modified_memory(addr, length);
51d7a9eb
AP
2077}
2078
23326164 2079static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
82f2563f 2080{
e1622f4b 2081 unsigned access_size_max = mr->ops->valid.max_access_size;
23326164
RH
2082
2083 /* Regions are assumed to support 1-4 byte accesses unless
2084 otherwise specified. */
23326164
RH
2085 if (access_size_max == 0) {
2086 access_size_max = 4;
2087 }
2088
2089 /* Bound the maximum access by the alignment of the address. */
2090 if (!mr->ops->impl.unaligned) {
2091 unsigned align_size_max = addr & -addr;
2092 if (align_size_max != 0 && align_size_max < access_size_max) {
2093 access_size_max = align_size_max;
2094 }
82f2563f 2095 }
23326164
RH
2096
2097 /* Don't attempt accesses larger than the maximum. */
2098 if (l > access_size_max) {
2099 l = access_size_max;
82f2563f 2100 }
098178f2
PB
2101 if (l & (l - 1)) {
2102 l = 1 << (qemu_fls(l) - 1);
2103 }
23326164
RH
2104
2105 return l;
82f2563f
PB
2106}
2107
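
To make the clamping in memory_access_size() concrete, a small worked example with illustrative values: an 8-byte request at offset 0x1002 into a region that takes the default 4-byte maximum and does not allow unaligned accesses.

/* Worked example only, not part of exec.c. */
static unsigned example_clamp_access(void)
{
    unsigned l = 8;                           /* requested access size */
    unsigned addr1 = 0x1002;                  /* offset within the region */
    unsigned access_size_max = 4;             /* default when .valid.max_access_size == 0 */
    unsigned align_size_max = addr1 & -addr1; /* lowest set bit of the address: 2 */

    if (align_size_max != 0 && align_size_max < access_size_max) {
        access_size_max = align_size_max;     /* alignment caps the access at 2 */
    }
    if (l > access_size_max) {
        l = access_size_max;                  /* first chunk shrinks to 2 bytes */
    }
    return l; /* address_space_rw() then retries at 0x1004, where 4 bytes are allowed */
}
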
fd8aaa76 2108bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
ac1970fb 2109 int len, bool is_write)
13eb76e0 2110{
149f54b5 2111 hwaddr l;
13eb76e0 2112 uint8_t *ptr;
791af8c8 2113 uint64_t val;
149f54b5 2114 hwaddr addr1;
5c8a00ce 2115 MemoryRegion *mr;
fd8aaa76 2116 bool error = false;
3b46e624 2117
13eb76e0 2118 while (len > 0) {
149f54b5 2119 l = len;
5c8a00ce 2120 mr = address_space_translate(as, addr, &addr1, &l, is_write);
3b46e624 2121
13eb76e0 2122 if (is_write) {
5c8a00ce
PB
2123 if (!memory_access_is_direct(mr, is_write)) {
2124 l = memory_access_size(mr, l, addr1);
4917cf44 2125 /* XXX: could force current_cpu to NULL to avoid
6a00d601 2126 potential bugs */
23326164
RH
2127 switch (l) {
2128 case 8:
2129 /* 64 bit write access */
2130 val = ldq_p(buf);
2131 error |= io_mem_write(mr, addr1, val, 8);
2132 break;
2133 case 4:
1c213d19 2134 /* 32 bit write access */
c27004ec 2135 val = ldl_p(buf);
5c8a00ce 2136 error |= io_mem_write(mr, addr1, val, 4);
23326164
RH
2137 break;
2138 case 2:
1c213d19 2139 /* 16 bit write access */
c27004ec 2140 val = lduw_p(buf);
5c8a00ce 2141 error |= io_mem_write(mr, addr1, val, 2);
23326164
RH
2142 break;
2143 case 1:
1c213d19 2144 /* 8 bit write access */
c27004ec 2145 val = ldub_p(buf);
5c8a00ce 2146 error |= io_mem_write(mr, addr1, val, 1);
23326164
RH
2147 break;
2148 default:
2149 abort();
13eb76e0 2150 }
2bbfa05d 2151 } else {
5c8a00ce 2152 addr1 += memory_region_get_ram_addr(mr);
13eb76e0 2153 /* RAM case */
5579c7f3 2154 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 2155 memcpy(ptr, buf, l);
51d7a9eb 2156 invalidate_and_set_dirty(addr1, l);
13eb76e0
FB
2157 }
2158 } else {
5c8a00ce 2159 if (!memory_access_is_direct(mr, is_write)) {
13eb76e0 2160 /* I/O case */
5c8a00ce 2161 l = memory_access_size(mr, l, addr1);
23326164
RH
2162 switch (l) {
2163 case 8:
2164 /* 64 bit read access */
2165 error |= io_mem_read(mr, addr1, &val, 8);
2166 stq_p(buf, val);
2167 break;
2168 case 4:
13eb76e0 2169 /* 32 bit read access */
5c8a00ce 2170 error |= io_mem_read(mr, addr1, &val, 4);
c27004ec 2171 stl_p(buf, val);
23326164
RH
2172 break;
2173 case 2:
13eb76e0 2174 /* 16 bit read access */
5c8a00ce 2175 error |= io_mem_read(mr, addr1, &val, 2);
c27004ec 2176 stw_p(buf, val);
23326164
RH
2177 break;
2178 case 1:
1c213d19 2179 /* 8 bit read access */
5c8a00ce 2180 error |= io_mem_read(mr, addr1, &val, 1);
c27004ec 2181 stb_p(buf, val);
23326164
RH
2182 break;
2183 default:
2184 abort();
13eb76e0
FB
2185 }
2186 } else {
2187 /* RAM case */
5c8a00ce 2188 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
f3705d53 2189 memcpy(buf, ptr, l);
13eb76e0
FB
2190 }
2191 }
2192 len -= l;
2193 buf += l;
2194 addr += l;
2195 }
fd8aaa76
PB
2196
2197 return error;
13eb76e0 2198}
8df1cd07 2199
fd8aaa76 2200bool address_space_write(AddressSpace *as, hwaddr addr,
ac1970fb
AK
2201 const uint8_t *buf, int len)
2202{
fd8aaa76 2203 return address_space_rw(as, addr, (uint8_t *)buf, len, true);
ac1970fb
AK
2204}
2205
fd8aaa76 2206bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
ac1970fb 2207{
fd8aaa76 2208 return address_space_rw(as, addr, buf, len, false);
ac1970fb
AK
2209}
2210
2211
a8170e5e 2212void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
ac1970fb
AK
2213 int len, int is_write)
2214{
fd8aaa76 2215 address_space_rw(&address_space_memory, addr, buf, len, is_write);
ac1970fb
AK
2216}
2217
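
A short usage sketch for the slow-path copy routines above: read a few bytes of guest physical memory into a host buffer and write them back. The guest address and helper name are placeholders.

/* Sketch only, not part of exec.c. */
#include <stdint.h>
#include "exec/cpu-common.h"     /* cpu_physical_memory_rw() */

static void example_copy_guest_bytes(void)
{
    uint8_t buf[16];

    cpu_physical_memory_rw(0x1000, buf, sizeof(buf), 0);   /* read  */
    /* ... inspect or patch buf here ... */
    cpu_physical_memory_rw(0x1000, buf, sizeof(buf), 1);   /* write */
}
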
582b55a9
AG
2218enum write_rom_type {
2219 WRITE_DATA,
2220 FLUSH_CACHE,
2221};
2222
2a221651 2223static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
582b55a9 2224 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
d0ecd2aa 2225{
149f54b5 2226 hwaddr l;
d0ecd2aa 2227 uint8_t *ptr;
149f54b5 2228 hwaddr addr1;
5c8a00ce 2229 MemoryRegion *mr;
3b46e624 2230
d0ecd2aa 2231 while (len > 0) {
149f54b5 2232 l = len;
2a221651 2233 mr = address_space_translate(as, addr, &addr1, &l, true);
3b46e624 2234
5c8a00ce
PB
2235 if (!(memory_region_is_ram(mr) ||
2236 memory_region_is_romd(mr))) {
d0ecd2aa
FB
2237 /* do nothing */
2238 } else {
5c8a00ce 2239 addr1 += memory_region_get_ram_addr(mr);
d0ecd2aa 2240 /* ROM/RAM case */
5579c7f3 2241 ptr = qemu_get_ram_ptr(addr1);
582b55a9
AG
2242 switch (type) {
2243 case WRITE_DATA:
2244 memcpy(ptr, buf, l);
2245 invalidate_and_set_dirty(addr1, l);
2246 break;
2247 case FLUSH_CACHE:
2248 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2249 break;
2250 }
d0ecd2aa
FB
2251 }
2252 len -= l;
2253 buf += l;
2254 addr += l;
2255 }
2256}
2257
582b55a9 2258/* used for ROM loading : can write in RAM and ROM */
2a221651 2259void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
582b55a9
AG
2260 const uint8_t *buf, int len)
2261{
2a221651 2262 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
582b55a9
AG
2263}
2264
2265void cpu_flush_icache_range(hwaddr start, int len)
2266{
2267 /*
2268 * This function should do the same thing as an icache flush that was
2269 * triggered from within the guest. For TCG we are always cache coherent,
2270 * so there is no need to flush anything. For KVM / Xen we need to flush
2271 * the host's instruction cache at least.
2272 */
2273 if (tcg_enabled()) {
2274 return;
2275 }
2276
2a221651
EI
2277 cpu_physical_memory_write_rom_internal(&address_space_memory,
2278 start, NULL, len, FLUSH_CACHE);
582b55a9
AG
2279}
2280
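
A hedged sketch of how a machine or loader might use the ROM write path above: copy a firmware blob into ROM/RAM and then make the host instruction cache coherent for KVM/Xen. The load address and helper name are placeholders; the declarations are assumed to come from include/exec/cpu-common.h and include/exec/address-spaces.h.

/* Sketch only, not part of exec.c. */
#include <stdint.h>
#include "exec/cpu-common.h"
#include "exec/address-spaces.h"   /* address_space_memory */

static void example_load_blob(const uint8_t *blob, int size)
{
    cpu_physical_memory_write_rom(&address_space_memory, 0xfffc0000, blob, size);
    cpu_flush_icache_range(0xfffc0000, size);
}
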
6d16c2f8 2281typedef struct {
d3e71559 2282 MemoryRegion *mr;
6d16c2f8 2283 void *buffer;
a8170e5e
AK
2284 hwaddr addr;
2285 hwaddr len;
6d16c2f8
AL
2286} BounceBuffer;
2287
2288static BounceBuffer bounce;
2289
ba223c29
AL
2290typedef struct MapClient {
2291 void *opaque;
2292 void (*callback)(void *opaque);
72cf2d4f 2293 QLIST_ENTRY(MapClient) link;
ba223c29
AL
2294} MapClient;
2295
72cf2d4f
BS
2296static QLIST_HEAD(map_client_list, MapClient) map_client_list
2297 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
2298
2299void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2300{
7267c094 2301 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
2302
2303 client->opaque = opaque;
2304 client->callback = callback;
72cf2d4f 2305 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
2306 return client;
2307}
2308
8b9c99d9 2309static void cpu_unregister_map_client(void *_client)
ba223c29
AL
2310{
2311 MapClient *client = (MapClient *)_client;
2312
72cf2d4f 2313 QLIST_REMOVE(client, link);
7267c094 2314 g_free(client);
ba223c29
AL
2315}
2316
2317static void cpu_notify_map_clients(void)
2318{
2319 MapClient *client;
2320
72cf2d4f
BS
2321 while (!QLIST_EMPTY(&map_client_list)) {
2322 client = QLIST_FIRST(&map_client_list);
ba223c29 2323 client->callback(client->opaque);
34d5e948 2324 cpu_unregister_map_client(client);
ba223c29
AL
2325 }
2326}
2327
51644ab7
PB
2328bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2329{
5c8a00ce 2330 MemoryRegion *mr;
51644ab7
PB
2331 hwaddr l, xlat;
2332
2333 while (len > 0) {
2334 l = len;
5c8a00ce
PB
2335 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2336 if (!memory_access_is_direct(mr, is_write)) {
2337 l = memory_access_size(mr, l, addr);
2338 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
51644ab7
PB
2339 return false;
2340 }
2341 }
2342
2343 len -= l;
2344 addr += l;
2345 }
2346 return true;
2347}
2348
6d16c2f8
AL
2349/* Map a physical memory region into a host virtual address.
2350 * May map a subset of the requested range, given by and returned in *plen.
2351 * May return NULL if resources needed to perform the mapping are exhausted.
2352 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
2353 * Use cpu_register_map_client() to know when retrying the map operation is
2354 * likely to succeed.
6d16c2f8 2355 */
ac1970fb 2356void *address_space_map(AddressSpace *as,
a8170e5e
AK
2357 hwaddr addr,
2358 hwaddr *plen,
ac1970fb 2359 bool is_write)
6d16c2f8 2360{
a8170e5e 2361 hwaddr len = *plen;
e3127ae0
PB
2362 hwaddr done = 0;
2363 hwaddr l, xlat, base;
2364 MemoryRegion *mr, *this_mr;
2365 ram_addr_t raddr;
6d16c2f8 2366
e3127ae0
PB
2367 if (len == 0) {
2368 return NULL;
2369 }
38bee5dc 2370
e3127ae0
PB
2371 l = len;
2372 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2373 if (!memory_access_is_direct(mr, is_write)) {
2374 if (bounce.buffer) {
2375 return NULL;
6d16c2f8 2376 }
e85d9db5
KW
2377 /* Avoid unbounded allocations */
2378 l = MIN(l, TARGET_PAGE_SIZE);
2379 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
e3127ae0
PB
2380 bounce.addr = addr;
2381 bounce.len = l;
d3e71559
PB
2382
2383 memory_region_ref(mr);
2384 bounce.mr = mr;
e3127ae0
PB
2385 if (!is_write) {
2386 address_space_read(as, addr, bounce.buffer, l);
8ab934f9 2387 }
6d16c2f8 2388
e3127ae0
PB
2389 *plen = l;
2390 return bounce.buffer;
2391 }
2392
2393 base = xlat;
2394 raddr = memory_region_get_ram_addr(mr);
2395
2396 for (;;) {
6d16c2f8
AL
2397 len -= l;
2398 addr += l;
e3127ae0
PB
2399 done += l;
2400 if (len == 0) {
2401 break;
2402 }
2403
2404 l = len;
2405 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2406 if (this_mr != mr || xlat != base + done) {
2407 break;
2408 }
6d16c2f8 2409 }
e3127ae0 2410
d3e71559 2411 memory_region_ref(mr);
e3127ae0
PB
2412 *plen = done;
2413 return qemu_ram_ptr_length(raddr + base, plen);
6d16c2f8
AL
2414}
2415
ac1970fb 2416/* Unmaps a memory region previously mapped by address_space_map().
6d16c2f8
AL
2417 * Will also mark the memory as dirty if is_write == 1. access_len gives
2418 * the amount of memory that was actually read or written by the caller.
2419 */
a8170e5e
AK
2420void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2421 int is_write, hwaddr access_len)
6d16c2f8
AL
2422{
2423 if (buffer != bounce.buffer) {
d3e71559
PB
2424 MemoryRegion *mr;
2425 ram_addr_t addr1;
2426
2427 mr = qemu_ram_addr_from_host(buffer, &addr1);
2428 assert(mr != NULL);
6d16c2f8 2429 if (is_write) {
6886867e 2430 invalidate_and_set_dirty(addr1, access_len);
6d16c2f8 2431 }
868bb33f 2432 if (xen_enabled()) {
e41d7c69 2433 xen_invalidate_map_cache_entry(buffer);
050a0ddf 2434 }
d3e71559 2435 memory_region_unref(mr);
6d16c2f8
AL
2436 return;
2437 }
2438 if (is_write) {
ac1970fb 2439 address_space_write(as, bounce.addr, bounce.buffer, access_len);
6d16c2f8 2440 }
f8a83245 2441 qemu_vfree(bounce.buffer);
6d16c2f8 2442 bounce.buffer = NULL;
d3e71559 2443 memory_region_unref(bounce.mr);
ba223c29 2444 cpu_notify_map_clients();
6d16c2f8 2445}
d0ecd2aa 2446
a8170e5e
AK
2447void *cpu_physical_memory_map(hwaddr addr,
2448 hwaddr *plen,
ac1970fb
AK
2449 int is_write)
2450{
2451 return address_space_map(&address_space_memory, addr, plen, is_write);
2452}
2453
a8170e5e
AK
2454void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2455 int is_write, hwaddr access_len)
ac1970fb
AK
2456{
2457 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2458}
2459
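
A usage sketch for the map/unmap pair above, in the DMA style they are intended for: obtain a direct host pointer for a guest-physical range, copy into it, and fall back to the slow path when the bounce buffer is busy or nothing could be mapped. Addresses and the helper name are illustrative.

/* Sketch only, not part of exec.c. */
#include <stdint.h>
#include <string.h>
#include "exec/hwaddr.h"
#include "exec/cpu-common.h"

static void example_dma_write(hwaddr addr, const uint8_t *data, hwaddr len)
{
    while (len > 0) {
        hwaddr plen = len;
        void *host = cpu_physical_memory_map(addr, &plen, 1 /* is_write */);

        if (!host || plen == 0) {
            /* mapping unavailable (e.g. bounce buffer in use): slow path */
            cpu_physical_memory_rw(addr, (uint8_t *)data, len, 1);
            return;
        }
        memcpy(host, data, plen);
        cpu_physical_memory_unmap(host, plen, 1, plen);
        addr += plen;
        data += plen;
        len  -= plen;
    }
}
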
8df1cd07 2460/* warning: addr must be aligned */
fdfba1a2 2461static inline uint32_t ldl_phys_internal(AddressSpace *as, hwaddr addr,
1e78bcc1 2462 enum device_endian endian)
8df1cd07 2463{
8df1cd07 2464 uint8_t *ptr;
791af8c8 2465 uint64_t val;
5c8a00ce 2466 MemoryRegion *mr;
149f54b5
PB
2467 hwaddr l = 4;
2468 hwaddr addr1;
8df1cd07 2469
fdfba1a2 2470 mr = address_space_translate(as, addr, &addr1, &l, false);
5c8a00ce 2471 if (l < 4 || !memory_access_is_direct(mr, false)) {
8df1cd07 2472 /* I/O case */
5c8a00ce 2473 io_mem_read(mr, addr1, &val, 4);
1e78bcc1
AG
2474#if defined(TARGET_WORDS_BIGENDIAN)
2475 if (endian == DEVICE_LITTLE_ENDIAN) {
2476 val = bswap32(val);
2477 }
2478#else
2479 if (endian == DEVICE_BIG_ENDIAN) {
2480 val = bswap32(val);
2481 }
2482#endif
8df1cd07
FB
2483 } else {
2484 /* RAM case */
5c8a00ce 2485 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2486 & TARGET_PAGE_MASK)
149f54b5 2487 + addr1);
1e78bcc1
AG
2488 switch (endian) {
2489 case DEVICE_LITTLE_ENDIAN:
2490 val = ldl_le_p(ptr);
2491 break;
2492 case DEVICE_BIG_ENDIAN:
2493 val = ldl_be_p(ptr);
2494 break;
2495 default:
2496 val = ldl_p(ptr);
2497 break;
2498 }
8df1cd07
FB
2499 }
2500 return val;
2501}
2502
fdfba1a2 2503uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2504{
fdfba1a2 2505 return ldl_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2506}
2507
fdfba1a2 2508uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2509{
fdfba1a2 2510 return ldl_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2511}
2512
fdfba1a2 2513uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2514{
fdfba1a2 2515 return ldl_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2516}
2517
84b7b8e7 2518/* warning: addr must be aligned */
2c17449b 2519static inline uint64_t ldq_phys_internal(AddressSpace *as, hwaddr addr,
1e78bcc1 2520 enum device_endian endian)
84b7b8e7 2521{
84b7b8e7
FB
2522 uint8_t *ptr;
2523 uint64_t val;
5c8a00ce 2524 MemoryRegion *mr;
149f54b5
PB
2525 hwaddr l = 8;
2526 hwaddr addr1;
84b7b8e7 2527
2c17449b 2528 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2529 false);
2530 if (l < 8 || !memory_access_is_direct(mr, false)) {
84b7b8e7 2531 /* I/O case */
5c8a00ce 2532 io_mem_read(mr, addr1, &val, 8);
968a5627
PB
2533#if defined(TARGET_WORDS_BIGENDIAN)
2534 if (endian == DEVICE_LITTLE_ENDIAN) {
2535 val = bswap64(val);
2536 }
2537#else
2538 if (endian == DEVICE_BIG_ENDIAN) {
2539 val = bswap64(val);
2540 }
84b7b8e7
FB
2541#endif
2542 } else {
2543 /* RAM case */
5c8a00ce 2544 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2545 & TARGET_PAGE_MASK)
149f54b5 2546 + addr1);
1e78bcc1
AG
2547 switch (endian) {
2548 case DEVICE_LITTLE_ENDIAN:
2549 val = ldq_le_p(ptr);
2550 break;
2551 case DEVICE_BIG_ENDIAN:
2552 val = ldq_be_p(ptr);
2553 break;
2554 default:
2555 val = ldq_p(ptr);
2556 break;
2557 }
84b7b8e7
FB
2558 }
2559 return val;
2560}
2561
2c17449b 2562uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2563{
2c17449b 2564 return ldq_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2565}
2566
2c17449b 2567uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2568{
2c17449b 2569 return ldq_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2570}
2571
2c17449b 2572uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2573{
2c17449b 2574 return ldq_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2575}
2576
aab33094 2577/* XXX: optimize */
2c17449b 2578uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
aab33094
FB
2579{
2580 uint8_t val;
2c17449b 2581 address_space_rw(as, addr, &val, 1, 0);
aab33094
FB
2582 return val;
2583}
2584
733f0b02 2585/* warning: addr must be aligned */
41701aa4 2586static inline uint32_t lduw_phys_internal(AddressSpace *as, hwaddr addr,
1e78bcc1 2587 enum device_endian endian)
aab33094 2588{
733f0b02
MT
2589 uint8_t *ptr;
2590 uint64_t val;
5c8a00ce 2591 MemoryRegion *mr;
149f54b5
PB
2592 hwaddr l = 2;
2593 hwaddr addr1;
733f0b02 2594
41701aa4 2595 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2596 false);
2597 if (l < 2 || !memory_access_is_direct(mr, false)) {
733f0b02 2598 /* I/O case */
5c8a00ce 2599 io_mem_read(mr, addr1, &val, 2);
1e78bcc1
AG
2600#if defined(TARGET_WORDS_BIGENDIAN)
2601 if (endian == DEVICE_LITTLE_ENDIAN) {
2602 val = bswap16(val);
2603 }
2604#else
2605 if (endian == DEVICE_BIG_ENDIAN) {
2606 val = bswap16(val);
2607 }
2608#endif
733f0b02
MT
2609 } else {
2610 /* RAM case */
5c8a00ce 2611 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2612 & TARGET_PAGE_MASK)
149f54b5 2613 + addr1);
1e78bcc1
AG
2614 switch (endian) {
2615 case DEVICE_LITTLE_ENDIAN:
2616 val = lduw_le_p(ptr);
2617 break;
2618 case DEVICE_BIG_ENDIAN:
2619 val = lduw_be_p(ptr);
2620 break;
2621 default:
2622 val = lduw_p(ptr);
2623 break;
2624 }
733f0b02
MT
2625 }
2626 return val;
aab33094
FB
2627}
2628
41701aa4 2629uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2630{
41701aa4 2631 return lduw_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2632}
2633
41701aa4 2634uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2635{
41701aa4 2636 return lduw_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2637}
2638
41701aa4 2639uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2640{
41701aa4 2641 return lduw_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2642}
2643
8df1cd07
FB
2644/* warning: addr must be aligned. The ram page is not marked as dirty
2645 and the code inside is not invalidated. It is useful if the dirty
2646 bits are used to track modified PTEs */
2198a121 2647void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
8df1cd07 2648{
8df1cd07 2649 uint8_t *ptr;
5c8a00ce 2650 MemoryRegion *mr;
149f54b5
PB
2651 hwaddr l = 4;
2652 hwaddr addr1;
8df1cd07 2653
2198a121 2654 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2655 true);
2656 if (l < 4 || !memory_access_is_direct(mr, true)) {
2657 io_mem_write(mr, addr1, val, 4);
8df1cd07 2658 } else {
5c8a00ce 2659 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 2660 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 2661 stl_p(ptr, val);
74576198
AL
2662
2663 if (unlikely(in_migration)) {
a2cd8c85 2664 if (cpu_physical_memory_is_clean(addr1)) {
74576198
AL
2665 /* invalidate code */
2666 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2667 /* set dirty bit */
6886867e 2668 cpu_physical_memory_set_dirty_range_nocode(addr1, 4);
74576198
AL
2669 }
2670 }
8df1cd07
FB
2671 }
2672}
2673
2674/* warning: addr must be aligned */
ab1da857
EI
2675static inline void stl_phys_internal(AddressSpace *as,
2676 hwaddr addr, uint32_t val,
1e78bcc1 2677 enum device_endian endian)
8df1cd07 2678{
8df1cd07 2679 uint8_t *ptr;
5c8a00ce 2680 MemoryRegion *mr;
149f54b5
PB
2681 hwaddr l = 4;
2682 hwaddr addr1;
8df1cd07 2683
ab1da857 2684 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2685 true);
2686 if (l < 4 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
2687#if defined(TARGET_WORDS_BIGENDIAN)
2688 if (endian == DEVICE_LITTLE_ENDIAN) {
2689 val = bswap32(val);
2690 }
2691#else
2692 if (endian == DEVICE_BIG_ENDIAN) {
2693 val = bswap32(val);
2694 }
2695#endif
5c8a00ce 2696 io_mem_write(mr, addr1, val, 4);
8df1cd07 2697 } else {
8df1cd07 2698 /* RAM case */
5c8a00ce 2699 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 2700 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2701 switch (endian) {
2702 case DEVICE_LITTLE_ENDIAN:
2703 stl_le_p(ptr, val);
2704 break;
2705 case DEVICE_BIG_ENDIAN:
2706 stl_be_p(ptr, val);
2707 break;
2708 default:
2709 stl_p(ptr, val);
2710 break;
2711 }
51d7a9eb 2712 invalidate_and_set_dirty(addr1, 4);
8df1cd07
FB
2713 }
2714}
2715
ab1da857 2716void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2717{
ab1da857 2718 stl_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2719}
2720
ab1da857 2721void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2722{
ab1da857 2723 stl_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2724}
2725
ab1da857 2726void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2727{
ab1da857 2728 stl_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2729}
2730
aab33094 2731/* XXX: optimize */
db3be60d 2732void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
aab33094
FB
2733{
2734 uint8_t v = val;
db3be60d 2735 address_space_rw(as, addr, &v, 1, 1);
aab33094
FB
2736}
2737
733f0b02 2738/* warning: addr must be aligned */
5ce5944d
EI
2739static inline void stw_phys_internal(AddressSpace *as,
2740 hwaddr addr, uint32_t val,
1e78bcc1 2741 enum device_endian endian)
aab33094 2742{
733f0b02 2743 uint8_t *ptr;
5c8a00ce 2744 MemoryRegion *mr;
149f54b5
PB
2745 hwaddr l = 2;
2746 hwaddr addr1;
733f0b02 2747
5ce5944d 2748 mr = address_space_translate(as, addr, &addr1, &l, true);
5c8a00ce 2749 if (l < 2 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
2750#if defined(TARGET_WORDS_BIGENDIAN)
2751 if (endian == DEVICE_LITTLE_ENDIAN) {
2752 val = bswap16(val);
2753 }
2754#else
2755 if (endian == DEVICE_BIG_ENDIAN) {
2756 val = bswap16(val);
2757 }
2758#endif
5c8a00ce 2759 io_mem_write(mr, addr1, val, 2);
733f0b02 2760 } else {
733f0b02 2761 /* RAM case */
5c8a00ce 2762 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
733f0b02 2763 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2764 switch (endian) {
2765 case DEVICE_LITTLE_ENDIAN:
2766 stw_le_p(ptr, val);
2767 break;
2768 case DEVICE_BIG_ENDIAN:
2769 stw_be_p(ptr, val);
2770 break;
2771 default:
2772 stw_p(ptr, val);
2773 break;
2774 }
51d7a9eb 2775 invalidate_and_set_dirty(addr1, 2);
733f0b02 2776 }
aab33094
FB
2777}
2778
5ce5944d 2779void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2780{
5ce5944d 2781 stw_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2782}
2783
5ce5944d 2784void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2785{
5ce5944d 2786 stw_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2787}
2788
5ce5944d 2789void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2790{
5ce5944d 2791 stw_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2792}
2793
aab33094 2794/* XXX: optimize */
f606604f 2795void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
aab33094
FB
2796{
2797 val = tswap64(val);
f606604f 2798 address_space_rw(as, addr, (void *) &val, 8, 1);
aab33094
FB
2799}
2800
f606604f 2801void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
1e78bcc1
AG
2802{
2803 val = cpu_to_le64(val);
f606604f 2804 address_space_rw(as, addr, (void *) &val, 8, 1);
1e78bcc1
AG
2805}
2806
f606604f 2807void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
1e78bcc1
AG
2808{
2809 val = cpu_to_be64(val);
f606604f 2810 address_space_rw(as, addr, (void *) &val, 8, 1);
1e78bcc1
AG
2811}
2812
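
A brief sketch of the fixed-width helpers defined above, used against the global address_space_memory; the guest address and helper name are placeholders, and the declarations are assumed to live in include/exec/cpu-common.h at this point in the tree.

/* Sketch only, not part of exec.c: bump a little-endian 32-bit counter the
 * guest keeps at physical address 0x2000. */
#include <stdint.h>
#include "exec/cpu-common.h"
#include "exec/address-spaces.h"

static void example_bump_counter(void)
{
    uint32_t v = ldl_le_phys(&address_space_memory, 0x2000);

    stl_le_phys(&address_space_memory, 0x2000, v + 1);
}
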
5e2972fd 2813/* virtual memory access for debug (includes writing to ROM) */
f17ec444 2814int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
b448f2f3 2815 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2816{
2817 int l;
a8170e5e 2818 hwaddr phys_addr;
9b3c35e0 2819 target_ulong page;
13eb76e0
FB
2820
2821 while (len > 0) {
2822 page = addr & TARGET_PAGE_MASK;
f17ec444 2823 phys_addr = cpu_get_phys_page_debug(cpu, page);
13eb76e0
FB
2824 /* if no physical page mapped, return an error */
2825 if (phys_addr == -1)
2826 return -1;
2827 l = (page + TARGET_PAGE_SIZE) - addr;
2828 if (l > len)
2829 l = len;
5e2972fd 2830 phys_addr += (addr & ~TARGET_PAGE_MASK);
2e38847b
EI
2831 if (is_write) {
2832 cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
2833 } else {
2834 address_space_rw(cpu->as, phys_addr, buf, l, 0);
2835 }
13eb76e0
FB
2836 len -= l;
2837 buf += l;
2838 addr += l;
2839 }
2840 return 0;
2841}
a68fe89c 2842#endif
13eb76e0 2843
8e4a424b
BS
2844/*
2845 * A helper function for the _utterly broken_ virtio device model to find out if
2846 * it's running on a big endian machine. Don't do this at home kids!
2847 */
98ed8ecf
GK
2848bool target_words_bigendian(void);
2849bool target_words_bigendian(void)
8e4a424b
BS
2850{
2851#if defined(TARGET_WORDS_BIGENDIAN)
2852 return true;
2853#else
2854 return false;
2855#endif
2856}
2857
76f35538 2858#ifndef CONFIG_USER_ONLY
a8170e5e 2859bool cpu_physical_memory_is_io(hwaddr phys_addr)
76f35538 2860{
5c8a00ce 2861 MemoryRegion *mr;
149f54b5 2862 hwaddr l = 1;
76f35538 2863
5c8a00ce
PB
2864 mr = address_space_translate(&address_space_memory,
2865 phys_addr, &phys_addr, &l, false);
76f35538 2866
5c8a00ce
PB
2867 return !(memory_region_is_ram(mr) ||
2868 memory_region_is_romd(mr));
76f35538 2869}
bd2fa51f
MH
2870
2871void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
2872{
2873 RAMBlock *block;
2874
2875 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
2876 func(block->host, block->offset, block->length, opaque);
2877 }
2878}
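
A hedged sketch of a RAMBlockIterFunc callback for the iterator above; the parameter order is inferred from the func(block->host, block->offset, block->length, opaque) call, and the helper name is illustrative.

/* Sketch only, not part of exec.c: sum the length of every RAM block.
 *
 * Usage:
 *     uint64_t total = 0;
 *     qemu_ram_foreach_block(example_count_ram, &total);
 */
#include <stdint.h>
#include "exec/cpu-common.h"

static void example_count_ram(void *host_addr, ram_addr_t offset,
                              ram_addr_t length, void *opaque)
{
    uint64_t *total = opaque;

    *total += length;
    (void)host_addr;
    (void)offset;
}
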
ec3f8c99 2879#endif