[thirdparty/qemu.git] / exec.c
54936004 1/*
5b6dd868 2 * Virtual page mapping
5fafdf24 3 *
54936004
FB
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
54936004 18 */
67b915a5 19#include "config.h"
777872e5 20#ifndef _WIN32
a98d49b1 21#include <sys/types.h>
d5a8f07c
FB
22#include <sys/mman.h>
23#endif
54936004 24
055403b2 25#include "qemu-common.h"
6180a181 26#include "cpu.h"
b67d9a52 27#include "tcg.h"
b3c7724c 28#include "hw/hw.h"
cc9e98cb 29#include "hw/qdev.h"
1de7afc9 30#include "qemu/osdep.h"
9c17d615 31#include "sysemu/kvm.h"
2ff3de68 32#include "sysemu/sysemu.h"
0d09e41a 33#include "hw/xen/xen.h"
1de7afc9
PB
34#include "qemu/timer.h"
35#include "qemu/config-file.h"
75a34036 36#include "qemu/error-report.h"
022c62cb 37#include "exec/memory.h"
9c17d615 38#include "sysemu/dma.h"
022c62cb 39#include "exec/address-spaces.h"
53a5960a
PB
40#if defined(CONFIG_USER_ONLY)
41#include <qemu.h>
432d268c 42#else /* !CONFIG_USER_ONLY */
9c17d615 43#include "sysemu/xen-mapcache.h"
6506e4f9 44#include "trace.h"
53a5960a 45#endif
0d6d3c87 46#include "exec/cpu-all.h"
54936004 47
022c62cb 48#include "exec/cputlb.h"
5b6dd868 49#include "translate-all.h"
0cac1b66 50
022c62cb 51#include "exec/memory-internal.h"
220c3ebd 52#include "exec/ram_addr.h"
67d95c15 53
b35ba30f
MT
54#include "qemu/range.h"
55
db7b5426 56//#define DEBUG_SUBPAGE
1196be37 57
e2eef170 58#if !defined(CONFIG_USER_ONLY)
981fdf23 59static bool in_migration;
94a6b54f 60
a3161038 61RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };
62152b8a
AK
62
63static MemoryRegion *system_memory;
309cb471 64static MemoryRegion *system_io;
62152b8a 65
f6790af6
AK
66AddressSpace address_space_io;
67AddressSpace address_space_memory;
2673a5da 68
0844e007 69MemoryRegion io_mem_rom, io_mem_notdirty;
acc9d80b 70static MemoryRegion io_mem_unassigned;
0e0df1e2 71
7bd4f430
PB
72/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
73#define RAM_PREALLOC (1 << 0)
74
dbcb8981
PB
75/* RAM is mmap-ed with MAP_SHARED */
76#define RAM_SHARED (1 << 1)
77
62be4e3a
MT
78/* Only a portion of RAM (used_length) is actually used, and migrated.
79 * This used_length size can change across reboots.
80 */
81#define RAM_RESIZEABLE (1 << 2)
82
e2eef170 83#endif
9fa3e853 84
bdc44640 85struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
6a00d601
FB
86/* current CPU in the current thread. It is only valid inside
87 cpu_exec() */
4917cf44 88DEFINE_TLS(CPUState *, current_cpu);
2e70f6ef 89/* 0 = Do not count executed instructions.
bf20dc07 90 1 = Precise instruction counting.
2e70f6ef 91 2 = Adaptive rate instruction counting. */
5708fc66 92int use_icount;
6a00d601 93
e2eef170 94#if !defined(CONFIG_USER_ONLY)
4346ae3e 95
1db8abb1
PB
96typedef struct PhysPageEntry PhysPageEntry;
97
98struct PhysPageEntry {
9736e55b 99 /* How many bits to skip to the next level (in units of L2_SIZE). 0 for a leaf. */
8b795765 100 uint32_t skip : 6;
9736e55b 101 /* index into phys_sections (!skip) or phys_map_nodes (skip) */
8b795765 102 uint32_t ptr : 26;
1db8abb1
PB
103};
104
8b795765
MT
105#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
106
03f49957 107/* Size of the L2 (and L3, etc) page tables. */
57271d63 108#define ADDR_SPACE_BITS 64
03f49957 109
026736ce 110#define P_L2_BITS 9
03f49957
PB
111#define P_L2_SIZE (1 << P_L2_BITS)
112
113#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
114
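/* Worked example (illustrative, not upstream text): with ADDR_SPACE_BITS == 64
 * and, say, a 4 KiB target page (TARGET_PAGE_BITS == 12), P_L2_LEVELS is
 * ((64 - 12 - 1) / 9) + 1 == 6, i.e. six levels of 512-entry tables resolving
 * 6 * 9 == 54 bits on top of the 12 page-offset bits.
 */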
115typedef PhysPageEntry Node[P_L2_SIZE];
0475d94f 116
53cb28cb
MA
117typedef struct PhysPageMap {
118 unsigned sections_nb;
119 unsigned sections_nb_alloc;
120 unsigned nodes_nb;
121 unsigned nodes_nb_alloc;
122 Node *nodes;
123 MemoryRegionSection *sections;
124} PhysPageMap;
125
1db8abb1
PB
126struct AddressSpaceDispatch {
127 /* This is a multi-level map on the physical address space.
128 * The bottom level has pointers to MemoryRegionSections.
129 */
130 PhysPageEntry phys_map;
53cb28cb 131 PhysPageMap map;
acc9d80b 132 AddressSpace *as;
1db8abb1
PB
133};
134
90260c6c
JK
135#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
136typedef struct subpage_t {
137 MemoryRegion iomem;
acc9d80b 138 AddressSpace *as;
90260c6c
JK
139 hwaddr base;
140 uint16_t sub_section[TARGET_PAGE_SIZE];
141} subpage_t;
142
b41aac4f
LPF
143#define PHYS_SECTION_UNASSIGNED 0
144#define PHYS_SECTION_NOTDIRTY 1
145#define PHYS_SECTION_ROM 2
146#define PHYS_SECTION_WATCH 3
5312bd8b 147
e2eef170 148static void io_mem_init(void);
62152b8a 149static void memory_map_init(void);
09daed84 150static void tcg_commit(MemoryListener *listener);
e2eef170 151
1ec9b909 152static MemoryRegion io_mem_watch;
6658ffb8 153#endif
fd6ce8f6 154
6d9a1304 155#if !defined(CONFIG_USER_ONLY)
d6f2ea22 156
53cb28cb 157static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
d6f2ea22 158{
53cb28cb
MA
159 if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
160 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
161 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
162 map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
d6f2ea22 163 }
f7bf5461
AK
164}
165
53cb28cb 166static uint32_t phys_map_node_alloc(PhysPageMap *map)
f7bf5461
AK
167{
168 unsigned i;
8b795765 169 uint32_t ret;
f7bf5461 170
53cb28cb 171 ret = map->nodes_nb++;
f7bf5461 172 assert(ret != PHYS_MAP_NODE_NIL);
53cb28cb 173 assert(ret != map->nodes_nb_alloc);
03f49957 174 for (i = 0; i < P_L2_SIZE; ++i) {
53cb28cb
MA
175 map->nodes[ret][i].skip = 1;
176 map->nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
d6f2ea22 177 }
f7bf5461 178 return ret;
d6f2ea22
AK
179}
180
53cb28cb
MA
181static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
182 hwaddr *index, hwaddr *nb, uint16_t leaf,
2999097b 183 int level)
f7bf5461
AK
184{
185 PhysPageEntry *p;
186 int i;
03f49957 187 hwaddr step = (hwaddr)1 << (level * P_L2_BITS);
108c49b8 188
9736e55b 189 if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
53cb28cb
MA
190 lp->ptr = phys_map_node_alloc(map);
191 p = map->nodes[lp->ptr];
f7bf5461 192 if (level == 0) {
03f49957 193 for (i = 0; i < P_L2_SIZE; i++) {
9736e55b 194 p[i].skip = 0;
b41aac4f 195 p[i].ptr = PHYS_SECTION_UNASSIGNED;
4346ae3e 196 }
67c4d23c 197 }
f7bf5461 198 } else {
53cb28cb 199 p = map->nodes[lp->ptr];
92e873b9 200 }
03f49957 201 lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];
f7bf5461 202
03f49957 203 while (*nb && lp < &p[P_L2_SIZE]) {
07f07b31 204 if ((*index & (step - 1)) == 0 && *nb >= step) {
9736e55b 205 lp->skip = 0;
c19e8800 206 lp->ptr = leaf;
07f07b31
AK
207 *index += step;
208 *nb -= step;
2999097b 209 } else {
53cb28cb 210 phys_page_set_level(map, lp, index, nb, leaf, level - 1);
2999097b
AK
211 }
212 ++lp;
f7bf5461
AK
213 }
214}
215
ac1970fb 216static void phys_page_set(AddressSpaceDispatch *d,
a8170e5e 217 hwaddr index, hwaddr nb,
2999097b 218 uint16_t leaf)
f7bf5461 219{
2999097b 220 /* Wildly overreserve - it doesn't matter much. */
53cb28cb 221 phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);
5cd2c5b6 222
53cb28cb 223 phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
92e873b9
FB
224}
225
b35ba30f
MT
226/* Compact a non-leaf page entry. Simply detect that the entry has a single child,
227 * and update our entry so we can skip it and go directly to the destination.
228 */
229static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
230{
231 unsigned valid_ptr = P_L2_SIZE;
232 int valid = 0;
233 PhysPageEntry *p;
234 int i;
235
236 if (lp->ptr == PHYS_MAP_NODE_NIL) {
237 return;
238 }
239
240 p = nodes[lp->ptr];
241 for (i = 0; i < P_L2_SIZE; i++) {
242 if (p[i].ptr == PHYS_MAP_NODE_NIL) {
243 continue;
244 }
245
246 valid_ptr = i;
247 valid++;
248 if (p[i].skip) {
249 phys_page_compact(&p[i], nodes, compacted);
250 }
251 }
252
253 /* We can only compress if there's only one child. */
254 if (valid != 1) {
255 return;
256 }
257
258 assert(valid_ptr < P_L2_SIZE);
259
260 /* Don't compress if it won't fit in the # of bits we have. */
261 if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
262 return;
263 }
264
265 lp->ptr = p[valid_ptr].ptr;
266 if (!p[valid_ptr].skip) {
267 /* If our only child is a leaf, make this a leaf. */
268 /* By design, we should have made this node a leaf to begin with so we
269 * should never reach here.
270 * But since it's so simple to handle this, let's do it just in case we
271 * change this rule.
272 */
273 lp->skip = 0;
274 } else {
275 lp->skip += p[valid_ptr].skip;
276 }
277}
278
279static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
280{
281 DECLARE_BITMAP(compacted, nodes_nb);
282
283 if (d->phys_map.skip) {
53cb28cb 284 phys_page_compact(&d->phys_map, d->map.nodes, compacted);
b35ba30f
MT
285 }
286}
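/* Illustrative sketch, not upstream code: suppose the top-level entry has
 * skip == 1 and points at a node A whose single valid entry also has
 * skip == 1 and points at a node B with several valid entries.  Compaction
 * rewrites the top-level entry to ptr = B and skip = 1 + 1 == 2, so the
 * lookup in phys_page_find() below subtracts both levels at once via
 * "i -= lp.skip" instead of loading the intermediate node A.
 */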
287
97115a8d 288static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
9affd6fc 289 Node *nodes, MemoryRegionSection *sections)
92e873b9 290{
31ab2b4a 291 PhysPageEntry *p;
97115a8d 292 hwaddr index = addr >> TARGET_PAGE_BITS;
31ab2b4a 293 int i;
f1f6e3b8 294
9736e55b 295 for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
c19e8800 296 if (lp.ptr == PHYS_MAP_NODE_NIL) {
9affd6fc 297 return &sections[PHYS_SECTION_UNASSIGNED];
31ab2b4a 298 }
9affd6fc 299 p = nodes[lp.ptr];
03f49957 300 lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
5312bd8b 301 }
b35ba30f
MT
302
303 if (sections[lp.ptr].size.hi ||
304 range_covers_byte(sections[lp.ptr].offset_within_address_space,
305 sections[lp.ptr].size.lo, addr)) {
306 return &sections[lp.ptr];
307 } else {
308 return &sections[PHYS_SECTION_UNASSIGNED];
309 }
f3705d53
AK
310}
311
e5548617
BS
312bool memory_region_is_unassigned(MemoryRegion *mr)
313{
2a8e7499 314 return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
5b6dd868 315 && mr != &io_mem_watch;
fd6ce8f6 316}
149f54b5 317
c7086b4a 318static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
90260c6c
JK
319 hwaddr addr,
320 bool resolve_subpage)
9f029603 321{
90260c6c
JK
322 MemoryRegionSection *section;
323 subpage_t *subpage;
324
53cb28cb 325 section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
90260c6c
JK
326 if (resolve_subpage && section->mr->subpage) {
327 subpage = container_of(section->mr, subpage_t, iomem);
53cb28cb 328 section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
90260c6c
JK
329 }
330 return section;
9f029603
JK
331}
332
90260c6c 333static MemoryRegionSection *
c7086b4a 334address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
90260c6c 335 hwaddr *plen, bool resolve_subpage)
149f54b5
PB
336{
337 MemoryRegionSection *section;
a87f3954 338 Int128 diff;
149f54b5 339
c7086b4a 340 section = address_space_lookup_region(d, addr, resolve_subpage);
149f54b5
PB
341 /* Compute offset within MemoryRegionSection */
342 addr -= section->offset_within_address_space;
343
344 /* Compute offset within MemoryRegion */
345 *xlat = addr + section->offset_within_region;
346
347 diff = int128_sub(section->mr->size, int128_make64(addr));
3752a036 348 *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
149f54b5
PB
349 return section;
350}
90260c6c 351
a87f3954
PB
352static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
353{
354 if (memory_region_is_ram(mr)) {
355 return !(is_write && mr->readonly);
356 }
357 if (memory_region_is_romd(mr)) {
358 return !is_write;
359 }
360
361 return false;
362}
363
5c8a00ce
PB
364MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
365 hwaddr *xlat, hwaddr *plen,
366 bool is_write)
90260c6c 367{
30951157
AK
368 IOMMUTLBEntry iotlb;
369 MemoryRegionSection *section;
370 MemoryRegion *mr;
371 hwaddr len = *plen;
372
373 for (;;) {
a87f3954 374 section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
30951157
AK
375 mr = section->mr;
376
377 if (!mr->iommu_ops) {
378 break;
379 }
380
8d7b8cb9 381 iotlb = mr->iommu_ops->translate(mr, addr, is_write);
30951157
AK
382 addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
383 | (addr & iotlb.addr_mask));
384 len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
385 if (!(iotlb.perm & (1 << is_write))) {
386 mr = &io_mem_unassigned;
387 break;
388 }
389
390 as = iotlb.target_as;
391 }
392
fe680d0d 393 if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
a87f3954
PB
394 hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
395 len = MIN(page, len);
396 }
397
30951157
AK
398 *plen = len;
399 *xlat = addr;
400 return mr;
90260c6c
JK
401}
402
403MemoryRegionSection *
404address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
405 hwaddr *plen)
406{
30951157 407 MemoryRegionSection *section;
c7086b4a 408 section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);
30951157
AK
409
410 assert(!section->mr->iommu_ops);
411 return section;
90260c6c 412}
5b6dd868 413#endif
fd6ce8f6 414
5b6dd868 415void cpu_exec_init_all(void)
fdbb84d1 416{
5b6dd868 417#if !defined(CONFIG_USER_ONLY)
b2a8658e 418 qemu_mutex_init(&ram_list.mutex);
5b6dd868
BS
419 memory_map_init();
420 io_mem_init();
fdbb84d1 421#endif
5b6dd868 422}
fdbb84d1 423
b170fce3 424#if !defined(CONFIG_USER_ONLY)
5b6dd868
BS
425
426static int cpu_common_post_load(void *opaque, int version_id)
fd6ce8f6 427{
259186a7 428 CPUState *cpu = opaque;
a513fe19 429
5b6dd868
BS
430 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
431 version_id is increased. */
259186a7 432 cpu->interrupt_request &= ~0x01;
c01a71c1 433 tlb_flush(cpu, 1);
5b6dd868
BS
434
435 return 0;
a513fe19 436}
7501267e 437
6c3bff0e
PD
438static int cpu_common_pre_load(void *opaque)
439{
440 CPUState *cpu = opaque;
441
adee6424 442 cpu->exception_index = -1;
6c3bff0e
PD
443
444 return 0;
445}
446
447static bool cpu_common_exception_index_needed(void *opaque)
448{
449 CPUState *cpu = opaque;
450
adee6424 451 return tcg_enabled() && cpu->exception_index != -1;
6c3bff0e
PD
452}
453
454static const VMStateDescription vmstate_cpu_common_exception_index = {
455 .name = "cpu_common/exception_index",
456 .version_id = 1,
457 .minimum_version_id = 1,
458 .fields = (VMStateField[]) {
459 VMSTATE_INT32(exception_index, CPUState),
460 VMSTATE_END_OF_LIST()
461 }
462};
463
1a1562f5 464const VMStateDescription vmstate_cpu_common = {
5b6dd868
BS
465 .name = "cpu_common",
466 .version_id = 1,
467 .minimum_version_id = 1,
6c3bff0e 468 .pre_load = cpu_common_pre_load,
5b6dd868 469 .post_load = cpu_common_post_load,
35d08458 470 .fields = (VMStateField[]) {
259186a7
AF
471 VMSTATE_UINT32(halted, CPUState),
472 VMSTATE_UINT32(interrupt_request, CPUState),
5b6dd868 473 VMSTATE_END_OF_LIST()
6c3bff0e
PD
474 },
475 .subsections = (VMStateSubsection[]) {
476 {
477 .vmsd = &vmstate_cpu_common_exception_index,
478 .needed = cpu_common_exception_index_needed,
479 } , {
480 /* empty */
481 }
5b6dd868
BS
482 }
483};
1a1562f5 484
5b6dd868 485#endif
ea041c0e 486
38d8f5c8 487CPUState *qemu_get_cpu(int index)
ea041c0e 488{
bdc44640 489 CPUState *cpu;
ea041c0e 490
bdc44640 491 CPU_FOREACH(cpu) {
55e5c285 492 if (cpu->cpu_index == index) {
bdc44640 493 return cpu;
55e5c285 494 }
ea041c0e 495 }
5b6dd868 496
bdc44640 497 return NULL;
ea041c0e
FB
498}
499
09daed84
EI
500#if !defined(CONFIG_USER_ONLY)
501void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
502{
503 /* We only support one address space per cpu at the moment. */
504 assert(cpu->as == as);
505
506 if (cpu->tcg_as_listener) {
507 memory_listener_unregister(cpu->tcg_as_listener);
508 } else {
509 cpu->tcg_as_listener = g_new0(MemoryListener, 1);
510 }
511 cpu->tcg_as_listener->commit = tcg_commit;
512 memory_listener_register(cpu->tcg_as_listener, as);
513}
514#endif
515
5b6dd868 516void cpu_exec_init(CPUArchState *env)
ea041c0e 517{
5b6dd868 518 CPUState *cpu = ENV_GET_CPU(env);
b170fce3 519 CPUClass *cc = CPU_GET_CLASS(cpu);
bdc44640 520 CPUState *some_cpu;
5b6dd868
BS
521 int cpu_index;
522
523#if defined(CONFIG_USER_ONLY)
524 cpu_list_lock();
525#endif
5b6dd868 526 cpu_index = 0;
bdc44640 527 CPU_FOREACH(some_cpu) {
5b6dd868
BS
528 cpu_index++;
529 }
55e5c285 530 cpu->cpu_index = cpu_index;
1b1ed8dc 531 cpu->numa_node = 0;
f0c3c505 532 QTAILQ_INIT(&cpu->breakpoints);
ff4700b0 533 QTAILQ_INIT(&cpu->watchpoints);
5b6dd868 534#ifndef CONFIG_USER_ONLY
09daed84 535 cpu->as = &address_space_memory;
5b6dd868
BS
536 cpu->thread_id = qemu_get_thread_id();
537#endif
bdc44640 538 QTAILQ_INSERT_TAIL(&cpus, cpu, node);
5b6dd868
BS
539#if defined(CONFIG_USER_ONLY)
540 cpu_list_unlock();
541#endif
e0d47944
AF
542 if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
543 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
544 }
5b6dd868 545#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
5b6dd868
BS
546 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
547 cpu_save, cpu_load, env);
b170fce3 548 assert(cc->vmsd == NULL);
e0d47944 549 assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
5b6dd868 550#endif
b170fce3
AF
551 if (cc->vmsd != NULL) {
552 vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
553 }
ea041c0e
FB
554}
555
94df27fd 556#if defined(CONFIG_USER_ONLY)
00b941e5 557static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
94df27fd
PB
558{
559 tb_invalidate_phys_page_range(pc, pc + 1, 0);
560}
561#else
00b941e5 562static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
1e7855a5 563{
e8262a1b
MF
564 hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
565 if (phys != -1) {
09daed84 566 tb_invalidate_phys_addr(cpu->as,
29d8ec7b 567 phys | (pc & ~TARGET_PAGE_MASK));
e8262a1b 568 }
1e7855a5 569}
c27004ec 570#endif
d720b93d 571
c527ee8f 572#if defined(CONFIG_USER_ONLY)
75a34036 573void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
c527ee8f
PB
574
575{
576}
577
3ee887e8
PM
578int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
579 int flags)
580{
581 return -ENOSYS;
582}
583
584void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
585{
586}
587
75a34036 588int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
c527ee8f
PB
589 int flags, CPUWatchpoint **watchpoint)
590{
591 return -ENOSYS;
592}
593#else
6658ffb8 594/* Add a watchpoint. */
75a34036 595int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
a1d1bb31 596 int flags, CPUWatchpoint **watchpoint)
6658ffb8 597{
c0ce998e 598 CPUWatchpoint *wp;
6658ffb8 599
05068c0d 600 /* forbid ranges which are empty or run off the end of the address space */
07e2863d 601 if (len == 0 || (addr + len - 1) < addr) {
75a34036
AF
602 error_report("tried to set invalid watchpoint at %"
603 VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
b4051334
AL
604 return -EINVAL;
605 }
7267c094 606 wp = g_malloc(sizeof(*wp));
a1d1bb31
AL
607
608 wp->vaddr = addr;
05068c0d 609 wp->len = len;
a1d1bb31
AL
610 wp->flags = flags;
611
2dc9f411 612 /* keep all GDB-injected watchpoints in front */
ff4700b0
AF
613 if (flags & BP_GDB) {
614 QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
615 } else {
616 QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
617 }
6658ffb8 618
31b030d4 619 tlb_flush_page(cpu, addr);
a1d1bb31
AL
620
621 if (watchpoint)
622 *watchpoint = wp;
623 return 0;
6658ffb8
PB
624}
625
a1d1bb31 626/* Remove a specific watchpoint. */
75a34036 627int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
a1d1bb31 628 int flags)
6658ffb8 629{
a1d1bb31 630 CPUWatchpoint *wp;
6658ffb8 631
ff4700b0 632 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
05068c0d 633 if (addr == wp->vaddr && len == wp->len
6e140f28 634 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
75a34036 635 cpu_watchpoint_remove_by_ref(cpu, wp);
6658ffb8
PB
636 return 0;
637 }
638 }
a1d1bb31 639 return -ENOENT;
6658ffb8
PB
640}
641
a1d1bb31 642/* Remove a specific watchpoint by reference. */
75a34036 643void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
a1d1bb31 644{
ff4700b0 645 QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);
7d03f82f 646
31b030d4 647 tlb_flush_page(cpu, watchpoint->vaddr);
a1d1bb31 648
7267c094 649 g_free(watchpoint);
a1d1bb31
AL
650}
651
652/* Remove all matching watchpoints. */
75a34036 653void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
a1d1bb31 654{
c0ce998e 655 CPUWatchpoint *wp, *next;
a1d1bb31 656
ff4700b0 657 QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
75a34036
AF
658 if (wp->flags & mask) {
659 cpu_watchpoint_remove_by_ref(cpu, wp);
660 }
c0ce998e 661 }
7d03f82f 662}
05068c0d
PM
663
664/* Return true if this watchpoint address matches the specified
665 * access (ie the address range covered by the watchpoint overlaps
666 * partially or completely with the address range covered by the
667 * access).
668 */
669static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
670 vaddr addr,
671 vaddr len)
672{
673 /* We know the lengths are non-zero, but a little caution is
674 * required to avoid errors in the case where the range ends
675 * exactly at the top of the address space and so addr + len
676 * wraps round to zero.
677 */
678 vaddr wpend = wp->vaddr + wp->len - 1;
679 vaddr addrend = addr + len - 1;
680
681 return !(addr > wpend || wp->vaddr > addrend);
682}
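/* Worked example (hypothetical values, not upstream text): a watchpoint with
 * vaddr == 0x1000 and len == 4 gives wpend == 0x1003.  A 2-byte access at
 * addr == 0x1002 gives addrend == 0x1003, and
 * !(0x1002 > 0x1003 || 0x1000 > 0x1003) is true, so it matches.  A 1-byte
 * access at 0x1004 gives addrend == 0x1004, and 0x1004 > wpend, so it does
 * not.  Using inclusive end addresses keeps the test correct even when a
 * range ends at the very top of the address space.
 */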
683
c527ee8f 684#endif
7d03f82f 685
a1d1bb31 686/* Add a breakpoint. */
b3310ab3 687int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
a1d1bb31 688 CPUBreakpoint **breakpoint)
4c3a88a2 689{
c0ce998e 690 CPUBreakpoint *bp;
3b46e624 691
7267c094 692 bp = g_malloc(sizeof(*bp));
4c3a88a2 693
a1d1bb31
AL
694 bp->pc = pc;
695 bp->flags = flags;
696
2dc9f411 697 /* keep all GDB-injected breakpoints in front */
00b941e5 698 if (flags & BP_GDB) {
f0c3c505 699 QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
00b941e5 700 } else {
f0c3c505 701 QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
00b941e5 702 }
3b46e624 703
f0c3c505 704 breakpoint_invalidate(cpu, pc);
a1d1bb31 705
00b941e5 706 if (breakpoint) {
a1d1bb31 707 *breakpoint = bp;
00b941e5 708 }
4c3a88a2 709 return 0;
4c3a88a2
FB
710}
711
a1d1bb31 712/* Remove a specific breakpoint. */
b3310ab3 713int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
a1d1bb31 714{
a1d1bb31
AL
715 CPUBreakpoint *bp;
716
f0c3c505 717 QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
a1d1bb31 718 if (bp->pc == pc && bp->flags == flags) {
b3310ab3 719 cpu_breakpoint_remove_by_ref(cpu, bp);
a1d1bb31
AL
720 return 0;
721 }
7d03f82f 722 }
a1d1bb31 723 return -ENOENT;
7d03f82f
EI
724}
725
a1d1bb31 726/* Remove a specific breakpoint by reference. */
b3310ab3 727void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
4c3a88a2 728{
f0c3c505
AF
729 QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);
730
731 breakpoint_invalidate(cpu, breakpoint->pc);
a1d1bb31 732
7267c094 733 g_free(breakpoint);
a1d1bb31
AL
734}
735
736/* Remove all matching breakpoints. */
b3310ab3 737void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
a1d1bb31 738{
c0ce998e 739 CPUBreakpoint *bp, *next;
a1d1bb31 740
f0c3c505 741 QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
b3310ab3
AF
742 if (bp->flags & mask) {
743 cpu_breakpoint_remove_by_ref(cpu, bp);
744 }
c0ce998e 745 }
4c3a88a2
FB
746}
747
c33a346e
FB
748/* enable or disable single step mode. EXCP_DEBUG is returned by the
749 CPU loop after each instruction */
3825b28f 750void cpu_single_step(CPUState *cpu, int enabled)
c33a346e 751{
ed2803da
AF
752 if (cpu->singlestep_enabled != enabled) {
753 cpu->singlestep_enabled = enabled;
754 if (kvm_enabled()) {
38e478ec 755 kvm_update_guest_debug(cpu, 0);
ed2803da 756 } else {
ccbb4d44 757 /* must flush all the translated code to avoid inconsistencies */
e22a25c9 758 /* XXX: only flush what is necessary */
38e478ec 759 CPUArchState *env = cpu->env_ptr;
e22a25c9
AL
760 tb_flush(env);
761 }
c33a346e 762 }
c33a346e
FB
763}
764
a47dddd7 765void cpu_abort(CPUState *cpu, const char *fmt, ...)
7501267e
FB
766{
767 va_list ap;
493ae1f0 768 va_list ap2;
7501267e
FB
769
770 va_start(ap, fmt);
493ae1f0 771 va_copy(ap2, ap);
7501267e
FB
772 fprintf(stderr, "qemu: fatal: ");
773 vfprintf(stderr, fmt, ap);
774 fprintf(stderr, "\n");
878096ee 775 cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
93fcfe39
AL
776 if (qemu_log_enabled()) {
777 qemu_log("qemu: fatal: ");
778 qemu_log_vprintf(fmt, ap2);
779 qemu_log("\n");
a0762859 780 log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
31b1a7b4 781 qemu_log_flush();
93fcfe39 782 qemu_log_close();
924edcae 783 }
493ae1f0 784 va_end(ap2);
f9373291 785 va_end(ap);
fd052bf6
RV
786#if defined(CONFIG_USER_ONLY)
787 {
788 struct sigaction act;
789 sigfillset(&act.sa_mask);
790 act.sa_handler = SIG_DFL;
791 sigaction(SIGABRT, &act, NULL);
792 }
793#endif
7501267e
FB
794 abort();
795}
796
0124311e 797#if !defined(CONFIG_USER_ONLY)
041603fe
PB
798static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
799{
800 RAMBlock *block;
801
802 /* The list is protected by the iothread lock here. */
803 block = ram_list.mru_block;
9b8424d5 804 if (block && addr - block->offset < block->max_length) {
041603fe
PB
805 goto found;
806 }
807 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
9b8424d5 808 if (addr - block->offset < block->max_length) {
041603fe
PB
809 goto found;
810 }
811 }
812
813 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
814 abort();
815
816found:
817 ram_list.mru_block = block;
818 return block;
819}
820
a2f4d5be 821static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
d24981d3 822{
041603fe 823 ram_addr_t start1;
a2f4d5be
JQ
824 RAMBlock *block;
825 ram_addr_t end;
826
827 end = TARGET_PAGE_ALIGN(start + length);
828 start &= TARGET_PAGE_MASK;
d24981d3 829
041603fe
PB
830 block = qemu_get_ram_block(start);
831 assert(block == qemu_get_ram_block(end - 1));
1240be24 832 start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
041603fe 833 cpu_tlb_reset_dirty_all(start1, length);
d24981d3
JQ
834}
835
5579c7f3 836/* Note: start and end must be within the same ram block. */
a2f4d5be 837void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
52159192 838 unsigned client)
1ccde1cb 839{
1ccde1cb
FB
840 if (length == 0)
841 return;
c8d6f66a 842 cpu_physical_memory_clear_dirty_range_type(start, length, client);
f23db169 843
d24981d3 844 if (tcg_enabled()) {
a2f4d5be 845 tlb_reset_dirty_range_all(start, length);
5579c7f3 846 }
1ccde1cb
FB
847}
848
981fdf23 849static void cpu_physical_memory_set_dirty_tracking(bool enable)
74576198
AL
850{
851 in_migration = enable;
74576198
AL
852}
853
bb0e627a 854hwaddr memory_region_section_get_iotlb(CPUState *cpu,
149f54b5
PB
855 MemoryRegionSection *section,
856 target_ulong vaddr,
857 hwaddr paddr, hwaddr xlat,
858 int prot,
859 target_ulong *address)
e5548617 860{
a8170e5e 861 hwaddr iotlb;
e5548617
BS
862 CPUWatchpoint *wp;
863
cc5bea60 864 if (memory_region_is_ram(section->mr)) {
e5548617
BS
865 /* Normal RAM. */
866 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
149f54b5 867 + xlat;
e5548617 868 if (!section->readonly) {
b41aac4f 869 iotlb |= PHYS_SECTION_NOTDIRTY;
e5548617 870 } else {
b41aac4f 871 iotlb |= PHYS_SECTION_ROM;
e5548617
BS
872 }
873 } else {
1b3fb98f 874 iotlb = section - section->address_space->dispatch->map.sections;
149f54b5 875 iotlb += xlat;
e5548617
BS
876 }
877
878 /* Make accesses to pages with watchpoints go via the
879 watchpoint trap routines. */
ff4700b0 880 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
05068c0d 881 if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
e5548617
BS
882 /* Avoid trapping reads of pages with a write breakpoint. */
883 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
b41aac4f 884 iotlb = PHYS_SECTION_WATCH + paddr;
e5548617
BS
885 *address |= TLB_MMIO;
886 break;
887 }
888 }
889 }
890
891 return iotlb;
892}
9fa3e853
FB
893#endif /* defined(CONFIG_USER_ONLY) */
894
e2eef170 895#if !defined(CONFIG_USER_ONLY)
8da3ff18 896
c227f099 897static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 898 uint16_t section);
acc9d80b 899static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
54688b1e 900
a2b257d6
IM
901static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
902 qemu_anon_ram_alloc;
91138037
MA
903
904/*
905 * Set a custom physical guest memory allocator.
906 * Accelerators with unusual needs may need this. Hopefully, we can
907 * get rid of it eventually.
908 */
a2b257d6 909void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
91138037
MA
910{
911 phys_mem_alloc = alloc;
912}
913
53cb28cb
MA
914static uint16_t phys_section_add(PhysPageMap *map,
915 MemoryRegionSection *section)
5312bd8b 916{
68f3f65b
PB
917 /* The physical section number is ORed with a page-aligned
918 * pointer to produce the iotlb entries. Thus it should
919 * never overflow into the page-aligned value.
920 */
53cb28cb 921 assert(map->sections_nb < TARGET_PAGE_SIZE);
68f3f65b 922
53cb28cb
MA
923 if (map->sections_nb == map->sections_nb_alloc) {
924 map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
925 map->sections = g_renew(MemoryRegionSection, map->sections,
926 map->sections_nb_alloc);
5312bd8b 927 }
53cb28cb 928 map->sections[map->sections_nb] = *section;
dfde4e6e 929 memory_region_ref(section->mr);
53cb28cb 930 return map->sections_nb++;
5312bd8b
AK
931}
932
058bc4b5
PB
933static void phys_section_destroy(MemoryRegion *mr)
934{
dfde4e6e
PB
935 memory_region_unref(mr);
936
058bc4b5
PB
937 if (mr->subpage) {
938 subpage_t *subpage = container_of(mr, subpage_t, iomem);
b4fefef9 939 object_unref(OBJECT(&subpage->iomem));
058bc4b5
PB
940 g_free(subpage);
941 }
942}
943
6092666e 944static void phys_sections_free(PhysPageMap *map)
5312bd8b 945{
9affd6fc
PB
946 while (map->sections_nb > 0) {
947 MemoryRegionSection *section = &map->sections[--map->sections_nb];
058bc4b5
PB
948 phys_section_destroy(section->mr);
949 }
9affd6fc
PB
950 g_free(map->sections);
951 g_free(map->nodes);
5312bd8b
AK
952}
953
ac1970fb 954static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
0f0cb164
AK
955{
956 subpage_t *subpage;
a8170e5e 957 hwaddr base = section->offset_within_address_space
0f0cb164 958 & TARGET_PAGE_MASK;
97115a8d 959 MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
53cb28cb 960 d->map.nodes, d->map.sections);
0f0cb164
AK
961 MemoryRegionSection subsection = {
962 .offset_within_address_space = base,
052e87b0 963 .size = int128_make64(TARGET_PAGE_SIZE),
0f0cb164 964 };
a8170e5e 965 hwaddr start, end;
0f0cb164 966
f3705d53 967 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
0f0cb164 968
f3705d53 969 if (!(existing->mr->subpage)) {
acc9d80b 970 subpage = subpage_init(d->as, base);
3be91e86 971 subsection.address_space = d->as;
0f0cb164 972 subsection.mr = &subpage->iomem;
ac1970fb 973 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
53cb28cb 974 phys_section_add(&d->map, &subsection));
0f0cb164 975 } else {
f3705d53 976 subpage = container_of(existing->mr, subpage_t, iomem);
0f0cb164
AK
977 }
978 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
052e87b0 979 end = start + int128_get64(section->size) - 1;
53cb28cb
MA
980 subpage_register(subpage, start, end,
981 phys_section_add(&d->map, section));
0f0cb164
AK
982}
983
984
052e87b0
PB
985static void register_multipage(AddressSpaceDispatch *d,
986 MemoryRegionSection *section)
33417e70 987{
a8170e5e 988 hwaddr start_addr = section->offset_within_address_space;
53cb28cb 989 uint16_t section_index = phys_section_add(&d->map, section);
052e87b0
PB
990 uint64_t num_pages = int128_get64(int128_rshift(section->size,
991 TARGET_PAGE_BITS));
dd81124b 992
733d5ef5
PB
993 assert(num_pages);
994 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
33417e70
FB
995}
996
ac1970fb 997static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
0f0cb164 998{
89ae337a 999 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
00752703 1000 AddressSpaceDispatch *d = as->next_dispatch;
99b9cc06 1001 MemoryRegionSection now = *section, remain = *section;
052e87b0 1002 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
0f0cb164 1003
733d5ef5
PB
1004 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1005 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1006 - now.offset_within_address_space;
1007
052e87b0 1008 now.size = int128_min(int128_make64(left), now.size);
ac1970fb 1009 register_subpage(d, &now);
733d5ef5 1010 } else {
052e87b0 1011 now.size = int128_zero();
733d5ef5 1012 }
052e87b0
PB
1013 while (int128_ne(remain.size, now.size)) {
1014 remain.size = int128_sub(remain.size, now.size);
1015 remain.offset_within_address_space += int128_get64(now.size);
1016 remain.offset_within_region += int128_get64(now.size);
69b67646 1017 now = remain;
052e87b0 1018 if (int128_lt(remain.size, page_size)) {
733d5ef5 1019 register_subpage(d, &now);
88266249 1020 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
052e87b0 1021 now.size = page_size;
ac1970fb 1022 register_subpage(d, &now);
69b67646 1023 } else {
052e87b0 1024 now.size = int128_and(now.size, int128_neg(page_size));
ac1970fb 1025 register_multipage(d, &now);
69b67646 1026 }
0f0cb164
AK
1027 }
1028}
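/* Worked example (hypothetical numbers, assuming a 4 KiB target page; not
 * upstream text): a section at offset_within_address_space 0x1800 with size
 * 0x3000 is split by mem_add() into a subpage for [0x1800, 0x2000), one
 * multipage registration for the two full pages [0x2000, 0x4000), and a
 * trailing subpage for [0x4000, 0x4800).
 */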
1029
62a2744c
SY
1030void qemu_flush_coalesced_mmio_buffer(void)
1031{
1032 if (kvm_enabled())
1033 kvm_flush_coalesced_mmio_buffer();
1034}
1035
b2a8658e
UD
1036void qemu_mutex_lock_ramlist(void)
1037{
1038 qemu_mutex_lock(&ram_list.mutex);
1039}
1040
1041void qemu_mutex_unlock_ramlist(void)
1042{
1043 qemu_mutex_unlock(&ram_list.mutex);
1044}
1045
e1e84ba0 1046#ifdef __linux__
c902760f
MT
1047
1048#include <sys/vfs.h>
1049
1050#define HUGETLBFS_MAGIC 0x958458f6
1051
fc7a5800 1052static long gethugepagesize(const char *path, Error **errp)
c902760f
MT
1053{
1054 struct statfs fs;
1055 int ret;
1056
1057 do {
9742bf26 1058 ret = statfs(path, &fs);
c902760f
MT
1059 } while (ret != 0 && errno == EINTR);
1060
1061 if (ret != 0) {
fc7a5800
HT
1062 error_setg_errno(errp, errno, "failed to get page size of file %s",
1063 path);
9742bf26 1064 return 0;
c902760f
MT
1065 }
1066
1067 if (fs.f_type != HUGETLBFS_MAGIC)
9742bf26 1068 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
c902760f
MT
1069
1070 return fs.f_bsize;
1071}
1072
04b16653
AW
1073static void *file_ram_alloc(RAMBlock *block,
1074 ram_addr_t memory,
7f56e740
PB
1075 const char *path,
1076 Error **errp)
c902760f
MT
1077{
1078 char *filename;
8ca761f6
PF
1079 char *sanitized_name;
1080 char *c;
557529dd 1081 void *area = NULL;
c902760f 1082 int fd;
557529dd 1083 uint64_t hpagesize;
fc7a5800 1084 Error *local_err = NULL;
c902760f 1085
fc7a5800
HT
1086 hpagesize = gethugepagesize(path, &local_err);
1087 if (local_err) {
1088 error_propagate(errp, local_err);
f9a49dfa 1089 goto error;
c902760f 1090 }
a2b257d6 1091 block->mr->align = hpagesize;
c902760f
MT
1092
1093 if (memory < hpagesize) {
557529dd
HT
1094 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1095 "or larger than huge page size 0x%" PRIx64,
1096 memory, hpagesize);
1097 goto error;
c902760f
MT
1098 }
1099
1100 if (kvm_enabled() && !kvm_has_sync_mmu()) {
7f56e740
PB
1101 error_setg(errp,
1102 "host lacks kvm mmu notifiers, -mem-path unsupported");
f9a49dfa 1103 goto error;
c902760f
MT
1104 }
1105
8ca761f6 1106 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
83234bf2 1107 sanitized_name = g_strdup(memory_region_name(block->mr));
8ca761f6
PF
1108 for (c = sanitized_name; *c != '\0'; c++) {
1109 if (*c == '/')
1110 *c = '_';
1111 }
1112
1113 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1114 sanitized_name);
1115 g_free(sanitized_name);
c902760f
MT
1116
1117 fd = mkstemp(filename);
1118 if (fd < 0) {
7f56e740
PB
1119 error_setg_errno(errp, errno,
1120 "unable to create backing store for hugepages");
e4ada482 1121 g_free(filename);
f9a49dfa 1122 goto error;
c902760f
MT
1123 }
1124 unlink(filename);
e4ada482 1125 g_free(filename);
c902760f
MT
1126
1127 memory = (memory+hpagesize-1) & ~(hpagesize-1);
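    /* Illustrative note, not upstream text: this rounds the size up to a
     * whole number of huge pages, e.g. with hpagesize == 2 MiB (0x200000) a
     * request of 0x6401000 bytes (100 MiB + 4 KiB) becomes 0x6600000 bytes
     * (102 MiB).
     */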
1128
1129 /*
1130 * ftruncate is not supported by hugetlbfs in older
1131 * hosts, so don't bother bailing out on errors.
1132 * If anything goes wrong with it under other filesystems,
1133 * mmap will fail.
1134 */
7f56e740 1135 if (ftruncate(fd, memory)) {
9742bf26 1136 perror("ftruncate");
7f56e740 1137 }
c902760f 1138
dbcb8981
PB
1139 area = mmap(0, memory, PROT_READ | PROT_WRITE,
1140 (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE),
1141 fd, 0);
c902760f 1142 if (area == MAP_FAILED) {
7f56e740
PB
1143 error_setg_errno(errp, errno,
1144 "unable to map backing store for hugepages");
9742bf26 1145 close(fd);
f9a49dfa 1146 goto error;
c902760f 1147 }
ef36fa14
MT
1148
1149 if (mem_prealloc) {
38183310 1150 os_mem_prealloc(fd, area, memory);
ef36fa14
MT
1151 }
1152
04b16653 1153 block->fd = fd;
c902760f 1154 return area;
f9a49dfa
MT
1155
1156error:
1157 if (mem_prealloc) {
e4d9df4f 1158 error_report("%s\n", error_get_pretty(*errp));
f9a49dfa
MT
1159 exit(1);
1160 }
1161 return NULL;
c902760f
MT
1162}
1163#endif
1164
d17b5288 1165static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653
AW
1166{
1167 RAMBlock *block, *next_block;
3e837b2c 1168 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
04b16653 1169
49cd9ac6
SH
1170 assert(size != 0); /* it would hand out same offset multiple times */
1171
a3161038 1172 if (QTAILQ_EMPTY(&ram_list.blocks))
04b16653
AW
1173 return 0;
1174
a3161038 1175 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
f15fbc4b 1176 ram_addr_t end, next = RAM_ADDR_MAX;
04b16653 1177
62be4e3a 1178 end = block->offset + block->max_length;
04b16653 1179
a3161038 1180 QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
04b16653
AW
1181 if (next_block->offset >= end) {
1182 next = MIN(next, next_block->offset);
1183 }
1184 }
1185 if (next - end >= size && next - end < mingap) {
3e837b2c 1186 offset = end;
04b16653
AW
1187 mingap = next - end;
1188 }
1189 }
3e837b2c
AW
1190
1191 if (offset == RAM_ADDR_MAX) {
1192 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1193 (uint64_t)size);
1194 abort();
1195 }
1196
04b16653
AW
1197 return offset;
1198}
1199
652d7ec2 1200ram_addr_t last_ram_offset(void)
d17b5288
AW
1201{
1202 RAMBlock *block;
1203 ram_addr_t last = 0;
1204
a3161038 1205 QTAILQ_FOREACH(block, &ram_list.blocks, next)
62be4e3a 1206 last = MAX(last, block->offset + block->max_length);
d17b5288
AW
1207
1208 return last;
1209}
1210
ddb97f1d
JB
1211static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1212{
1213 int ret;
ddb97f1d
JB
1214
1215 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
2ff3de68
MA
1216 if (!qemu_opt_get_bool(qemu_get_machine_opts(),
1217 "dump-guest-core", true)) {
ddb97f1d
JB
1218 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1219 if (ret) {
1220 perror("qemu_madvise");
1221 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1222 "but dump_guest_core=off specified\n");
1223 }
1224 }
1225}
1226
20cfe881 1227static RAMBlock *find_ram_block(ram_addr_t addr)
84b89d78 1228{
20cfe881 1229 RAMBlock *block;
84b89d78 1230
a3161038 1231 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
c5705a77 1232 if (block->offset == addr) {
20cfe881 1233 return block;
c5705a77
AK
1234 }
1235 }
20cfe881
HT
1236
1237 return NULL;
1238}
1239
1240void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1241{
1242 RAMBlock *new_block = find_ram_block(addr);
1243 RAMBlock *block;
1244
c5705a77
AK
1245 assert(new_block);
1246 assert(!new_block->idstr[0]);
84b89d78 1247
09e5ab63
AL
1248 if (dev) {
1249 char *id = qdev_get_dev_path(dev);
84b89d78
CM
1250 if (id) {
1251 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
7267c094 1252 g_free(id);
84b89d78
CM
1253 }
1254 }
1255 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1256
b2a8658e
UD
1257 /* This assumes the iothread lock is taken here too. */
1258 qemu_mutex_lock_ramlist();
a3161038 1259 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
c5705a77 1260 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
84b89d78
CM
1261 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1262 new_block->idstr);
1263 abort();
1264 }
1265 }
b2a8658e 1266 qemu_mutex_unlock_ramlist();
c5705a77
AK
1267}
1268
20cfe881
HT
1269void qemu_ram_unset_idstr(ram_addr_t addr)
1270{
1271 RAMBlock *block = find_ram_block(addr);
1272
1273 if (block) {
1274 memset(block->idstr, 0, sizeof(block->idstr));
1275 }
1276}
1277
8490fc78
LC
1278static int memory_try_enable_merging(void *addr, size_t len)
1279{
2ff3de68 1280 if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
8490fc78
LC
1281 /* disabled by the user */
1282 return 0;
1283 }
1284
1285 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1286}
1287
62be4e3a
MT
1288/* Only legal before the guest might have detected the memory size: e.g. on
 1289 * incoming migration, or right after reset.
 1290 *
 1291 * As the memory core doesn't know how memory is accessed, it is up to the
 1292 * resize callback to update device state and/or add assertions to detect
1293 * misuse, if necessary.
1294 */
1295int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
1296{
1297 RAMBlock *block = find_ram_block(base);
1298
1299 assert(block);
1300
1301 if (block->used_length == newsize) {
1302 return 0;
1303 }
1304
1305 if (!(block->flags & RAM_RESIZEABLE)) {
1306 error_setg_errno(errp, EINVAL,
1307 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1308 " in != 0x" RAM_ADDR_FMT, block->idstr,
1309 newsize, block->used_length);
1310 return -EINVAL;
1311 }
1312
1313 if (block->max_length < newsize) {
1314 error_setg_errno(errp, EINVAL,
1315 "Length too large: %s: 0x" RAM_ADDR_FMT
1316 " > 0x" RAM_ADDR_FMT, block->idstr,
1317 newsize, block->max_length);
1318 return -EINVAL;
1319 }
1320
1321 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1322 block->used_length = newsize;
1323 cpu_physical_memory_set_dirty_range(block->offset, block->used_length);
1324 memory_region_set_size(block->mr, newsize);
1325 if (block->resized) {
1326 block->resized(block->idstr, newsize, block->host);
1327 }
1328 return 0;
1329}
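/* Illustrative caller sketch, not upstream code (block_offset and
 * new_used_size are hypothetical names):
 *
 *     Error *err = NULL;
 *     if (qemu_ram_resize(block_offset, new_used_size, &err) < 0) {
 *         error_report("%s", error_get_pretty(err));
 *         error_free(err);
 *     }
 *
 * The block must have been created with the RAM_RESIZEABLE flag and
 * new_used_size must not exceed its max_length; otherwise -EINVAL is
 * returned, as above.
 */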
1330
ef701d7b 1331static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
c5705a77 1332{
e1c57ab8 1333 RAMBlock *block;
2152f5ca
JQ
1334 ram_addr_t old_ram_size, new_ram_size;
1335
1336 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
c5705a77 1337
b2a8658e
UD
1338 /* This assumes the iothread lock is taken here too. */
1339 qemu_mutex_lock_ramlist();
9b8424d5 1340 new_block->offset = find_ram_offset(new_block->max_length);
e1c57ab8
PB
1341
1342 if (!new_block->host) {
1343 if (xen_enabled()) {
9b8424d5
MT
1344 xen_ram_alloc(new_block->offset, new_block->max_length,
1345 new_block->mr);
e1c57ab8 1346 } else {
9b8424d5 1347 new_block->host = phys_mem_alloc(new_block->max_length,
a2b257d6 1348 &new_block->mr->align);
39228250 1349 if (!new_block->host) {
ef701d7b
HT
1350 error_setg_errno(errp, errno,
1351 "cannot set up guest memory '%s'",
1352 memory_region_name(new_block->mr));
1353 qemu_mutex_unlock_ramlist();
1354 return -1;
39228250 1355 }
9b8424d5 1356 memory_try_enable_merging(new_block->host, new_block->max_length);
6977dfe6 1357 }
c902760f 1358 }
94a6b54f 1359
abb26d63
PB
1360 /* Keep the list sorted from biggest to smallest block. */
1361 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
9b8424d5 1362 if (block->max_length < new_block->max_length) {
abb26d63
PB
1363 break;
1364 }
1365 }
1366 if (block) {
1367 QTAILQ_INSERT_BEFORE(block, new_block, next);
1368 } else {
1369 QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1370 }
0d6d3c87 1371 ram_list.mru_block = NULL;
94a6b54f 1372
f798b07f 1373 ram_list.version++;
b2a8658e 1374 qemu_mutex_unlock_ramlist();
f798b07f 1375
2152f5ca
JQ
1376 new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1377
1378 if (new_ram_size > old_ram_size) {
1ab4c8ce
JQ
1379 int i;
1380 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1381 ram_list.dirty_memory[i] =
1382 bitmap_zero_extend(ram_list.dirty_memory[i],
1383 old_ram_size, new_ram_size);
1384 }
2152f5ca 1385 }
9b8424d5
MT
1386 cpu_physical_memory_set_dirty_range(new_block->offset,
1387 new_block->used_length);
94a6b54f 1388
9b8424d5
MT
1389 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1390 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1391 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
ddb97f1d 1392
e1c57ab8 1393 if (kvm_enabled()) {
9b8424d5 1394 kvm_setup_guest_memory(new_block->host, new_block->max_length);
e1c57ab8 1395 }
6f0437e8 1396
94a6b54f
PB
1397 return new_block->offset;
1398}
e9a1ab19 1399
0b183fc8 1400#ifdef __linux__
e1c57ab8 1401ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
dbcb8981 1402 bool share, const char *mem_path,
7f56e740 1403 Error **errp)
e1c57ab8
PB
1404{
1405 RAMBlock *new_block;
ef701d7b
HT
1406 ram_addr_t addr;
1407 Error *local_err = NULL;
e1c57ab8
PB
1408
1409 if (xen_enabled()) {
7f56e740
PB
1410 error_setg(errp, "-mem-path not supported with Xen");
1411 return -1;
e1c57ab8
PB
1412 }
1413
1414 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1415 /*
1416 * file_ram_alloc() needs to allocate just like
1417 * phys_mem_alloc, but we haven't bothered to provide
1418 * a hook there.
1419 */
7f56e740
PB
1420 error_setg(errp,
1421 "-mem-path not supported with this accelerator");
1422 return -1;
e1c57ab8
PB
1423 }
1424
1425 size = TARGET_PAGE_ALIGN(size);
1426 new_block = g_malloc0(sizeof(*new_block));
1427 new_block->mr = mr;
9b8424d5
MT
1428 new_block->used_length = size;
1429 new_block->max_length = size;
dbcb8981 1430 new_block->flags = share ? RAM_SHARED : 0;
7f56e740
PB
1431 new_block->host = file_ram_alloc(new_block, size,
1432 mem_path, errp);
1433 if (!new_block->host) {
1434 g_free(new_block);
1435 return -1;
1436 }
1437
ef701d7b
HT
1438 addr = ram_block_add(new_block, &local_err);
1439 if (local_err) {
1440 g_free(new_block);
1441 error_propagate(errp, local_err);
1442 return -1;
1443 }
1444 return addr;
e1c57ab8 1445}
0b183fc8 1446#endif
e1c57ab8 1447
62be4e3a
MT
1448static
1449ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1450 void (*resized)(const char*,
1451 uint64_t length,
1452 void *host),
1453 void *host, bool resizeable,
ef701d7b 1454 MemoryRegion *mr, Error **errp)
e1c57ab8
PB
1455{
1456 RAMBlock *new_block;
ef701d7b
HT
1457 ram_addr_t addr;
1458 Error *local_err = NULL;
e1c57ab8
PB
1459
1460 size = TARGET_PAGE_ALIGN(size);
62be4e3a 1461 max_size = TARGET_PAGE_ALIGN(max_size);
e1c57ab8
PB
1462 new_block = g_malloc0(sizeof(*new_block));
1463 new_block->mr = mr;
62be4e3a 1464 new_block->resized = resized;
9b8424d5
MT
1465 new_block->used_length = size;
1466 new_block->max_length = max_size;
62be4e3a 1467 assert(max_size >= size);
e1c57ab8
PB
1468 new_block->fd = -1;
1469 new_block->host = host;
1470 if (host) {
7bd4f430 1471 new_block->flags |= RAM_PREALLOC;
e1c57ab8 1472 }
62be4e3a
MT
1473 if (resizeable) {
1474 new_block->flags |= RAM_RESIZEABLE;
1475 }
ef701d7b
HT
1476 addr = ram_block_add(new_block, &local_err);
1477 if (local_err) {
1478 g_free(new_block);
1479 error_propagate(errp, local_err);
1480 return -1;
1481 }
1482 return addr;
e1c57ab8
PB
1483}
1484
62be4e3a
MT
1485ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1486 MemoryRegion *mr, Error **errp)
1487{
1488 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1489}
1490
ef701d7b 1491ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
6977dfe6 1492{
62be4e3a
MT
1493 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1494}
1495
1496ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
1497 void (*resized)(const char*,
1498 uint64_t length,
1499 void *host),
1500 MemoryRegion *mr, Error **errp)
1501{
1502 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
6977dfe6
YT
1503}
1504
1f2e98b6
AW
1505void qemu_ram_free_from_ptr(ram_addr_t addr)
1506{
1507 RAMBlock *block;
1508
b2a8658e
UD
1509 /* This assumes the iothread lock is taken here too. */
1510 qemu_mutex_lock_ramlist();
a3161038 1511 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1f2e98b6 1512 if (addr == block->offset) {
a3161038 1513 QTAILQ_REMOVE(&ram_list.blocks, block, next);
0d6d3c87 1514 ram_list.mru_block = NULL;
f798b07f 1515 ram_list.version++;
7267c094 1516 g_free(block);
b2a8658e 1517 break;
1f2e98b6
AW
1518 }
1519 }
b2a8658e 1520 qemu_mutex_unlock_ramlist();
1f2e98b6
AW
1521}
1522
c227f099 1523void qemu_ram_free(ram_addr_t addr)
e9a1ab19 1524{
04b16653
AW
1525 RAMBlock *block;
1526
b2a8658e
UD
1527 /* This assumes the iothread lock is taken here too. */
1528 qemu_mutex_lock_ramlist();
a3161038 1529 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
04b16653 1530 if (addr == block->offset) {
a3161038 1531 QTAILQ_REMOVE(&ram_list.blocks, block, next);
0d6d3c87 1532 ram_list.mru_block = NULL;
f798b07f 1533 ram_list.version++;
7bd4f430 1534 if (block->flags & RAM_PREALLOC) {
cd19cfa2 1535 ;
dfeaf2ab
MA
1536 } else if (xen_enabled()) {
1537 xen_invalidate_map_cache_entry(block->host);
089f3f76 1538#ifndef _WIN32
3435f395 1539 } else if (block->fd >= 0) {
9b8424d5 1540 munmap(block->host, block->max_length);
3435f395 1541 close(block->fd);
089f3f76 1542#endif
04b16653 1543 } else {
9b8424d5 1544 qemu_anon_ram_free(block->host, block->max_length);
04b16653 1545 }
7267c094 1546 g_free(block);
b2a8658e 1547 break;
04b16653
AW
1548 }
1549 }
b2a8658e 1550 qemu_mutex_unlock_ramlist();
04b16653 1551
e9a1ab19
FB
1552}
1553
cd19cfa2
HY
1554#ifndef _WIN32
1555void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1556{
1557 RAMBlock *block;
1558 ram_addr_t offset;
1559 int flags;
1560 void *area, *vaddr;
1561
a3161038 1562 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
cd19cfa2 1563 offset = addr - block->offset;
9b8424d5 1564 if (offset < block->max_length) {
1240be24 1565 vaddr = ramblock_ptr(block, offset);
7bd4f430 1566 if (block->flags & RAM_PREALLOC) {
cd19cfa2 1567 ;
dfeaf2ab
MA
1568 } else if (xen_enabled()) {
1569 abort();
cd19cfa2
HY
1570 } else {
1571 flags = MAP_FIXED;
1572 munmap(vaddr, length);
3435f395 1573 if (block->fd >= 0) {
dbcb8981
PB
1574 flags |= (block->flags & RAM_SHARED ?
1575 MAP_SHARED : MAP_PRIVATE);
3435f395
MA
1576 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1577 flags, block->fd, offset);
cd19cfa2 1578 } else {
2eb9fbaa
MA
1579 /*
1580 * Remap needs to match alloc. Accelerators that
1581 * set phys_mem_alloc never remap. If they did,
1582 * we'd need a remap hook here.
1583 */
1584 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1585
cd19cfa2
HY
1586 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1587 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1588 flags, -1, 0);
cd19cfa2
HY
1589 }
1590 if (area != vaddr) {
f15fbc4b
AP
1591 fprintf(stderr, "Could not remap addr: "
1592 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
cd19cfa2
HY
1593 length, addr);
1594 exit(1);
1595 }
8490fc78 1596 memory_try_enable_merging(vaddr, length);
ddb97f1d 1597 qemu_ram_setup_dump(vaddr, length);
cd19cfa2
HY
1598 }
1599 return;
1600 }
1601 }
1602}
1603#endif /* !_WIN32 */
1604
a35ba7be
PB
1605int qemu_get_ram_fd(ram_addr_t addr)
1606{
1607 RAMBlock *block = qemu_get_ram_block(addr);
1608
1609 return block->fd;
1610}
1611
3fd74b84
DM
1612void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
1613{
1614 RAMBlock *block = qemu_get_ram_block(addr);
1615
1240be24 1616 return ramblock_ptr(block, 0);
3fd74b84
DM
1617}
1618
1b5ec234
PB
1619/* Return a host pointer to ram allocated with qemu_ram_alloc.
1620 With the exception of the softmmu code in this file, this should
1621 only be used for local memory (e.g. video ram) that the device owns,
1622 and knows it isn't going to access beyond the end of the block.
1623
1624 It should not be used for general purpose DMA.
1625 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1626 */
1627void *qemu_get_ram_ptr(ram_addr_t addr)
1628{
1629 RAMBlock *block = qemu_get_ram_block(addr);
1630
0d6d3c87
PB
1631 if (xen_enabled()) {
1632 /* We need to check if the requested address is in the RAM
1633 * because we don't want to map the entire memory in QEMU.
1634 * In that case just map until the end of the page.
1635 */
1636 if (block->offset == 0) {
1637 return xen_map_cache(addr, 0, 0);
1638 } else if (block->host == NULL) {
1639 block->host =
9b8424d5 1640 xen_map_cache(block->offset, block->max_length, 1);
0d6d3c87
PB
1641 }
1642 }
1240be24 1643 return ramblock_ptr(block, addr - block->offset);
dc828ca1
PB
1644}
1645
38bee5dc
SS
1646/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1647 * but takes a size argument */
cb85f7ab 1648static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
38bee5dc 1649{
8ab934f9
SS
1650 if (*size == 0) {
1651 return NULL;
1652 }
868bb33f 1653 if (xen_enabled()) {
e41d7c69 1654 return xen_map_cache(addr, *size, 1);
868bb33f 1655 } else {
38bee5dc
SS
1656 RAMBlock *block;
1657
a3161038 1658 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
9b8424d5
MT
1659 if (addr - block->offset < block->max_length) {
1660 if (addr - block->offset + *size > block->max_length)
1661 *size = block->max_length - addr + block->offset;
1240be24 1662 return ramblock_ptr(block, addr - block->offset);
38bee5dc
SS
1663 }
1664 }
1665
1666 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1667 abort();
38bee5dc
SS
1668 }
1669}
1670
7443b437
PB
1671/* Some of the softmmu routines need to translate from a host pointer
1672 (typically a TLB entry) back to a ram offset. */
1b5ec234 1673MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 1674{
94a6b54f
PB
1675 RAMBlock *block;
1676 uint8_t *host = ptr;
1677
868bb33f 1678 if (xen_enabled()) {
e41d7c69 1679 *ram_addr = xen_ram_addr_from_mapcache(ptr);
1b5ec234 1680 return qemu_get_ram_block(*ram_addr)->mr;
712c2b41
SS
1681 }
1682
23887b79 1683 block = ram_list.mru_block;
9b8424d5 1684 if (block && block->host && host - block->host < block->max_length) {
23887b79
PB
1685 goto found;
1686 }
1687
a3161038 1688 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
432d268c
JN
1689 /* This case happens when the block is not mapped. */
1690 if (block->host == NULL) {
1691 continue;
1692 }
9b8424d5 1693 if (host - block->host < block->max_length) {
23887b79 1694 goto found;
f471a17e 1695 }
94a6b54f 1696 }
432d268c 1697
1b5ec234 1698 return NULL;
23887b79
PB
1699
1700found:
1701 *ram_addr = block->offset + (host - block->host);
1b5ec234 1702 return block->mr;
e890261f 1703}
f471a17e 1704
a8170e5e 1705static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
0e0df1e2 1706 uint64_t val, unsigned size)
9fa3e853 1707{
52159192 1708 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
0e0df1e2 1709 tb_invalidate_phys_page_fast(ram_addr, size);
3a7d929e 1710 }
0e0df1e2
AK
1711 switch (size) {
1712 case 1:
1713 stb_p(qemu_get_ram_ptr(ram_addr), val);
1714 break;
1715 case 2:
1716 stw_p(qemu_get_ram_ptr(ram_addr), val);
1717 break;
1718 case 4:
1719 stl_p(qemu_get_ram_ptr(ram_addr), val);
1720 break;
1721 default:
1722 abort();
3a7d929e 1723 }
6886867e 1724 cpu_physical_memory_set_dirty_range_nocode(ram_addr, size);
f23db169
FB
1725 /* we remove the notdirty callback only if the code has been
1726 flushed */
a2cd8c85 1727 if (!cpu_physical_memory_is_clean(ram_addr)) {
4917cf44 1728 CPUArchState *env = current_cpu->env_ptr;
93afeade 1729 tlb_set_dirty(env, current_cpu->mem_io_vaddr);
4917cf44 1730 }
9fa3e853
FB
1731}
1732
b018ddf6
PB
1733static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1734 unsigned size, bool is_write)
1735{
1736 return is_write;
1737}
1738
0e0df1e2 1739static const MemoryRegionOps notdirty_mem_ops = {
0e0df1e2 1740 .write = notdirty_mem_write,
b018ddf6 1741 .valid.accepts = notdirty_mem_accepts,
0e0df1e2 1742 .endianness = DEVICE_NATIVE_ENDIAN,
1ccde1cb
FB
1743};
1744
0f459d16 1745/* Generate a debug exception if a watchpoint has been hit. */
05068c0d 1746static void check_watchpoint(int offset, int len, int flags)
0f459d16 1747{
93afeade
AF
1748 CPUState *cpu = current_cpu;
1749 CPUArchState *env = cpu->env_ptr;
06d55cc1 1750 target_ulong pc, cs_base;
0f459d16 1751 target_ulong vaddr;
a1d1bb31 1752 CPUWatchpoint *wp;
06d55cc1 1753 int cpu_flags;
0f459d16 1754
ff4700b0 1755 if (cpu->watchpoint_hit) {
06d55cc1
AL
1756 /* We re-entered the check after replacing the TB. Now raise
1757 * the debug interrupt so that it will trigger after the
1758 * current instruction. */
93afeade 1759 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
06d55cc1
AL
1760 return;
1761 }
93afeade 1762 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
ff4700b0 1763 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
05068c0d
PM
1764 if (cpu_watchpoint_address_matches(wp, vaddr, len)
1765 && (wp->flags & flags)) {
08225676
PM
1766 if (flags == BP_MEM_READ) {
1767 wp->flags |= BP_WATCHPOINT_HIT_READ;
1768 } else {
1769 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
1770 }
1771 wp->hitaddr = vaddr;
ff4700b0
AF
1772 if (!cpu->watchpoint_hit) {
1773 cpu->watchpoint_hit = wp;
239c51a5 1774 tb_check_watchpoint(cpu);
6e140f28 1775 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
27103424 1776 cpu->exception_index = EXCP_DEBUG;
5638d180 1777 cpu_loop_exit(cpu);
6e140f28
AL
1778 } else {
1779 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
648f034c 1780 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
0ea8cb88 1781 cpu_resume_from_signal(cpu, NULL);
6e140f28 1782 }
06d55cc1 1783 }
6e140f28
AL
1784 } else {
1785 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
1786 }
1787 }
1788}
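/* Hedged sketch (not from this file): how a debugger front end might arm the
 * watchpoint machinery that check_watchpoint() services.  The
 * cpu_watchpoint_insert() signature is an assumption inferred from the
 * CPUWatchpoint usage above; the address and length are illustrative only. */
static void example_arm_write_watchpoint(CPUState *cpu, vaddr addr, vaddr len)
{
    CPUWatchpoint *wp;

    /* Assumed API: stop before any write that hits [addr, addr + len). */
    if (cpu_watchpoint_insert(cpu, addr, len,
                              BP_MEM_WRITE | BP_STOP_BEFORE_ACCESS, &wp) < 0) {
        /* Insertion failed; a real caller would report the error. */
    }
}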
1789
6658ffb8
PB
1790/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1791 so these check for a hit then pass through to the normal out-of-line
1792 phys routines. */
a8170e5e 1793static uint64_t watch_mem_read(void *opaque, hwaddr addr,
1ec9b909 1794 unsigned size)
6658ffb8 1795{
05068c0d 1796 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, BP_MEM_READ);
1ec9b909 1797 switch (size) {
2c17449b 1798 case 1: return ldub_phys(&address_space_memory, addr);
41701aa4 1799 case 2: return lduw_phys(&address_space_memory, addr);
fdfba1a2 1800 case 4: return ldl_phys(&address_space_memory, addr);
1ec9b909
AK
1801 default: abort();
1802 }
6658ffb8
PB
1803}
1804
a8170e5e 1805static void watch_mem_write(void *opaque, hwaddr addr,
1ec9b909 1806 uint64_t val, unsigned size)
6658ffb8 1807{
05068c0d 1808 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, BP_MEM_WRITE);
1ec9b909 1809 switch (size) {
67364150 1810 case 1:
db3be60d 1811 stb_phys(&address_space_memory, addr, val);
67364150
MF
1812 break;
1813 case 2:
5ce5944d 1814 stw_phys(&address_space_memory, addr, val);
67364150
MF
1815 break;
1816 case 4:
ab1da857 1817 stl_phys(&address_space_memory, addr, val);
67364150 1818 break;
1ec9b909
AK
1819 default: abort();
1820 }
6658ffb8
PB
1821}
1822
1ec9b909
AK
1823static const MemoryRegionOps watch_mem_ops = {
1824 .read = watch_mem_read,
1825 .write = watch_mem_write,
1826 .endianness = DEVICE_NATIVE_ENDIAN,
6658ffb8 1827};
6658ffb8 1828
a8170e5e 1829static uint64_t subpage_read(void *opaque, hwaddr addr,
70c68e44 1830 unsigned len)
db7b5426 1831{
acc9d80b 1832 subpage_t *subpage = opaque;
ff6cff75 1833 uint8_t buf[8];
791af8c8 1834
db7b5426 1835#if defined(DEBUG_SUBPAGE)
016e9d62 1836 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
acc9d80b 1837 subpage, len, addr);
db7b5426 1838#endif
acc9d80b
JK
1839 address_space_read(subpage->as, addr + subpage->base, buf, len);
1840 switch (len) {
1841 case 1:
1842 return ldub_p(buf);
1843 case 2:
1844 return lduw_p(buf);
1845 case 4:
1846 return ldl_p(buf);
ff6cff75
PB
1847 case 8:
1848 return ldq_p(buf);
acc9d80b
JK
1849 default:
1850 abort();
1851 }
db7b5426
BS
1852}
1853
a8170e5e 1854static void subpage_write(void *opaque, hwaddr addr,
70c68e44 1855 uint64_t value, unsigned len)
db7b5426 1856{
acc9d80b 1857 subpage_t *subpage = opaque;
ff6cff75 1858 uint8_t buf[8];
acc9d80b 1859
db7b5426 1860#if defined(DEBUG_SUBPAGE)
016e9d62 1861 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
acc9d80b
JK
1862 " value %"PRIx64"\n",
1863 __func__, subpage, len, addr, value);
db7b5426 1864#endif
acc9d80b
JK
1865 switch (len) {
1866 case 1:
1867 stb_p(buf, value);
1868 break;
1869 case 2:
1870 stw_p(buf, value);
1871 break;
1872 case 4:
1873 stl_p(buf, value);
1874 break;
ff6cff75
PB
1875 case 8:
1876 stq_p(buf, value);
1877 break;
acc9d80b
JK
1878 default:
1879 abort();
1880 }
1881 address_space_write(subpage->as, addr + subpage->base, buf, len);
db7b5426
BS
1882}
1883
c353e4cc 1884static bool subpage_accepts(void *opaque, hwaddr addr,
016e9d62 1885 unsigned len, bool is_write)
c353e4cc 1886{
acc9d80b 1887 subpage_t *subpage = opaque;
c353e4cc 1888#if defined(DEBUG_SUBPAGE)
016e9d62 1889 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
acc9d80b 1890 __func__, subpage, is_write ? 'w' : 'r', len, addr);
c353e4cc
PB
1891#endif
1892
acc9d80b 1893 return address_space_access_valid(subpage->as, addr + subpage->base,
016e9d62 1894 len, is_write);
c353e4cc
PB
1895}
1896
70c68e44
AK
1897static const MemoryRegionOps subpage_ops = {
1898 .read = subpage_read,
1899 .write = subpage_write,
ff6cff75
PB
1900 .impl.min_access_size = 1,
1901 .impl.max_access_size = 8,
1902 .valid.min_access_size = 1,
1903 .valid.max_access_size = 8,
c353e4cc 1904 .valid.accepts = subpage_accepts,
70c68e44 1905 .endianness = DEVICE_NATIVE_ENDIAN,
db7b5426
BS
1906};
1907
c227f099 1908static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 1909 uint16_t section)
db7b5426
BS
1910{
1911 int idx, eidx;
1912
1913 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1914 return -1;
1915 idx = SUBPAGE_IDX(start);
1916 eidx = SUBPAGE_IDX(end);
1917#if defined(DEBUG_SUBPAGE)
016e9d62
AK
1918 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
1919 __func__, mmio, start, end, idx, eidx, section);
db7b5426 1920#endif
db7b5426 1921 for (; idx <= eidx; idx++) {
5312bd8b 1922 mmio->sub_section[idx] = section;
db7b5426
BS
1923 }
1924
1925 return 0;
1926}
1927
acc9d80b 1928static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
db7b5426 1929{
c227f099 1930 subpage_t *mmio;
db7b5426 1931
7267c094 1932 mmio = g_malloc0(sizeof(subpage_t));
1eec614b 1933
acc9d80b 1934 mmio->as = as;
1eec614b 1935 mmio->base = base;
2c9b15ca 1936 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
b4fefef9 1937 NULL, TARGET_PAGE_SIZE);
b3b00c78 1938 mmio->iomem.subpage = true;
db7b5426 1939#if defined(DEBUG_SUBPAGE)
016e9d62
AK
1940 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
1941 mmio, base, TARGET_PAGE_SIZE);
db7b5426 1942#endif
b41aac4f 1943 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
db7b5426
BS
1944
1945 return mmio;
1946}
1947
a656e22f
PC
1948static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
1949 MemoryRegion *mr)
5312bd8b 1950{
a656e22f 1951 assert(as);
5312bd8b 1952 MemoryRegionSection section = {
a656e22f 1953 .address_space = as,
5312bd8b
AK
1954 .mr = mr,
1955 .offset_within_address_space = 0,
1956 .offset_within_region = 0,
052e87b0 1957 .size = int128_2_64(),
5312bd8b
AK
1958 };
1959
53cb28cb 1960 return phys_section_add(map, &section);
5312bd8b
AK
1961}
1962
77717094 1963MemoryRegion *iotlb_to_region(AddressSpace *as, hwaddr index)
aa102231 1964{
77717094 1965 return as->dispatch->map.sections[index & ~TARGET_PAGE_MASK].mr;
aa102231
AK
1966}
1967
e9179ce1
AK
1968static void io_mem_init(void)
1969{
1f6245e5 1970 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
2c9b15ca 1971 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
1f6245e5 1972 NULL, UINT64_MAX);
2c9b15ca 1973 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
1f6245e5 1974 NULL, UINT64_MAX);
2c9b15ca 1975 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
1f6245e5 1976 NULL, UINT64_MAX);
e9179ce1
AK
1977}
1978
ac1970fb 1979static void mem_begin(MemoryListener *listener)
00752703
PB
1980{
1981 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
53cb28cb
MA
1982 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
1983 uint16_t n;
1984
a656e22f 1985 n = dummy_section(&d->map, as, &io_mem_unassigned);
53cb28cb 1986 assert(n == PHYS_SECTION_UNASSIGNED);
a656e22f 1987 n = dummy_section(&d->map, as, &io_mem_notdirty);
53cb28cb 1988 assert(n == PHYS_SECTION_NOTDIRTY);
a656e22f 1989 n = dummy_section(&d->map, as, &io_mem_rom);
53cb28cb 1990 assert(n == PHYS_SECTION_ROM);
a656e22f 1991 n = dummy_section(&d->map, as, &io_mem_watch);
53cb28cb 1992 assert(n == PHYS_SECTION_WATCH);
00752703 1993
9736e55b 1994 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
00752703
PB
1995 d->as = as;
1996 as->next_dispatch = d;
1997}
1998
1999static void mem_commit(MemoryListener *listener)
ac1970fb 2000{
89ae337a 2001 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
0475d94f
PB
2002 AddressSpaceDispatch *cur = as->dispatch;
2003 AddressSpaceDispatch *next = as->next_dispatch;
2004
53cb28cb 2005 phys_page_compact_all(next, next->map.nodes_nb);
b35ba30f 2006
0475d94f 2007 as->dispatch = next;
b41aac4f 2008
53cb28cb
MA
2009 if (cur) {
2010 phys_sections_free(&cur->map);
2011 g_free(cur);
2012 }
9affd6fc
PB
2013}
2014
1d71148e 2015static void tcg_commit(MemoryListener *listener)
50c1e149 2016{
182735ef 2017 CPUState *cpu;
117712c3
AK
2018
2019 /* since each CPU stores ram addresses in its TLB cache, we must
2020 reset the modified entries */
2021 /* XXX: slow ! */
bdc44640 2022 CPU_FOREACH(cpu) {
33bde2e1
EI
2023 /* FIXME: Disentangle the cpu.h circular file dependencies so we can
2024 directly get the right CPU from the listener. */
2025 if (cpu->tcg_as_listener != listener) {
2026 continue;
2027 }
00c8cb0a 2028 tlb_flush(cpu, 1);
117712c3 2029 }
50c1e149
AK
2030}
2031
93632747
AK
2032static void core_log_global_start(MemoryListener *listener)
2033{
981fdf23 2034 cpu_physical_memory_set_dirty_tracking(true);
93632747
AK
2035}
2036
2037static void core_log_global_stop(MemoryListener *listener)
2038{
981fdf23 2039 cpu_physical_memory_set_dirty_tracking(false);
93632747
AK
2040}
2041
93632747 2042static MemoryListener core_memory_listener = {
93632747
AK
2043 .log_global_start = core_log_global_start,
2044 .log_global_stop = core_log_global_stop,
ac1970fb 2045 .priority = 1,
93632747
AK
2046};
2047
ac1970fb
AK
2048void address_space_init_dispatch(AddressSpace *as)
2049{
00752703 2050 as->dispatch = NULL;
89ae337a 2051 as->dispatch_listener = (MemoryListener) {
ac1970fb 2052 .begin = mem_begin,
00752703 2053 .commit = mem_commit,
ac1970fb
AK
2054 .region_add = mem_add,
2055 .region_nop = mem_add,
2056 .priority = 0,
2057 };
89ae337a 2058 memory_listener_register(&as->dispatch_listener, as);
ac1970fb
AK
2059}
2060
83f3c251
AK
2061void address_space_destroy_dispatch(AddressSpace *as)
2062{
2063 AddressSpaceDispatch *d = as->dispatch;
2064
89ae337a 2065 memory_listener_unregister(&as->dispatch_listener);
83f3c251
AK
2066 g_free(d);
2067 as->dispatch = NULL;
2068}
2069
62152b8a
AK
2070static void memory_map_init(void)
2071{
7267c094 2072 system_memory = g_malloc(sizeof(*system_memory));
03f49957 2073
57271d63 2074 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
7dca8043 2075 address_space_init(&address_space_memory, system_memory, "memory");
309cb471 2076
7267c094 2077 system_io = g_malloc(sizeof(*system_io));
3bb28b72
JK
2078 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2079 65536);
7dca8043 2080 address_space_init(&address_space_io, system_io, "I/O");
93632747 2081
f6790af6 2082 memory_listener_register(&core_memory_listener, &address_space_memory);
62152b8a
AK
2083}
2084
2085MemoryRegion *get_system_memory(void)
2086{
2087 return system_memory;
2088}
2089
309cb471
AK
2090MemoryRegion *get_system_io(void)
2091{
2092 return system_io;
2093}
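/* Hedged sketch (hypothetical board code, not from this file): publish a small
 * MMIO region inside the system memory returned by get_system_memory(), using
 * the same MemoryRegionOps pattern as the notdirty/watch/subpage regions
 * above.  The device name, base address and size are assumptions. */
static uint64_t example_dev_read(void *opaque, hwaddr addr, unsigned size)
{
    return 0;   /* reads as zero in this sketch */
}

static void example_dev_write(void *opaque, hwaddr addr,
                              uint64_t val, unsigned size)
{
    /* writes are ignored in this sketch */
}

static const MemoryRegionOps example_dev_ops = {
    .read = example_dev_read,
    .write = example_dev_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static MemoryRegion example_dev_mr;

static void example_map_device(void)
{
    memory_region_init_io(&example_dev_mr, NULL, &example_dev_ops, NULL,
                          "example-dev", 0x1000);
    memory_region_add_subregion(get_system_memory(), 0xfe000000,
                                &example_dev_mr);
}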
2094
e2eef170
PB
2095#endif /* !defined(CONFIG_USER_ONLY) */
2096
13eb76e0
FB
2097/* physical memory access (slow version, mainly for debug) */
2098#if defined(CONFIG_USER_ONLY)
f17ec444 2099int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
a68fe89c 2100 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2101{
2102 int l, flags;
2103 target_ulong page;
53a5960a 2104 void * p;
13eb76e0
FB
2105
2106 while (len > 0) {
2107 page = addr & TARGET_PAGE_MASK;
2108 l = (page + TARGET_PAGE_SIZE) - addr;
2109 if (l > len)
2110 l = len;
2111 flags = page_get_flags(page);
2112 if (!(flags & PAGE_VALID))
a68fe89c 2113 return -1;
13eb76e0
FB
2114 if (is_write) {
2115 if (!(flags & PAGE_WRITE))
a68fe89c 2116 return -1;
579a97f7 2117 /* XXX: this code should not depend on lock_user */
72fb7daa 2118 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 2119 return -1;
72fb7daa
AJ
2120 memcpy(p, buf, l);
2121 unlock_user(p, addr, l);
13eb76e0
FB
2122 } else {
2123 if (!(flags & PAGE_READ))
a68fe89c 2124 return -1;
579a97f7 2125 /* XXX: this code should not depend on lock_user */
72fb7daa 2126 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 2127 return -1;
72fb7daa 2128 memcpy(buf, p, l);
5b257578 2129 unlock_user(p, addr, 0);
13eb76e0
FB
2130 }
2131 len -= l;
2132 buf += l;
2133 addr += l;
2134 }
a68fe89c 2135 return 0;
13eb76e0 2136}
8df1cd07 2137
13eb76e0 2138#else
51d7a9eb 2139
a8170e5e
AK
2140static void invalidate_and_set_dirty(hwaddr addr,
2141 hwaddr length)
51d7a9eb 2142{
f874bf90
PM
2143 if (cpu_physical_memory_range_includes_clean(addr, length)) {
2144 tb_invalidate_phys_range(addr, addr + length, 0);
6886867e 2145 cpu_physical_memory_set_dirty_range_nocode(addr, length);
51d7a9eb 2146 }
e226939d 2147 xen_modified_memory(addr, length);
51d7a9eb
AP
2148}
2149
23326164 2150static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
82f2563f 2151{
e1622f4b 2152 unsigned access_size_max = mr->ops->valid.max_access_size;
23326164
RH
2153
2154 /* Regions are assumed to support 1-4 byte accesses unless
2155 otherwise specified. */
23326164
RH
2156 if (access_size_max == 0) {
2157 access_size_max = 4;
2158 }
2159
2160 /* Bound the maximum access by the alignment of the address. */
2161 if (!mr->ops->impl.unaligned) {
2162 unsigned align_size_max = addr & -addr;
2163 if (align_size_max != 0 && align_size_max < access_size_max) {
2164 access_size_max = align_size_max;
2165 }
82f2563f 2166 }
23326164
RH
2167
2168 /* Don't attempt accesses larger than the maximum. */
2169 if (l > access_size_max) {
2170 l = access_size_max;
82f2563f 2171 }
098178f2
PB
2172 if (l & (l - 1)) {
2173 l = 1 << (qemu_fls(l) - 1);
2174 }
23326164
RH
2175
2176 return l;
82f2563f
PB
2177}
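/* Hedged, self-contained illustration (not part of QEMU) of the narrowing
 * rules implemented by memory_access_size() above: the access is capped by
 * the region's valid.max_access_size (default 4), by the alignment of the
 * address when unaligned accesses are not supported, and finally rounded
 * down to a power of two. */
static unsigned example_access_size(unsigned l, uint64_t addr,
                                    unsigned region_max)
{
    unsigned max = region_max ? region_max : 4;   /* default 1-4 bytes */
    uint64_t align = addr & -addr;                /* alignment of addr */

    if (align != 0 && align < max) {
        max = align;
    }
    if (l > max) {
        l = max;
    }
    while (l & (l - 1)) {                         /* round down to 2^n */
        l &= l - 1;
    }
    return l;   /* e.g. l = 6 at addr 0x1002 with region_max 8 -> 2 */
}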
2178
fd8aaa76 2179bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
ac1970fb 2180 int len, bool is_write)
13eb76e0 2181{
149f54b5 2182 hwaddr l;
13eb76e0 2183 uint8_t *ptr;
791af8c8 2184 uint64_t val;
149f54b5 2185 hwaddr addr1;
5c8a00ce 2186 MemoryRegion *mr;
fd8aaa76 2187 bool error = false;
3b46e624 2188
13eb76e0 2189 while (len > 0) {
149f54b5 2190 l = len;
5c8a00ce 2191 mr = address_space_translate(as, addr, &addr1, &l, is_write);
3b46e624 2192
13eb76e0 2193 if (is_write) {
5c8a00ce
PB
2194 if (!memory_access_is_direct(mr, is_write)) {
2195 l = memory_access_size(mr, l, addr1);
4917cf44 2196 /* XXX: could force current_cpu to NULL to avoid
6a00d601 2197 potential bugs */
23326164
RH
2198 switch (l) {
2199 case 8:
2200 /* 64 bit write access */
2201 val = ldq_p(buf);
2202 error |= io_mem_write(mr, addr1, val, 8);
2203 break;
2204 case 4:
1c213d19 2205 /* 32 bit write access */
c27004ec 2206 val = ldl_p(buf);
5c8a00ce 2207 error |= io_mem_write(mr, addr1, val, 4);
23326164
RH
2208 break;
2209 case 2:
1c213d19 2210 /* 16 bit write access */
c27004ec 2211 val = lduw_p(buf);
5c8a00ce 2212 error |= io_mem_write(mr, addr1, val, 2);
23326164
RH
2213 break;
2214 case 1:
1c213d19 2215 /* 8 bit write access */
c27004ec 2216 val = ldub_p(buf);
5c8a00ce 2217 error |= io_mem_write(mr, addr1, val, 1);
23326164
RH
2218 break;
2219 default:
2220 abort();
13eb76e0 2221 }
2bbfa05d 2222 } else {
5c8a00ce 2223 addr1 += memory_region_get_ram_addr(mr);
13eb76e0 2224 /* RAM case */
5579c7f3 2225 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 2226 memcpy(ptr, buf, l);
51d7a9eb 2227 invalidate_and_set_dirty(addr1, l);
13eb76e0
FB
2228 }
2229 } else {
5c8a00ce 2230 if (!memory_access_is_direct(mr, is_write)) {
13eb76e0 2231 /* I/O case */
5c8a00ce 2232 l = memory_access_size(mr, l, addr1);
23326164
RH
2233 switch (l) {
2234 case 8:
2235 /* 64 bit read access */
2236 error |= io_mem_read(mr, addr1, &val, 8);
2237 stq_p(buf, val);
2238 break;
2239 case 4:
13eb76e0 2240 /* 32 bit read access */
5c8a00ce 2241 error |= io_mem_read(mr, addr1, &val, 4);
c27004ec 2242 stl_p(buf, val);
23326164
RH
2243 break;
2244 case 2:
13eb76e0 2245 /* 16 bit read access */
5c8a00ce 2246 error |= io_mem_read(mr, addr1, &val, 2);
c27004ec 2247 stw_p(buf, val);
23326164
RH
2248 break;
2249 case 1:
1c213d19 2250 /* 8 bit read access */
5c8a00ce 2251 error |= io_mem_read(mr, addr1, &val, 1);
c27004ec 2252 stb_p(buf, val);
23326164
RH
2253 break;
2254 default:
2255 abort();
13eb76e0
FB
2256 }
2257 } else {
2258 /* RAM case */
5c8a00ce 2259 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
f3705d53 2260 memcpy(buf, ptr, l);
13eb76e0
FB
2261 }
2262 }
2263 len -= l;
2264 buf += l;
2265 addr += l;
2266 }
fd8aaa76
PB
2267
2268 return error;
13eb76e0 2269}
8df1cd07 2270
fd8aaa76 2271bool address_space_write(AddressSpace *as, hwaddr addr,
ac1970fb
AK
2272 const uint8_t *buf, int len)
2273{
fd8aaa76 2274 return address_space_rw(as, addr, (uint8_t *)buf, len, true);
ac1970fb
AK
2275}
2276
fd8aaa76 2277bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
ac1970fb 2278{
fd8aaa76 2279 return address_space_rw(as, addr, buf, len, false);
ac1970fb
AK
2280}
2281
2282
a8170e5e 2283void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
ac1970fb
AK
2284 int len, int is_write)
2285{
fd8aaa76 2286 address_space_rw(&address_space_memory, addr, buf, len, is_write);
ac1970fb
AK
2287}
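/* Hedged usage sketch (hypothetical caller): fetch a 32-bit value from a
 * guest physical address through the slow path above; "gpa" is an assumed
 * guest physical address. */
static uint32_t example_read_u32(hwaddr gpa)
{
    uint8_t buf[4];

    cpu_physical_memory_rw(gpa, buf, sizeof(buf), 0 /* is_write */);
    return ldl_p(buf);   /* host-order load from the temporary buffer */
}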
2288
582b55a9
AG
2289enum write_rom_type {
2290 WRITE_DATA,
2291 FLUSH_CACHE,
2292};
2293
2a221651 2294static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
582b55a9 2295 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
d0ecd2aa 2296{
149f54b5 2297 hwaddr l;
d0ecd2aa 2298 uint8_t *ptr;
149f54b5 2299 hwaddr addr1;
5c8a00ce 2300 MemoryRegion *mr;
3b46e624 2301
d0ecd2aa 2302 while (len > 0) {
149f54b5 2303 l = len;
2a221651 2304 mr = address_space_translate(as, addr, &addr1, &l, true);
3b46e624 2305
5c8a00ce
PB
2306 if (!(memory_region_is_ram(mr) ||
2307 memory_region_is_romd(mr))) {
d0ecd2aa
FB
2308 /* do nothing */
2309 } else {
5c8a00ce 2310 addr1 += memory_region_get_ram_addr(mr);
d0ecd2aa 2311 /* ROM/RAM case */
5579c7f3 2312 ptr = qemu_get_ram_ptr(addr1);
582b55a9
AG
2313 switch (type) {
2314 case WRITE_DATA:
2315 memcpy(ptr, buf, l);
2316 invalidate_and_set_dirty(addr1, l);
2317 break;
2318 case FLUSH_CACHE:
2319 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2320 break;
2321 }
d0ecd2aa
FB
2322 }
2323 len -= l;
2324 buf += l;
2325 addr += l;
2326 }
2327}
2328
582b55a9 2329/* used for ROM loading: can write in RAM and ROM */
2a221651 2330void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
582b55a9
AG
2331 const uint8_t *buf, int len)
2332{
2a221651 2333 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
582b55a9
AG
2334}
2335
2336void cpu_flush_icache_range(hwaddr start, int len)
2337{
2338 /*
2339 * This function should do the same thing as an icache flush that was
2340 * triggered from within the guest. For TCG we are always cache coherent,
2341 * so there is no need to flush anything. For KVM / Xen we need to flush
2342 * the host's instruction cache at least.
2343 */
2344 if (tcg_enabled()) {
2345 return;
2346 }
2347
2a221651
EI
2348 cpu_physical_memory_write_rom_internal(&address_space_memory,
2349 start, NULL, len, FLUSH_CACHE);
582b55a9
AG
2350}
2351
6d16c2f8 2352typedef struct {
d3e71559 2353 MemoryRegion *mr;
6d16c2f8 2354 void *buffer;
a8170e5e
AK
2355 hwaddr addr;
2356 hwaddr len;
6d16c2f8
AL
2357} BounceBuffer;
2358
2359static BounceBuffer bounce;
2360
ba223c29
AL
2361typedef struct MapClient {
2362 void *opaque;
2363 void (*callback)(void *opaque);
72cf2d4f 2364 QLIST_ENTRY(MapClient) link;
ba223c29
AL
2365} MapClient;
2366
72cf2d4f
BS
2367static QLIST_HEAD(map_client_list, MapClient) map_client_list
2368 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
2369
2370void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2371{
7267c094 2372 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
2373
2374 client->opaque = opaque;
2375 client->callback = callback;
72cf2d4f 2376 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
2377 return client;
2378}
2379
8b9c99d9 2380static void cpu_unregister_map_client(void *_client)
ba223c29
AL
2381{
2382 MapClient *client = (MapClient *)_client;
2383
72cf2d4f 2384 QLIST_REMOVE(client, link);
7267c094 2385 g_free(client);
ba223c29
AL
2386}
2387
2388static void cpu_notify_map_clients(void)
2389{
2390 MapClient *client;
2391
72cf2d4f
BS
2392 while (!QLIST_EMPTY(&map_client_list)) {
2393 client = QLIST_FIRST(&map_client_list);
ba223c29 2394 client->callback(client->opaque);
34d5e948 2395 cpu_unregister_map_client(client);
ba223c29
AL
2396 }
2397}
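/* Hedged sketch (hypothetical device code): register a callback so that a
 * device whose address_space_map() attempt failed is notified, via
 * cpu_notify_map_clients() above, when the bounce buffer becomes free and
 * the mapping is worth retrying. */
static void example_retry_map(void *opaque)
{
    /* A real device would re-issue its failed address_space_map() here;
     * "opaque" would point at the device state. */
}

static void example_wait_for_bounce_buffer(void *device_state)
{
    cpu_register_map_client(device_state, example_retry_map);
}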
2398
51644ab7
PB
2399bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2400{
5c8a00ce 2401 MemoryRegion *mr;
51644ab7
PB
2402 hwaddr l, xlat;
2403
2404 while (len > 0) {
2405 l = len;
5c8a00ce
PB
2406 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2407 if (!memory_access_is_direct(mr, is_write)) {
2408 l = memory_access_size(mr, l, addr);
2409 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
51644ab7
PB
2410 return false;
2411 }
2412 }
2413
2414 len -= l;
2415 addr += l;
2416 }
2417 return true;
2418}
2419
6d16c2f8
AL
2420/* Map a physical memory region into a host virtual address.
2421 * May map a subset of the requested range, given by and returned in *plen.
2422 * May return NULL if resources needed to perform the mapping are exhausted.
2423 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
2424 * Use cpu_register_map_client() to know when retrying the map operation is
2425 * likely to succeed.
6d16c2f8 2426 */
ac1970fb 2427void *address_space_map(AddressSpace *as,
a8170e5e
AK
2428 hwaddr addr,
2429 hwaddr *plen,
ac1970fb 2430 bool is_write)
6d16c2f8 2431{
a8170e5e 2432 hwaddr len = *plen;
e3127ae0
PB
2433 hwaddr done = 0;
2434 hwaddr l, xlat, base;
2435 MemoryRegion *mr, *this_mr;
2436 ram_addr_t raddr;
6d16c2f8 2437
e3127ae0
PB
2438 if (len == 0) {
2439 return NULL;
2440 }
38bee5dc 2441
e3127ae0
PB
2442 l = len;
2443 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2444 if (!memory_access_is_direct(mr, is_write)) {
2445 if (bounce.buffer) {
2446 return NULL;
6d16c2f8 2447 }
e85d9db5
KW
2448 /* Avoid unbounded allocations */
2449 l = MIN(l, TARGET_PAGE_SIZE);
2450 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
e3127ae0
PB
2451 bounce.addr = addr;
2452 bounce.len = l;
d3e71559
PB
2453
2454 memory_region_ref(mr);
2455 bounce.mr = mr;
e3127ae0
PB
2456 if (!is_write) {
2457 address_space_read(as, addr, bounce.buffer, l);
8ab934f9 2458 }
6d16c2f8 2459
e3127ae0
PB
2460 *plen = l;
2461 return bounce.buffer;
2462 }
2463
2464 base = xlat;
2465 raddr = memory_region_get_ram_addr(mr);
2466
2467 for (;;) {
6d16c2f8
AL
2468 len -= l;
2469 addr += l;
e3127ae0
PB
2470 done += l;
2471 if (len == 0) {
2472 break;
2473 }
2474
2475 l = len;
2476 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2477 if (this_mr != mr || xlat != base + done) {
2478 break;
2479 }
6d16c2f8 2480 }
e3127ae0 2481
d3e71559 2482 memory_region_ref(mr);
e3127ae0
PB
2483 *plen = done;
2484 return qemu_ram_ptr_length(raddr + base, plen);
6d16c2f8
AL
2485}
2486
ac1970fb 2487/* Unmaps a memory region previously mapped by address_space_map().
6d16c2f8
AL
2488 * Will also mark the memory as dirty if is_write == 1. access_len gives
2489 * the amount of memory that was actually read or written by the caller.
2490 */
a8170e5e
AK
2491void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2492 int is_write, hwaddr access_len)
6d16c2f8
AL
2493{
2494 if (buffer != bounce.buffer) {
d3e71559
PB
2495 MemoryRegion *mr;
2496 ram_addr_t addr1;
2497
2498 mr = qemu_ram_addr_from_host(buffer, &addr1);
2499 assert(mr != NULL);
6d16c2f8 2500 if (is_write) {
6886867e 2501 invalidate_and_set_dirty(addr1, access_len);
6d16c2f8 2502 }
868bb33f 2503 if (xen_enabled()) {
e41d7c69 2504 xen_invalidate_map_cache_entry(buffer);
050a0ddf 2505 }
d3e71559 2506 memory_region_unref(mr);
6d16c2f8
AL
2507 return;
2508 }
2509 if (is_write) {
ac1970fb 2510 address_space_write(as, bounce.addr, bounce.buffer, access_len);
6d16c2f8 2511 }
f8a83245 2512 qemu_vfree(bounce.buffer);
6d16c2f8 2513 bounce.buffer = NULL;
d3e71559 2514 memory_region_unref(bounce.mr);
ba223c29 2515 cpu_notify_map_clients();
6d16c2f8 2516}
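/* Hedged sketch (hypothetical DMA caller) of the map/use/unmap contract
 * documented above: map a guest-physical window for writing, fill it, then
 * unmap it and account for the bytes actually written.  The zero fill is
 * purely illustrative. */
static void example_dma_fill(AddressSpace *as, hwaddr gpa, hwaddr len)
{
    hwaddr mapped = len;
    void *host = address_space_map(as, gpa, &mapped, true /* is_write */);

    if (host == NULL) {
        /* Mapping resources (e.g. the bounce buffer) are exhausted; retry
         * later from a cpu_register_map_client() callback. */
        return;
    }
    /* "mapped" may be smaller than "len" if the range is not contiguous. */
    memset(host, 0, mapped);
    address_space_unmap(as, host, mapped, true /* is_write */, mapped);
}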
d0ecd2aa 2517
a8170e5e
AK
2518void *cpu_physical_memory_map(hwaddr addr,
2519 hwaddr *plen,
ac1970fb
AK
2520 int is_write)
2521{
2522 return address_space_map(&address_space_memory, addr, plen, is_write);
2523}
2524
a8170e5e
AK
2525void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2526 int is_write, hwaddr access_len)
ac1970fb
AK
2527{
2528 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2529}
2530
8df1cd07 2531/* warning: addr must be aligned */
fdfba1a2 2532static inline uint32_t ldl_phys_internal(AddressSpace *as, hwaddr addr,
1e78bcc1 2533 enum device_endian endian)
8df1cd07 2534{
8df1cd07 2535 uint8_t *ptr;
791af8c8 2536 uint64_t val;
5c8a00ce 2537 MemoryRegion *mr;
149f54b5
PB
2538 hwaddr l = 4;
2539 hwaddr addr1;
8df1cd07 2540
fdfba1a2 2541 mr = address_space_translate(as, addr, &addr1, &l, false);
5c8a00ce 2542 if (l < 4 || !memory_access_is_direct(mr, false)) {
8df1cd07 2543 /* I/O case */
5c8a00ce 2544 io_mem_read(mr, addr1, &val, 4);
1e78bcc1
AG
2545#if defined(TARGET_WORDS_BIGENDIAN)
2546 if (endian == DEVICE_LITTLE_ENDIAN) {
2547 val = bswap32(val);
2548 }
2549#else
2550 if (endian == DEVICE_BIG_ENDIAN) {
2551 val = bswap32(val);
2552 }
2553#endif
8df1cd07
FB
2554 } else {
2555 /* RAM case */
5c8a00ce 2556 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2557 & TARGET_PAGE_MASK)
149f54b5 2558 + addr1);
1e78bcc1
AG
2559 switch (endian) {
2560 case DEVICE_LITTLE_ENDIAN:
2561 val = ldl_le_p(ptr);
2562 break;
2563 case DEVICE_BIG_ENDIAN:
2564 val = ldl_be_p(ptr);
2565 break;
2566 default:
2567 val = ldl_p(ptr);
2568 break;
2569 }
8df1cd07
FB
2570 }
2571 return val;
2572}
2573
fdfba1a2 2574uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2575{
fdfba1a2 2576 return ldl_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2577}
2578
fdfba1a2 2579uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2580{
fdfba1a2 2581 return ldl_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2582}
2583
fdfba1a2 2584uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2585{
fdfba1a2 2586 return ldl_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2587}
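/* Hedged usage sketch (hypothetical caller): read a 32-bit little-endian
 * field from a guest-physical descriptor with the accessor above;
 * address_space_memory is the system memory address space set up earlier in
 * this file, and "desc_gpa" is an assumed descriptor address. */
static uint32_t example_read_desc_field(hwaddr desc_gpa)
{
    return ldl_le_phys(&address_space_memory, desc_gpa);
}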
2588
84b7b8e7 2589/* warning: addr must be aligned */
2c17449b 2590static inline uint64_t ldq_phys_internal(AddressSpace *as, hwaddr addr,
1e78bcc1 2591 enum device_endian endian)
84b7b8e7 2592{
84b7b8e7
FB
2593 uint8_t *ptr;
2594 uint64_t val;
5c8a00ce 2595 MemoryRegion *mr;
149f54b5
PB
2596 hwaddr l = 8;
2597 hwaddr addr1;
84b7b8e7 2598
2c17449b 2599 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2600 false);
2601 if (l < 8 || !memory_access_is_direct(mr, false)) {
84b7b8e7 2602 /* I/O case */
5c8a00ce 2603 io_mem_read(mr, addr1, &val, 8);
968a5627
PB
2604#if defined(TARGET_WORDS_BIGENDIAN)
2605 if (endian == DEVICE_LITTLE_ENDIAN) {
2606 val = bswap64(val);
2607 }
2608#else
2609 if (endian == DEVICE_BIG_ENDIAN) {
2610 val = bswap64(val);
2611 }
84b7b8e7
FB
2612#endif
2613 } else {
2614 /* RAM case */
5c8a00ce 2615 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2616 & TARGET_PAGE_MASK)
149f54b5 2617 + addr1);
1e78bcc1
AG
2618 switch (endian) {
2619 case DEVICE_LITTLE_ENDIAN:
2620 val = ldq_le_p(ptr);
2621 break;
2622 case DEVICE_BIG_ENDIAN:
2623 val = ldq_be_p(ptr);
2624 break;
2625 default:
2626 val = ldq_p(ptr);
2627 break;
2628 }
84b7b8e7
FB
2629 }
2630 return val;
2631}
2632
2c17449b 2633uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2634{
2c17449b 2635 return ldq_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2636}
2637
2c17449b 2638uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2639{
2c17449b 2640 return ldq_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2641}
2642
2c17449b 2643uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2644{
2c17449b 2645 return ldq_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2646}
2647
aab33094 2648/* XXX: optimize */
2c17449b 2649uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
aab33094
FB
2650{
2651 uint8_t val;
2c17449b 2652 address_space_rw(as, addr, &val, 1, 0);
aab33094
FB
2653 return val;
2654}
2655
733f0b02 2656/* warning: addr must be aligned */
41701aa4 2657static inline uint32_t lduw_phys_internal(AddressSpace *as, hwaddr addr,
1e78bcc1 2658 enum device_endian endian)
aab33094 2659{
733f0b02
MT
2660 uint8_t *ptr;
2661 uint64_t val;
5c8a00ce 2662 MemoryRegion *mr;
149f54b5
PB
2663 hwaddr l = 2;
2664 hwaddr addr1;
733f0b02 2665
41701aa4 2666 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2667 false);
2668 if (l < 2 || !memory_access_is_direct(mr, false)) {
733f0b02 2669 /* I/O case */
5c8a00ce 2670 io_mem_read(mr, addr1, &val, 2);
1e78bcc1
AG
2671#if defined(TARGET_WORDS_BIGENDIAN)
2672 if (endian == DEVICE_LITTLE_ENDIAN) {
2673 val = bswap16(val);
2674 }
2675#else
2676 if (endian == DEVICE_BIG_ENDIAN) {
2677 val = bswap16(val);
2678 }
2679#endif
733f0b02
MT
2680 } else {
2681 /* RAM case */
5c8a00ce 2682 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2683 & TARGET_PAGE_MASK)
149f54b5 2684 + addr1);
1e78bcc1
AG
2685 switch (endian) {
2686 case DEVICE_LITTLE_ENDIAN:
2687 val = lduw_le_p(ptr);
2688 break;
2689 case DEVICE_BIG_ENDIAN:
2690 val = lduw_be_p(ptr);
2691 break;
2692 default:
2693 val = lduw_p(ptr);
2694 break;
2695 }
733f0b02
MT
2696 }
2697 return val;
aab33094
FB
2698}
2699
41701aa4 2700uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2701{
41701aa4 2702 return lduw_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2703}
2704
41701aa4 2705uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2706{
41701aa4 2707 return lduw_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2708}
2709
41701aa4 2710uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2711{
41701aa4 2712 return lduw_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2713}
2714
8df1cd07
FB
2715/* warning: addr must be aligned. The ram page is not marked as dirty
2716 and the code inside is not invalidated. It is useful if the dirty
2717 bits are used to track modified PTEs */
2198a121 2718void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
8df1cd07 2719{
8df1cd07 2720 uint8_t *ptr;
5c8a00ce 2721 MemoryRegion *mr;
149f54b5
PB
2722 hwaddr l = 4;
2723 hwaddr addr1;
8df1cd07 2724
2198a121 2725 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2726 true);
2727 if (l < 4 || !memory_access_is_direct(mr, true)) {
2728 io_mem_write(mr, addr1, val, 4);
8df1cd07 2729 } else {
5c8a00ce 2730 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 2731 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 2732 stl_p(ptr, val);
74576198
AL
2733
2734 if (unlikely(in_migration)) {
a2cd8c85 2735 if (cpu_physical_memory_is_clean(addr1)) {
74576198
AL
2736 /* invalidate code */
2737 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2738 /* set dirty bit */
6886867e 2739 cpu_physical_memory_set_dirty_range_nocode(addr1, 4);
74576198
AL
2740 }
2741 }
8df1cd07
FB
2742 }
2743}
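/* Hedged sketch (hypothetical target MMU helper): set an "accessed" bit in a
 * guest page-table entry without marking the page dirty for code
 * invalidation, which is the use case the comment above describes.  The bit
 * value and the 32-bit PTE layout are assumptions for illustration. */
#define EXAMPLE_PTE_ACCESSED 0x20

static void example_mark_pte_accessed(AddressSpace *as, hwaddr pte_gpa)
{
    uint32_t pte = ldl_phys(as, pte_gpa);

    if (!(pte & EXAMPLE_PTE_ACCESSED)) {
        stl_phys_notdirty(as, pte_gpa, pte | EXAMPLE_PTE_ACCESSED);
    }
}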
2744
2745/* warning: addr must be aligned */
ab1da857
EI
2746static inline void stl_phys_internal(AddressSpace *as,
2747 hwaddr addr, uint32_t val,
1e78bcc1 2748 enum device_endian endian)
8df1cd07 2749{
8df1cd07 2750 uint8_t *ptr;
5c8a00ce 2751 MemoryRegion *mr;
149f54b5
PB
2752 hwaddr l = 4;
2753 hwaddr addr1;
8df1cd07 2754
ab1da857 2755 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2756 true);
2757 if (l < 4 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
2758#if defined(TARGET_WORDS_BIGENDIAN)
2759 if (endian == DEVICE_LITTLE_ENDIAN) {
2760 val = bswap32(val);
2761 }
2762#else
2763 if (endian == DEVICE_BIG_ENDIAN) {
2764 val = bswap32(val);
2765 }
2766#endif
5c8a00ce 2767 io_mem_write(mr, addr1, val, 4);
8df1cd07 2768 } else {
8df1cd07 2769 /* RAM case */
5c8a00ce 2770 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 2771 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2772 switch (endian) {
2773 case DEVICE_LITTLE_ENDIAN:
2774 stl_le_p(ptr, val);
2775 break;
2776 case DEVICE_BIG_ENDIAN:
2777 stl_be_p(ptr, val);
2778 break;
2779 default:
2780 stl_p(ptr, val);
2781 break;
2782 }
51d7a9eb 2783 invalidate_and_set_dirty(addr1, 4);
8df1cd07
FB
2784 }
2785}
2786
ab1da857 2787void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2788{
ab1da857 2789 stl_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2790}
2791
ab1da857 2792void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2793{
ab1da857 2794 stl_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2795}
2796
ab1da857 2797void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2798{
ab1da857 2799 stl_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2800}
2801
aab33094 2802/* XXX: optimize */
db3be60d 2803void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
aab33094
FB
2804{
2805 uint8_t v = val;
db3be60d 2806 address_space_rw(as, addr, &v, 1, 1);
aab33094
FB
2807}
2808
733f0b02 2809/* warning: addr must be aligned */
5ce5944d
EI
2810static inline void stw_phys_internal(AddressSpace *as,
2811 hwaddr addr, uint32_t val,
1e78bcc1 2812 enum device_endian endian)
aab33094 2813{
733f0b02 2814 uint8_t *ptr;
5c8a00ce 2815 MemoryRegion *mr;
149f54b5
PB
2816 hwaddr l = 2;
2817 hwaddr addr1;
733f0b02 2818
5ce5944d 2819 mr = address_space_translate(as, addr, &addr1, &l, true);
5c8a00ce 2820 if (l < 2 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
2821#if defined(TARGET_WORDS_BIGENDIAN)
2822 if (endian == DEVICE_LITTLE_ENDIAN) {
2823 val = bswap16(val);
2824 }
2825#else
2826 if (endian == DEVICE_BIG_ENDIAN) {
2827 val = bswap16(val);
2828 }
2829#endif
5c8a00ce 2830 io_mem_write(mr, addr1, val, 2);
733f0b02 2831 } else {
733f0b02 2832 /* RAM case */
5c8a00ce 2833 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
733f0b02 2834 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2835 switch (endian) {
2836 case DEVICE_LITTLE_ENDIAN:
2837 stw_le_p(ptr, val);
2838 break;
2839 case DEVICE_BIG_ENDIAN:
2840 stw_be_p(ptr, val);
2841 break;
2842 default:
2843 stw_p(ptr, val);
2844 break;
2845 }
51d7a9eb 2846 invalidate_and_set_dirty(addr1, 2);
733f0b02 2847 }
aab33094
FB
2848}
2849
5ce5944d 2850void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2851{
5ce5944d 2852 stw_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2853}
2854
5ce5944d 2855void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2856{
5ce5944d 2857 stw_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2858}
2859
5ce5944d 2860void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2861{
5ce5944d 2862 stw_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2863}
2864
aab33094 2865/* XXX: optimize */
f606604f 2866void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
aab33094
FB
2867{
2868 val = tswap64(val);
f606604f 2869 address_space_rw(as, addr, (void *) &val, 8, 1);
aab33094
FB
2870}
2871
f606604f 2872void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
1e78bcc1
AG
2873{
2874 val = cpu_to_le64(val);
f606604f 2875 address_space_rw(as, addr, (void *) &val, 8, 1);
1e78bcc1
AG
2876}
2877
f606604f 2878void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
1e78bcc1
AG
2879{
2880 val = cpu_to_be64(val);
f606604f 2881 address_space_rw(as, addr, (void *) &val, 8, 1);
1e78bcc1
AG
2882}
2883
5e2972fd 2884/* virtual memory access for debug (includes writing to ROM) */
f17ec444 2885int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
b448f2f3 2886 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2887{
2888 int l;
a8170e5e 2889 hwaddr phys_addr;
9b3c35e0 2890 target_ulong page;
13eb76e0
FB
2891
2892 while (len > 0) {
2893 page = addr & TARGET_PAGE_MASK;
f17ec444 2894 phys_addr = cpu_get_phys_page_debug(cpu, page);
13eb76e0
FB
2895 /* if no physical page mapped, return an error */
2896 if (phys_addr == -1)
2897 return -1;
2898 l = (page + TARGET_PAGE_SIZE) - addr;
2899 if (l > len)
2900 l = len;
5e2972fd 2901 phys_addr += (addr & ~TARGET_PAGE_MASK);
2e38847b
EI
2902 if (is_write) {
2903 cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
2904 } else {
2905 address_space_rw(cpu->as, phys_addr, buf, l, 0);
2906 }
13eb76e0
FB
2907 len -= l;
2908 buf += l;
2909 addr += l;
2910 }
2911 return 0;
2912}
a68fe89c 2913#endif
13eb76e0 2914
8e4a424b
BS
2915/*
2916 * A helper function for the _utterly broken_ virtio device model to find out if
2917 * it's running on a big endian machine. Don't do this at home kids!
2918 */
98ed8ecf
GK
2919bool target_words_bigendian(void);
2920bool target_words_bigendian(void)
8e4a424b
BS
2921{
2922#if defined(TARGET_WORDS_BIGENDIAN)
2923 return true;
2924#else
2925 return false;
2926#endif
2927}
2928
76f35538 2929#ifndef CONFIG_USER_ONLY
a8170e5e 2930bool cpu_physical_memory_is_io(hwaddr phys_addr)
76f35538 2931{
5c8a00ce 2932 MemoryRegion*mr;
149f54b5 2933 hwaddr l = 1;
76f35538 2934
5c8a00ce
PB
2935 mr = address_space_translate(&address_space_memory,
2936 phys_addr, &phys_addr, &l, false);
76f35538 2937
5c8a00ce
PB
2938 return !(memory_region_is_ram(mr) ||
2939 memory_region_is_romd(mr));
76f35538 2940}
bd2fa51f
MH
2941
2942void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
2943{
2944 RAMBlock *block;
2945
2946 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
9b8424d5 2947 func(block->host, block->offset, block->used_length, opaque);
bd2fa51f
MH
2948 }
2949}
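/* Hedged sketch (hypothetical caller): total up the used guest RAM by walking
 * every RAMBlock with the helper above.  The callback parameter order is
 * inferred from the call site: (host pointer, block offset, used length,
 * opaque). */
static void example_add_block_size(void *host_addr, ram_addr_t offset,
                                   ram_addr_t length, void *opaque)
{
    uint64_t *total = opaque;

    *total += length;
}

/* A caller would then do:
 *     uint64_t total = 0;
 *     qemu_ram_foreach_block(example_add_block_size, &total);
 */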
ec3f8c99 2950#endif