]> git.ipfire.org Git - thirdparty/qemu.git/blob - exec.c
mac_dbdma: always initialize channel field in DBDMA_channel
[thirdparty/qemu.git] / exec.c
1 /*
2 * Virtual page mapping
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "config.h"
20 #ifndef _WIN32
21 #include <sys/types.h>
22 #include <sys/mman.h>
23 #endif
24
25 #include "qemu-common.h"
26 #include "cpu.h"
27 #include "tcg.h"
28 #include "hw/hw.h"
29 #if !defined(CONFIG_USER_ONLY)
30 #include "hw/boards.h"
31 #endif
32 #include "hw/qdev.h"
33 #include "qemu/osdep.h"
34 #include "sysemu/kvm.h"
35 #include "sysemu/sysemu.h"
36 #include "hw/xen/xen.h"
37 #include "qemu/timer.h"
38 #include "qemu/config-file.h"
39 #include "qemu/error-report.h"
40 #include "exec/memory.h"
41 #include "sysemu/dma.h"
42 #include "exec/address-spaces.h"
43 #if defined(CONFIG_USER_ONLY)
44 #include <qemu.h>
45 #else /* !CONFIG_USER_ONLY */
46 #include "sysemu/xen-mapcache.h"
47 #include "trace.h"
48 #endif
49 #include "exec/cpu-all.h"
50 #include "qemu/rcu_queue.h"
51 #include "qemu/main-loop.h"
52 #include "translate-all.h"
53 #include "sysemu/replay.h"
54
55 #include "exec/memory-internal.h"
56 #include "exec/ram_addr.h"
57
58 #include "qemu/range.h"
59 #ifndef _WIN32
60 #include "qemu/mmap-alloc.h"
61 #endif
62
63 //#define DEBUG_SUBPAGE
64
65 #if !defined(CONFIG_USER_ONLY)
66 /* ram_list is read under rcu_read_lock()/rcu_read_unlock(). Writes
67 * are protected by the ramlist lock.
68 */
69 RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
70
71 static MemoryRegion *system_memory;
72 static MemoryRegion *system_io;
73
74 AddressSpace address_space_io;
75 AddressSpace address_space_memory;
76
77 MemoryRegion io_mem_rom, io_mem_notdirty;
78 static MemoryRegion io_mem_unassigned;
79
80 /* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
81 #define RAM_PREALLOC (1 << 0)
82
83 /* RAM is mmap-ed with MAP_SHARED */
84 #define RAM_SHARED (1 << 1)
85
86 /* Only a portion of RAM (used_length) is actually used, and migrated.
87 * This used_length size can change across reboots.
88 */
89 #define RAM_RESIZEABLE (1 << 2)
90
91 /* RAM is backed by an mmapped file.
92 */
93 #define RAM_FILE (1 << 3)
94 #endif
95
96 struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
97 /* current CPU in the current thread. It is only valid inside
98 cpu_exec() */
99 __thread CPUState *current_cpu;
100 /* 0 = Do not count executed instructions.
101 1 = Precise instruction counting.
102 2 = Adaptive rate instruction counting. */
103 int use_icount;
104
105 #if !defined(CONFIG_USER_ONLY)
106
107 typedef struct PhysPageEntry PhysPageEntry;
108
109 struct PhysPageEntry {
110 /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
111 uint32_t skip : 6;
112 /* index into phys_sections (!skip) or phys_map_nodes (skip) */
113 uint32_t ptr : 26;
114 };
115
116 #define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
117
118 /* Size of the L2 (and L3, etc) page tables. */
119 #define ADDR_SPACE_BITS 64
120
121 #define P_L2_BITS 9
122 #define P_L2_SIZE (1 << P_L2_BITS)
123
124 #define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
125
126 typedef PhysPageEntry Node[P_L2_SIZE];
127
128 typedef struct PhysPageMap {
129 struct rcu_head rcu;
130
131 unsigned sections_nb;
132 unsigned sections_nb_alloc;
133 unsigned nodes_nb;
134 unsigned nodes_nb_alloc;
135 Node *nodes;
136 MemoryRegionSection *sections;
137 } PhysPageMap;
138
139 struct AddressSpaceDispatch {
140 struct rcu_head rcu;
141
142 /* This is a multi-level map on the physical address space.
143 * The bottom level has pointers to MemoryRegionSections.
144 */
145 PhysPageEntry phys_map;
146 PhysPageMap map;
147 AddressSpace *as;
148 };
149
150 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
151 typedef struct subpage_t {
152 MemoryRegion iomem;
153 AddressSpace *as;
154 hwaddr base;
155 uint16_t sub_section[TARGET_PAGE_SIZE];
156 } subpage_t;
157
158 #define PHYS_SECTION_UNASSIGNED 0
159 #define PHYS_SECTION_NOTDIRTY 1
160 #define PHYS_SECTION_ROM 2
161 #define PHYS_SECTION_WATCH 3
162
163 static void io_mem_init(void);
164 static void memory_map_init(void);
165 static void tcg_commit(MemoryListener *listener);
166
167 static MemoryRegion io_mem_watch;
168
169 /**
170 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
171 * @cpu: the CPU whose AddressSpace this is
172 * @as: the AddressSpace itself
173 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
174 * @tcg_as_listener: listener for tracking changes to the AddressSpace
175 */
176 struct CPUAddressSpace {
177 CPUState *cpu;
178 AddressSpace *as;
179 struct AddressSpaceDispatch *memory_dispatch;
180 MemoryListener tcg_as_listener;
181 };
182
183 #endif
184
185 #if !defined(CONFIG_USER_ONLY)
186
187 static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
188 {
189 if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
190 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
191 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
192 map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
193 }
194 }
195
196 static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
197 {
198 unsigned i;
199 uint32_t ret;
200 PhysPageEntry e;
201 PhysPageEntry *p;
202
203 ret = map->nodes_nb++;
204 p = map->nodes[ret];
205 assert(ret != PHYS_MAP_NODE_NIL);
206 assert(ret != map->nodes_nb_alloc);
207
208 e.skip = leaf ? 0 : 1;
209 e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
210 for (i = 0; i < P_L2_SIZE; ++i) {
211 memcpy(&p[i], &e, sizeof(e));
212 }
213 return ret;
214 }
215
216 static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
217 hwaddr *index, hwaddr *nb, uint16_t leaf,
218 int level)
219 {
220 PhysPageEntry *p;
221 hwaddr step = (hwaddr)1 << (level * P_L2_BITS);
222
223 if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
224 lp->ptr = phys_map_node_alloc(map, level == 0);
225 }
226 p = map->nodes[lp->ptr];
227 lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];
228
229 while (*nb && lp < &p[P_L2_SIZE]) {
230 if ((*index & (step - 1)) == 0 && *nb >= step) {
231 lp->skip = 0;
232 lp->ptr = leaf;
233 *index += step;
234 *nb -= step;
235 } else {
236 phys_page_set_level(map, lp, index, nb, leaf, level - 1);
237 }
238 ++lp;
239 }
240 }
241
242 static void phys_page_set(AddressSpaceDispatch *d,
243 hwaddr index, hwaddr nb,
244 uint16_t leaf)
245 {
246 /* Wildly overreserve - it doesn't matter much. */
247 phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);
248
249 phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
250 }
251
252 /* Compact a non leaf page entry. Simply detect that the entry has a single child,
253 * and update our entry so we can skip it and go directly to the destination.
254 */
255 static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
256 {
257 unsigned valid_ptr = P_L2_SIZE;
258 int valid = 0;
259 PhysPageEntry *p;
260 int i;
261
262 if (lp->ptr == PHYS_MAP_NODE_NIL) {
263 return;
264 }
265
266 p = nodes[lp->ptr];
267 for (i = 0; i < P_L2_SIZE; i++) {
268 if (p[i].ptr == PHYS_MAP_NODE_NIL) {
269 continue;
270 }
271
272 valid_ptr = i;
273 valid++;
274 if (p[i].skip) {
275 phys_page_compact(&p[i], nodes, compacted);
276 }
277 }
278
279 /* We can only compress if there's only one child. */
280 if (valid != 1) {
281 return;
282 }
283
284 assert(valid_ptr < P_L2_SIZE);
285
286 /* Don't compress if it won't fit in the # of bits we have. */
287 if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
288 return;
289 }
290
291 lp->ptr = p[valid_ptr].ptr;
292 if (!p[valid_ptr].skip) {
293 /* If our only child is a leaf, make this a leaf. */
294 /* By design, we should have made this node a leaf to begin with so we
295 * should never reach here.
296 * But since it's so simple to handle this, let's do it just in case we
297 * change this rule.
298 */
299 lp->skip = 0;
300 } else {
301 lp->skip += p[valid_ptr].skip;
302 }
303 }
304
305 static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
306 {
307 DECLARE_BITMAP(compacted, nodes_nb);
308
309 if (d->phys_map.skip) {
310 phys_page_compact(&d->phys_map, d->map.nodes, compacted);
311 }
312 }
313
314 static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
315 Node *nodes, MemoryRegionSection *sections)
316 {
317 PhysPageEntry *p;
318 hwaddr index = addr >> TARGET_PAGE_BITS;
319 int i;
320
321 for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
322 if (lp.ptr == PHYS_MAP_NODE_NIL) {
323 return &sections[PHYS_SECTION_UNASSIGNED];
324 }
325 p = nodes[lp.ptr];
326 lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
327 }
328
329 if (sections[lp.ptr].size.hi ||
330 range_covers_byte(sections[lp.ptr].offset_within_address_space,
331 sections[lp.ptr].size.lo, addr)) {
332 return &sections[lp.ptr];
333 } else {
334 return &sections[PHYS_SECTION_UNASSIGNED];
335 }
336 }
337
338 bool memory_region_is_unassigned(MemoryRegion *mr)
339 {
340 return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
341 && mr != &io_mem_watch;
342 }
343
344 /* Called from RCU critical section */
345 static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
346 hwaddr addr,
347 bool resolve_subpage)
348 {
349 MemoryRegionSection *section;
350 subpage_t *subpage;
351
352 section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
353 if (resolve_subpage && section->mr->subpage) {
354 subpage = container_of(section->mr, subpage_t, iomem);
355 section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
356 }
357 return section;
358 }
359
360 /* Called from RCU critical section */
361 static MemoryRegionSection *
362 address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
363 hwaddr *plen, bool resolve_subpage)
364 {
365 MemoryRegionSection *section;
366 MemoryRegion *mr;
367 Int128 diff;
368
369 section = address_space_lookup_region(d, addr, resolve_subpage);
370 /* Compute offset within MemoryRegionSection */
371 addr -= section->offset_within_address_space;
372
373 /* Compute offset within MemoryRegion */
374 *xlat = addr + section->offset_within_region;
375
376 mr = section->mr;
377
378 /* MMIO registers can be expected to perform full-width accesses based only
379 * on their address, without considering adjacent registers that could
380 * decode to completely different MemoryRegions. When such registers
381 * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
382 * regions overlap wildly. For this reason we cannot clamp the accesses
383 * here.
384 *
385 * If the length is small (as is the case for address_space_ldl/stl),
386 * everything works fine. If the incoming length is large, however,
387 * the caller really has to do the clamping through memory_access_size.
388 */
389 if (memory_region_is_ram(mr)) {
390 diff = int128_sub(section->size, int128_make64(addr));
391 *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
392 }
393 return section;
394 }
395
396 static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
397 {
398 if (memory_region_is_ram(mr)) {
399 return !(is_write && mr->readonly);
400 }
401 if (memory_region_is_romd(mr)) {
402 return !is_write;
403 }
404
405 return false;
406 }
407
408 /* Called from RCU critical section */
409 MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
410 hwaddr *xlat, hwaddr *plen,
411 bool is_write)
412 {
413 IOMMUTLBEntry iotlb;
414 MemoryRegionSection *section;
415 MemoryRegion *mr;
416
417 for (;;) {
418 AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
419 section = address_space_translate_internal(d, addr, &addr, plen, true);
420 mr = section->mr;
421
422 if (!mr->iommu_ops) {
423 break;
424 }
425
426 iotlb = mr->iommu_ops->translate(mr, addr, is_write);
427 addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
428 | (addr & iotlb.addr_mask));
429 *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
430 if (!(iotlb.perm & (1 << is_write))) {
431 mr = &io_mem_unassigned;
432 break;
433 }
434
435 as = iotlb.target_as;
436 }
437
438 if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
439 hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
440 *plen = MIN(page, *plen);
441 }
442
443 *xlat = addr;
444 return mr;
445 }
446
447 /* Called from RCU critical section */
448 MemoryRegionSection *
449 address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
450 hwaddr *xlat, hwaddr *plen)
451 {
452 MemoryRegionSection *section;
453 section = address_space_translate_internal(cpu->cpu_ases[0].memory_dispatch,
454 addr, xlat, plen, false);
455
456 assert(!section->mr->iommu_ops);
457 return section;
458 }
459 #endif
460
461 #if !defined(CONFIG_USER_ONLY)
462
463 static int cpu_common_post_load(void *opaque, int version_id)
464 {
465 CPUState *cpu = opaque;
466
467 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
468 version_id is increased. */
469 cpu->interrupt_request &= ~0x01;
470 tlb_flush(cpu, 1);
471
472 return 0;
473 }
474
475 static int cpu_common_pre_load(void *opaque)
476 {
477 CPUState *cpu = opaque;
478
479 cpu->exception_index = -1;
480
481 return 0;
482 }
483
484 static bool cpu_common_exception_index_needed(void *opaque)
485 {
486 CPUState *cpu = opaque;
487
488 return tcg_enabled() && cpu->exception_index != -1;
489 }
490
491 static const VMStateDescription vmstate_cpu_common_exception_index = {
492 .name = "cpu_common/exception_index",
493 .version_id = 1,
494 .minimum_version_id = 1,
495 .needed = cpu_common_exception_index_needed,
496 .fields = (VMStateField[]) {
497 VMSTATE_INT32(exception_index, CPUState),
498 VMSTATE_END_OF_LIST()
499 }
500 };
501
502 static bool cpu_common_crash_occurred_needed(void *opaque)
503 {
504 CPUState *cpu = opaque;
505
506 return cpu->crash_occurred;
507 }
508
509 static const VMStateDescription vmstate_cpu_common_crash_occurred = {
510 .name = "cpu_common/crash_occurred",
511 .version_id = 1,
512 .minimum_version_id = 1,
513 .needed = cpu_common_crash_occurred_needed,
514 .fields = (VMStateField[]) {
515 VMSTATE_BOOL(crash_occurred, CPUState),
516 VMSTATE_END_OF_LIST()
517 }
518 };
519
520 const VMStateDescription vmstate_cpu_common = {
521 .name = "cpu_common",
522 .version_id = 1,
523 .minimum_version_id = 1,
524 .pre_load = cpu_common_pre_load,
525 .post_load = cpu_common_post_load,
526 .fields = (VMStateField[]) {
527 VMSTATE_UINT32(halted, CPUState),
528 VMSTATE_UINT32(interrupt_request, CPUState),
529 VMSTATE_END_OF_LIST()
530 },
531 .subsections = (const VMStateDescription*[]) {
532 &vmstate_cpu_common_exception_index,
533 &vmstate_cpu_common_crash_occurred,
534 NULL
535 }
536 };
537
538 #endif
539
540 CPUState *qemu_get_cpu(int index)
541 {
542 CPUState *cpu;
543
544 CPU_FOREACH(cpu) {
545 if (cpu->cpu_index == index) {
546 return cpu;
547 }
548 }
549
550 return NULL;
551 }
552
553 #if !defined(CONFIG_USER_ONLY)
554 void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
555 {
556 /* We only support one address space per cpu at the moment. */
557 assert(cpu->as == as);
558
559 if (cpu->cpu_ases) {
560 /* We've already registered the listener for our only AS */
561 return;
562 }
563
564 cpu->cpu_ases = g_new0(CPUAddressSpace, 1);
565 cpu->cpu_ases[0].cpu = cpu;
566 cpu->cpu_ases[0].as = as;
567 cpu->cpu_ases[0].tcg_as_listener.commit = tcg_commit;
568 memory_listener_register(&cpu->cpu_ases[0].tcg_as_listener, as);
569 }
570 #endif
571
572 #ifndef CONFIG_USER_ONLY
573 static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);
574
575 static int cpu_get_free_index(Error **errp)
576 {
577 int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);
578
579 if (cpu >= MAX_CPUMASK_BITS) {
580 error_setg(errp, "Trying to use more CPUs than max of %d",
581 MAX_CPUMASK_BITS);
582 return -1;
583 }
584
585 bitmap_set(cpu_index_map, cpu, 1);
586 return cpu;
587 }
588
589 void cpu_exec_exit(CPUState *cpu)
590 {
591 if (cpu->cpu_index == -1) {
592 /* cpu_index was never allocated by this @cpu or was already freed. */
593 return;
594 }
595
596 bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
597 cpu->cpu_index = -1;
598 }
599 #else
600
601 static int cpu_get_free_index(Error **errp)
602 {
603 CPUState *some_cpu;
604 int cpu_index = 0;
605
606 CPU_FOREACH(some_cpu) {
607 cpu_index++;
608 }
609 return cpu_index;
610 }
611
612 void cpu_exec_exit(CPUState *cpu)
613 {
614 }
615 #endif
616
617 void cpu_exec_init(CPUState *cpu, Error **errp)
618 {
619 CPUClass *cc = CPU_GET_CLASS(cpu);
620 int cpu_index;
621 Error *local_err = NULL;
622
623 #ifndef CONFIG_USER_ONLY
624 cpu->as = &address_space_memory;
625 cpu->thread_id = qemu_get_thread_id();
626 #endif
627
628 #if defined(CONFIG_USER_ONLY)
629 cpu_list_lock();
630 #endif
631 cpu_index = cpu->cpu_index = cpu_get_free_index(&local_err);
632 if (local_err) {
633 error_propagate(errp, local_err);
634 #if defined(CONFIG_USER_ONLY)
635 cpu_list_unlock();
636 #endif
637 return;
638 }
639 QTAILQ_INSERT_TAIL(&cpus, cpu, node);
640 #if defined(CONFIG_USER_ONLY)
641 cpu_list_unlock();
642 #endif
643 if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
644 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
645 }
646 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
647 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
648 cpu_save, cpu_load, cpu->env_ptr);
649 assert(cc->vmsd == NULL);
650 assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
651 #endif
652 if (cc->vmsd != NULL) {
653 vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
654 }
655 }
656
657 #if defined(CONFIG_USER_ONLY)
658 static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
659 {
660 tb_invalidate_phys_page_range(pc, pc + 1, 0);
661 }
662 #else
663 static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
664 {
665 hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
666 if (phys != -1) {
667 tb_invalidate_phys_addr(cpu->as,
668 phys | (pc & ~TARGET_PAGE_MASK));
669 }
670 }
671 #endif
672
673 #if defined(CONFIG_USER_ONLY)
674 void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
675
676 {
677 }
678
679 int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
680 int flags)
681 {
682 return -ENOSYS;
683 }
684
685 void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
686 {
687 }
688
689 int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
690 int flags, CPUWatchpoint **watchpoint)
691 {
692 return -ENOSYS;
693 }
694 #else
695 /* Add a watchpoint. */
696 int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
697 int flags, CPUWatchpoint **watchpoint)
698 {
699 CPUWatchpoint *wp;
700
701 /* forbid ranges which are empty or run off the end of the address space */
702 if (len == 0 || (addr + len - 1) < addr) {
703 error_report("tried to set invalid watchpoint at %"
704 VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
705 return -EINVAL;
706 }
707 wp = g_malloc(sizeof(*wp));
708
709 wp->vaddr = addr;
710 wp->len = len;
711 wp->flags = flags;
712
713 /* keep all GDB-injected watchpoints in front */
714 if (flags & BP_GDB) {
715 QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
716 } else {
717 QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
718 }
719
720 tlb_flush_page(cpu, addr);
721
722 if (watchpoint)
723 *watchpoint = wp;
724 return 0;
725 }
726
727 /* Remove a specific watchpoint. */
728 int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
729 int flags)
730 {
731 CPUWatchpoint *wp;
732
733 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
734 if (addr == wp->vaddr && len == wp->len
735 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
736 cpu_watchpoint_remove_by_ref(cpu, wp);
737 return 0;
738 }
739 }
740 return -ENOENT;
741 }
742
743 /* Remove a specific watchpoint by reference. */
744 void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
745 {
746 QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);
747
748 tlb_flush_page(cpu, watchpoint->vaddr);
749
750 g_free(watchpoint);
751 }
752
753 /* Remove all matching watchpoints. */
754 void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
755 {
756 CPUWatchpoint *wp, *next;
757
758 QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
759 if (wp->flags & mask) {
760 cpu_watchpoint_remove_by_ref(cpu, wp);
761 }
762 }
763 }
764
765 /* Return true if this watchpoint address matches the specified
766 * access (ie the address range covered by the watchpoint overlaps
767 * partially or completely with the address range covered by the
768 * access).
769 */
770 static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
771 vaddr addr,
772 vaddr len)
773 {
774 /* We know the lengths are non-zero, but a little caution is
775 * required to avoid errors in the case where the range ends
776 * exactly at the top of the address space and so addr + len
777 * wraps round to zero.
778 */
779 vaddr wpend = wp->vaddr + wp->len - 1;
780 vaddr addrend = addr + len - 1;
781
782 return !(addr > wpend || wp->vaddr > addrend);
783 }
784
785 #endif
786
787 /* Add a breakpoint. */
788 int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
789 CPUBreakpoint **breakpoint)
790 {
791 CPUBreakpoint *bp;
792
793 bp = g_malloc(sizeof(*bp));
794
795 bp->pc = pc;
796 bp->flags = flags;
797
798 /* keep all GDB-injected breakpoints in front */
799 if (flags & BP_GDB) {
800 QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
801 } else {
802 QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
803 }
804
805 breakpoint_invalidate(cpu, pc);
806
807 if (breakpoint) {
808 *breakpoint = bp;
809 }
810 return 0;
811 }
812
813 /* Remove a specific breakpoint. */
814 int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
815 {
816 CPUBreakpoint *bp;
817
818 QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
819 if (bp->pc == pc && bp->flags == flags) {
820 cpu_breakpoint_remove_by_ref(cpu, bp);
821 return 0;
822 }
823 }
824 return -ENOENT;
825 }
826
827 /* Remove a specific breakpoint by reference. */
828 void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
829 {
830 QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);
831
832 breakpoint_invalidate(cpu, breakpoint->pc);
833
834 g_free(breakpoint);
835 }
836
837 /* Remove all matching breakpoints. */
838 void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
839 {
840 CPUBreakpoint *bp, *next;
841
842 QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
843 if (bp->flags & mask) {
844 cpu_breakpoint_remove_by_ref(cpu, bp);
845 }
846 }
847 }
848
849 /* enable or disable single step mode. EXCP_DEBUG is returned by the
850 CPU loop after each instruction */
851 void cpu_single_step(CPUState *cpu, int enabled)
852 {
853 if (cpu->singlestep_enabled != enabled) {
854 cpu->singlestep_enabled = enabled;
855 if (kvm_enabled()) {
856 kvm_update_guest_debug(cpu, 0);
857 } else {
858 /* must flush all the translated code to avoid inconsistencies */
859 /* XXX: only flush what is necessary */
860 tb_flush(cpu);
861 }
862 }
863 }
864
865 void cpu_abort(CPUState *cpu, const char *fmt, ...)
866 {
867 va_list ap;
868 va_list ap2;
869
870 va_start(ap, fmt);
871 va_copy(ap2, ap);
872 fprintf(stderr, "qemu: fatal: ");
873 vfprintf(stderr, fmt, ap);
874 fprintf(stderr, "\n");
875 cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
876 if (qemu_log_enabled()) {
877 qemu_log("qemu: fatal: ");
878 qemu_log_vprintf(fmt, ap2);
879 qemu_log("\n");
880 log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
881 qemu_log_flush();
882 qemu_log_close();
883 }
884 va_end(ap2);
885 va_end(ap);
886 replay_finish();
887 #if defined(CONFIG_USER_ONLY)
888 {
889 struct sigaction act;
890 sigfillset(&act.sa_mask);
891 act.sa_handler = SIG_DFL;
892 sigaction(SIGABRT, &act, NULL);
893 }
894 #endif
895 abort();
896 }
897
898 #if !defined(CONFIG_USER_ONLY)
899 /* Called from RCU critical section */
900 static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
901 {
902 RAMBlock *block;
903
904 block = atomic_rcu_read(&ram_list.mru_block);
905 if (block && addr - block->offset < block->max_length) {
906 return block;
907 }
908 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
909 if (addr - block->offset < block->max_length) {
910 goto found;
911 }
912 }
913
914 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
915 abort();
916
917 found:
918 /* It is safe to write mru_block outside the iothread lock. This
919 * is what happens:
920 *
921 * mru_block = xxx
922 * rcu_read_unlock()
923 * xxx removed from list
924 * rcu_read_lock()
925 * read mru_block
926 * mru_block = NULL;
927 * call_rcu(reclaim_ramblock, xxx);
928 * rcu_read_unlock()
929 *
930 * atomic_rcu_set is not needed here. The block was already published
931 * when it was placed into the list. Here we're just making an extra
932 * copy of the pointer.
933 */
934 ram_list.mru_block = block;
935 return block;
936 }
937
938 static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
939 {
940 CPUState *cpu;
941 ram_addr_t start1;
942 RAMBlock *block;
943 ram_addr_t end;
944
945 end = TARGET_PAGE_ALIGN(start + length);
946 start &= TARGET_PAGE_MASK;
947
948 rcu_read_lock();
949 block = qemu_get_ram_block(start);
950 assert(block == qemu_get_ram_block(end - 1));
951 start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
952 CPU_FOREACH(cpu) {
953 tlb_reset_dirty(cpu, start1, length);
954 }
955 rcu_read_unlock();
956 }
957
958 /* Note: start and end must be within the same ram block. */
959 bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
960 ram_addr_t length,
961 unsigned client)
962 {
963 unsigned long end, page;
964 bool dirty;
965
966 if (length == 0) {
967 return false;
968 }
969
970 end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
971 page = start >> TARGET_PAGE_BITS;
972 dirty = bitmap_test_and_clear_atomic(ram_list.dirty_memory[client],
973 page, end - page);
974
975 if (dirty && tcg_enabled()) {
976 tlb_reset_dirty_range_all(start, length);
977 }
978
979 return dirty;
980 }
981
982 /* Called from RCU critical section */
983 hwaddr memory_region_section_get_iotlb(CPUState *cpu,
984 MemoryRegionSection *section,
985 target_ulong vaddr,
986 hwaddr paddr, hwaddr xlat,
987 int prot,
988 target_ulong *address)
989 {
990 hwaddr iotlb;
991 CPUWatchpoint *wp;
992
993 if (memory_region_is_ram(section->mr)) {
994 /* Normal RAM. */
995 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
996 + xlat;
997 if (!section->readonly) {
998 iotlb |= PHYS_SECTION_NOTDIRTY;
999 } else {
1000 iotlb |= PHYS_SECTION_ROM;
1001 }
1002 } else {
1003 AddressSpaceDispatch *d;
1004
1005 d = atomic_rcu_read(&section->address_space->dispatch);
1006 iotlb = section - d->map.sections;
1007 iotlb += xlat;
1008 }
1009
1010 /* Make accesses to pages with watchpoints go via the
1011 watchpoint trap routines. */
1012 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
1013 if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
1014 /* Avoid trapping reads of pages with a write breakpoint. */
1015 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
1016 iotlb = PHYS_SECTION_WATCH + paddr;
1017 *address |= TLB_MMIO;
1018 break;
1019 }
1020 }
1021 }
1022
1023 return iotlb;
1024 }
1025 #endif /* defined(CONFIG_USER_ONLY) */
1026
1027 #if !defined(CONFIG_USER_ONLY)
1028
1029 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
1030 uint16_t section);
1031 static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
1032
1033 static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
1034 qemu_anon_ram_alloc;
1035
1036 /*
1037 * Set a custom physical guest memory alloator.
1038 * Accelerators with unusual needs may need this. Hopefully, we can
1039 * get rid of it eventually.
1040 */
1041 void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
1042 {
1043 phys_mem_alloc = alloc;
1044 }
1045
1046 static uint16_t phys_section_add(PhysPageMap *map,
1047 MemoryRegionSection *section)
1048 {
1049 /* The physical section number is ORed with a page-aligned
1050 * pointer to produce the iotlb entries. Thus it should
1051 * never overflow into the page-aligned value.
1052 */
1053 assert(map->sections_nb < TARGET_PAGE_SIZE);
1054
1055 if (map->sections_nb == map->sections_nb_alloc) {
1056 map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
1057 map->sections = g_renew(MemoryRegionSection, map->sections,
1058 map->sections_nb_alloc);
1059 }
1060 map->sections[map->sections_nb] = *section;
1061 memory_region_ref(section->mr);
1062 return map->sections_nb++;
1063 }
1064
1065 static void phys_section_destroy(MemoryRegion *mr)
1066 {
1067 memory_region_unref(mr);
1068
1069 if (mr->subpage) {
1070 subpage_t *subpage = container_of(mr, subpage_t, iomem);
1071 object_unref(OBJECT(&subpage->iomem));
1072 g_free(subpage);
1073 }
1074 }
1075
1076 static void phys_sections_free(PhysPageMap *map)
1077 {
1078 while (map->sections_nb > 0) {
1079 MemoryRegionSection *section = &map->sections[--map->sections_nb];
1080 phys_section_destroy(section->mr);
1081 }
1082 g_free(map->sections);
1083 g_free(map->nodes);
1084 }
1085
1086 static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
1087 {
1088 subpage_t *subpage;
1089 hwaddr base = section->offset_within_address_space
1090 & TARGET_PAGE_MASK;
1091 MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
1092 d->map.nodes, d->map.sections);
1093 MemoryRegionSection subsection = {
1094 .offset_within_address_space = base,
1095 .size = int128_make64(TARGET_PAGE_SIZE),
1096 };
1097 hwaddr start, end;
1098
1099 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
1100
1101 if (!(existing->mr->subpage)) {
1102 subpage = subpage_init(d->as, base);
1103 subsection.address_space = d->as;
1104 subsection.mr = &subpage->iomem;
1105 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
1106 phys_section_add(&d->map, &subsection));
1107 } else {
1108 subpage = container_of(existing->mr, subpage_t, iomem);
1109 }
1110 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
1111 end = start + int128_get64(section->size) - 1;
1112 subpage_register(subpage, start, end,
1113 phys_section_add(&d->map, section));
1114 }
1115
1116
1117 static void register_multipage(AddressSpaceDispatch *d,
1118 MemoryRegionSection *section)
1119 {
1120 hwaddr start_addr = section->offset_within_address_space;
1121 uint16_t section_index = phys_section_add(&d->map, section);
1122 uint64_t num_pages = int128_get64(int128_rshift(section->size,
1123 TARGET_PAGE_BITS));
1124
1125 assert(num_pages);
1126 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
1127 }
1128
1129 static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
1130 {
1131 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
1132 AddressSpaceDispatch *d = as->next_dispatch;
1133 MemoryRegionSection now = *section, remain = *section;
1134 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
1135
1136 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1137 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1138 - now.offset_within_address_space;
1139
1140 now.size = int128_min(int128_make64(left), now.size);
1141 register_subpage(d, &now);
1142 } else {
1143 now.size = int128_zero();
1144 }
1145 while (int128_ne(remain.size, now.size)) {
1146 remain.size = int128_sub(remain.size, now.size);
1147 remain.offset_within_address_space += int128_get64(now.size);
1148 remain.offset_within_region += int128_get64(now.size);
1149 now = remain;
1150 if (int128_lt(remain.size, page_size)) {
1151 register_subpage(d, &now);
1152 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
1153 now.size = page_size;
1154 register_subpage(d, &now);
1155 } else {
1156 now.size = int128_and(now.size, int128_neg(page_size));
1157 register_multipage(d, &now);
1158 }
1159 }
1160 }
1161
1162 void qemu_flush_coalesced_mmio_buffer(void)
1163 {
1164 if (kvm_enabled())
1165 kvm_flush_coalesced_mmio_buffer();
1166 }
1167
1168 void qemu_mutex_lock_ramlist(void)
1169 {
1170 qemu_mutex_lock(&ram_list.mutex);
1171 }
1172
1173 void qemu_mutex_unlock_ramlist(void)
1174 {
1175 qemu_mutex_unlock(&ram_list.mutex);
1176 }
1177
1178 #ifdef __linux__
1179
1180 #include <sys/vfs.h>
1181
1182 #define HUGETLBFS_MAGIC 0x958458f6
1183
1184 static long gethugepagesize(const char *path, Error **errp)
1185 {
1186 struct statfs fs;
1187 int ret;
1188
1189 do {
1190 ret = statfs(path, &fs);
1191 } while (ret != 0 && errno == EINTR);
1192
1193 if (ret != 0) {
1194 error_setg_errno(errp, errno, "failed to get page size of file %s",
1195 path);
1196 return 0;
1197 }
1198
1199 return fs.f_bsize;
1200 }
1201
1202 static void *file_ram_alloc(RAMBlock *block,
1203 ram_addr_t memory,
1204 const char *path,
1205 Error **errp)
1206 {
1207 struct stat st;
1208 char *filename;
1209 char *sanitized_name;
1210 char *c;
1211 void *area;
1212 int fd;
1213 uint64_t hpagesize;
1214 Error *local_err = NULL;
1215
1216 hpagesize = gethugepagesize(path, &local_err);
1217 if (local_err) {
1218 error_propagate(errp, local_err);
1219 goto error;
1220 }
1221 block->mr->align = hpagesize;
1222
1223 if (memory < hpagesize) {
1224 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1225 "or larger than huge page size 0x%" PRIx64,
1226 memory, hpagesize);
1227 goto error;
1228 }
1229
1230 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1231 error_setg(errp,
1232 "host lacks kvm mmu notifiers, -mem-path unsupported");
1233 goto error;
1234 }
1235
1236 if (!stat(path, &st) && S_ISDIR(st.st_mode)) {
1237 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1238 sanitized_name = g_strdup(memory_region_name(block->mr));
1239 for (c = sanitized_name; *c != '\0'; c++) {
1240 if (*c == '/') {
1241 *c = '_';
1242 }
1243 }
1244
1245 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1246 sanitized_name);
1247 g_free(sanitized_name);
1248
1249 fd = mkstemp(filename);
1250 if (fd >= 0) {
1251 unlink(filename);
1252 }
1253 g_free(filename);
1254 } else {
1255 fd = open(path, O_RDWR | O_CREAT, 0644);
1256 }
1257
1258 if (fd < 0) {
1259 error_setg_errno(errp, errno,
1260 "unable to create backing store for hugepages");
1261 goto error;
1262 }
1263
1264 memory = ROUND_UP(memory, hpagesize);
1265
1266 /*
1267 * ftruncate is not supported by hugetlbfs in older
1268 * hosts, so don't bother bailing out on errors.
1269 * If anything goes wrong with it under other filesystems,
1270 * mmap will fail.
1271 */
1272 if (ftruncate(fd, memory)) {
1273 perror("ftruncate");
1274 }
1275
1276 area = qemu_ram_mmap(fd, memory, hpagesize, block->flags & RAM_SHARED);
1277 if (area == MAP_FAILED) {
1278 error_setg_errno(errp, errno,
1279 "unable to map backing store for hugepages");
1280 close(fd);
1281 goto error;
1282 }
1283
1284 if (mem_prealloc) {
1285 os_mem_prealloc(fd, area, memory);
1286 }
1287
1288 block->fd = fd;
1289 return area;
1290
1291 error:
1292 return NULL;
1293 }
1294 #endif
1295
1296 /* Called with the ramlist lock held. */
1297 static ram_addr_t find_ram_offset(ram_addr_t size)
1298 {
1299 RAMBlock *block, *next_block;
1300 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
1301
1302 assert(size != 0); /* it would hand out same offset multiple times */
1303
1304 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
1305 return 0;
1306 }
1307
1308 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1309 ram_addr_t end, next = RAM_ADDR_MAX;
1310
1311 end = block->offset + block->max_length;
1312
1313 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
1314 if (next_block->offset >= end) {
1315 next = MIN(next, next_block->offset);
1316 }
1317 }
1318 if (next - end >= size && next - end < mingap) {
1319 offset = end;
1320 mingap = next - end;
1321 }
1322 }
1323
1324 if (offset == RAM_ADDR_MAX) {
1325 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1326 (uint64_t)size);
1327 abort();
1328 }
1329
1330 return offset;
1331 }
1332
1333 ram_addr_t last_ram_offset(void)
1334 {
1335 RAMBlock *block;
1336 ram_addr_t last = 0;
1337
1338 rcu_read_lock();
1339 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1340 last = MAX(last, block->offset + block->max_length);
1341 }
1342 rcu_read_unlock();
1343 return last;
1344 }
1345
1346 static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1347 {
1348 int ret;
1349
1350 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
1351 if (!machine_dump_guest_core(current_machine)) {
1352 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1353 if (ret) {
1354 perror("qemu_madvise");
1355 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1356 "but dump_guest_core=off specified\n");
1357 }
1358 }
1359 }
1360
1361 /* Called within an RCU critical section, or while the ramlist lock
1362 * is held.
1363 */
1364 static RAMBlock *find_ram_block(ram_addr_t addr)
1365 {
1366 RAMBlock *block;
1367
1368 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1369 if (block->offset == addr) {
1370 return block;
1371 }
1372 }
1373
1374 return NULL;
1375 }
1376
1377 const char *qemu_ram_get_idstr(RAMBlock *rb)
1378 {
1379 return rb->idstr;
1380 }
1381
1382 /* Called with iothread lock held. */
1383 void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1384 {
1385 RAMBlock *new_block, *block;
1386
1387 rcu_read_lock();
1388 new_block = find_ram_block(addr);
1389 assert(new_block);
1390 assert(!new_block->idstr[0]);
1391
1392 if (dev) {
1393 char *id = qdev_get_dev_path(dev);
1394 if (id) {
1395 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
1396 g_free(id);
1397 }
1398 }
1399 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1400
1401 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1402 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
1403 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1404 new_block->idstr);
1405 abort();
1406 }
1407 }
1408 rcu_read_unlock();
1409 }
1410
1411 /* Called with iothread lock held. */
1412 void qemu_ram_unset_idstr(ram_addr_t addr)
1413 {
1414 RAMBlock *block;
1415
1416 /* FIXME: arch_init.c assumes that this is not called throughout
1417 * migration. Ignore the problem since hot-unplug during migration
1418 * does not work anyway.
1419 */
1420
1421 rcu_read_lock();
1422 block = find_ram_block(addr);
1423 if (block) {
1424 memset(block->idstr, 0, sizeof(block->idstr));
1425 }
1426 rcu_read_unlock();
1427 }
1428
1429 static int memory_try_enable_merging(void *addr, size_t len)
1430 {
1431 if (!machine_mem_merge(current_machine)) {
1432 /* disabled by the user */
1433 return 0;
1434 }
1435
1436 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1437 }
1438
1439 /* Only legal before guest might have detected the memory size: e.g. on
1440 * incoming migration, or right after reset.
1441 *
1442 * As memory core doesn't know how is memory accessed, it is up to
1443 * resize callback to update device state and/or add assertions to detect
1444 * misuse, if necessary.
1445 */
1446 int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
1447 {
1448 RAMBlock *block = find_ram_block(base);
1449
1450 assert(block);
1451
1452 newsize = HOST_PAGE_ALIGN(newsize);
1453
1454 if (block->used_length == newsize) {
1455 return 0;
1456 }
1457
1458 if (!(block->flags & RAM_RESIZEABLE)) {
1459 error_setg_errno(errp, EINVAL,
1460 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1461 " in != 0x" RAM_ADDR_FMT, block->idstr,
1462 newsize, block->used_length);
1463 return -EINVAL;
1464 }
1465
1466 if (block->max_length < newsize) {
1467 error_setg_errno(errp, EINVAL,
1468 "Length too large: %s: 0x" RAM_ADDR_FMT
1469 " > 0x" RAM_ADDR_FMT, block->idstr,
1470 newsize, block->max_length);
1471 return -EINVAL;
1472 }
1473
1474 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1475 block->used_length = newsize;
1476 cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
1477 DIRTY_CLIENTS_ALL);
1478 memory_region_set_size(block->mr, newsize);
1479 if (block->resized) {
1480 block->resized(block->idstr, newsize, block->host);
1481 }
1482 return 0;
1483 }
1484
1485 static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
1486 {
1487 RAMBlock *block;
1488 RAMBlock *last_block = NULL;
1489 ram_addr_t old_ram_size, new_ram_size;
1490
1491 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1492
1493 qemu_mutex_lock_ramlist();
1494 new_block->offset = find_ram_offset(new_block->max_length);
1495
1496 if (!new_block->host) {
1497 if (xen_enabled()) {
1498 xen_ram_alloc(new_block->offset, new_block->max_length,
1499 new_block->mr);
1500 } else {
1501 new_block->host = phys_mem_alloc(new_block->max_length,
1502 &new_block->mr->align);
1503 if (!new_block->host) {
1504 error_setg_errno(errp, errno,
1505 "cannot set up guest memory '%s'",
1506 memory_region_name(new_block->mr));
1507 qemu_mutex_unlock_ramlist();
1508 return -1;
1509 }
1510 memory_try_enable_merging(new_block->host, new_block->max_length);
1511 }
1512 }
1513
1514 new_ram_size = MAX(old_ram_size,
1515 (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
1516 if (new_ram_size > old_ram_size) {
1517 migration_bitmap_extend(old_ram_size, new_ram_size);
1518 }
1519 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1520 * QLIST (which has an RCU-friendly variant) does not have insertion at
1521 * tail, so save the last element in last_block.
1522 */
1523 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1524 last_block = block;
1525 if (block->max_length < new_block->max_length) {
1526 break;
1527 }
1528 }
1529 if (block) {
1530 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
1531 } else if (last_block) {
1532 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
1533 } else { /* list is empty */
1534 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
1535 }
1536 ram_list.mru_block = NULL;
1537
1538 /* Write list before version */
1539 smp_wmb();
1540 ram_list.version++;
1541 qemu_mutex_unlock_ramlist();
1542
1543 new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1544
1545 if (new_ram_size > old_ram_size) {
1546 int i;
1547
1548 /* ram_list.dirty_memory[] is protected by the iothread lock. */
1549 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1550 ram_list.dirty_memory[i] =
1551 bitmap_zero_extend(ram_list.dirty_memory[i],
1552 old_ram_size, new_ram_size);
1553 }
1554 }
1555 cpu_physical_memory_set_dirty_range(new_block->offset,
1556 new_block->used_length,
1557 DIRTY_CLIENTS_ALL);
1558
1559 if (new_block->host) {
1560 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1561 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1562 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1563 if (kvm_enabled()) {
1564 kvm_setup_guest_memory(new_block->host, new_block->max_length);
1565 }
1566 }
1567
1568 return new_block->offset;
1569 }
1570
1571 #ifdef __linux__
1572 ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
1573 bool share, const char *mem_path,
1574 Error **errp)
1575 {
1576 RAMBlock *new_block;
1577 ram_addr_t addr;
1578 Error *local_err = NULL;
1579
1580 if (xen_enabled()) {
1581 error_setg(errp, "-mem-path not supported with Xen");
1582 return -1;
1583 }
1584
1585 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1586 /*
1587 * file_ram_alloc() needs to allocate just like
1588 * phys_mem_alloc, but we haven't bothered to provide
1589 * a hook there.
1590 */
1591 error_setg(errp,
1592 "-mem-path not supported with this accelerator");
1593 return -1;
1594 }
1595
1596 size = HOST_PAGE_ALIGN(size);
1597 new_block = g_malloc0(sizeof(*new_block));
1598 new_block->mr = mr;
1599 new_block->used_length = size;
1600 new_block->max_length = size;
1601 new_block->flags = share ? RAM_SHARED : 0;
1602 new_block->flags |= RAM_FILE;
1603 new_block->host = file_ram_alloc(new_block, size,
1604 mem_path, errp);
1605 if (!new_block->host) {
1606 g_free(new_block);
1607 return -1;
1608 }
1609
1610 addr = ram_block_add(new_block, &local_err);
1611 if (local_err) {
1612 g_free(new_block);
1613 error_propagate(errp, local_err);
1614 return -1;
1615 }
1616 return addr;
1617 }
1618 #endif
1619
1620 static
1621 ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1622 void (*resized)(const char*,
1623 uint64_t length,
1624 void *host),
1625 void *host, bool resizeable,
1626 MemoryRegion *mr, Error **errp)
1627 {
1628 RAMBlock *new_block;
1629 ram_addr_t addr;
1630 Error *local_err = NULL;
1631
1632 size = HOST_PAGE_ALIGN(size);
1633 max_size = HOST_PAGE_ALIGN(max_size);
1634 new_block = g_malloc0(sizeof(*new_block));
1635 new_block->mr = mr;
1636 new_block->resized = resized;
1637 new_block->used_length = size;
1638 new_block->max_length = max_size;
1639 assert(max_size >= size);
1640 new_block->fd = -1;
1641 new_block->host = host;
1642 if (host) {
1643 new_block->flags |= RAM_PREALLOC;
1644 }
1645 if (resizeable) {
1646 new_block->flags |= RAM_RESIZEABLE;
1647 }
1648 addr = ram_block_add(new_block, &local_err);
1649 if (local_err) {
1650 g_free(new_block);
1651 error_propagate(errp, local_err);
1652 return -1;
1653 }
1654 return addr;
1655 }
1656
1657 ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1658 MemoryRegion *mr, Error **errp)
1659 {
1660 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1661 }
1662
1663 ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
1664 {
1665 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1666 }
1667
1668 ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
1669 void (*resized)(const char*,
1670 uint64_t length,
1671 void *host),
1672 MemoryRegion *mr, Error **errp)
1673 {
1674 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
1675 }
1676
1677 void qemu_ram_free_from_ptr(ram_addr_t addr)
1678 {
1679 RAMBlock *block;
1680
1681 qemu_mutex_lock_ramlist();
1682 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1683 if (addr == block->offset) {
1684 QLIST_REMOVE_RCU(block, next);
1685 ram_list.mru_block = NULL;
1686 /* Write list before version */
1687 smp_wmb();
1688 ram_list.version++;
1689 g_free_rcu(block, rcu);
1690 break;
1691 }
1692 }
1693 qemu_mutex_unlock_ramlist();
1694 }
1695
1696 static void reclaim_ramblock(RAMBlock *block)
1697 {
1698 if (block->flags & RAM_PREALLOC) {
1699 ;
1700 } else if (xen_enabled()) {
1701 xen_invalidate_map_cache_entry(block->host);
1702 #ifndef _WIN32
1703 } else if (block->fd >= 0) {
1704 if (block->flags & RAM_FILE) {
1705 qemu_ram_munmap(block->host, block->max_length);
1706 } else {
1707 munmap(block->host, block->max_length);
1708 }
1709 close(block->fd);
1710 #endif
1711 } else {
1712 qemu_anon_ram_free(block->host, block->max_length);
1713 }
1714 g_free(block);
1715 }
1716
1717 void qemu_ram_free(ram_addr_t addr)
1718 {
1719 RAMBlock *block;
1720
1721 qemu_mutex_lock_ramlist();
1722 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1723 if (addr == block->offset) {
1724 QLIST_REMOVE_RCU(block, next);
1725 ram_list.mru_block = NULL;
1726 /* Write list before version */
1727 smp_wmb();
1728 ram_list.version++;
1729 call_rcu(block, reclaim_ramblock, rcu);
1730 break;
1731 }
1732 }
1733 qemu_mutex_unlock_ramlist();
1734 }
1735
1736 #ifndef _WIN32
1737 void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1738 {
1739 RAMBlock *block;
1740 ram_addr_t offset;
1741 int flags;
1742 void *area, *vaddr;
1743
1744 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1745 offset = addr - block->offset;
1746 if (offset < block->max_length) {
1747 vaddr = ramblock_ptr(block, offset);
1748 if (block->flags & RAM_PREALLOC) {
1749 ;
1750 } else if (xen_enabled()) {
1751 abort();
1752 } else {
1753 flags = MAP_FIXED;
1754 if (block->fd >= 0) {
1755 flags |= (block->flags & RAM_SHARED ?
1756 MAP_SHARED : MAP_PRIVATE);
1757 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1758 flags, block->fd, offset);
1759 } else {
1760 /*
1761 * Remap needs to match alloc. Accelerators that
1762 * set phys_mem_alloc never remap. If they did,
1763 * we'd need a remap hook here.
1764 */
1765 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1766
1767 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1768 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1769 flags, -1, 0);
1770 }
1771 if (area != vaddr) {
1772 fprintf(stderr, "Could not remap addr: "
1773 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
1774 length, addr);
1775 exit(1);
1776 }
1777 memory_try_enable_merging(vaddr, length);
1778 qemu_ram_setup_dump(vaddr, length);
1779 }
1780 }
1781 }
1782 }
1783 #endif /* !_WIN32 */
1784
1785 int qemu_get_ram_fd(ram_addr_t addr)
1786 {
1787 RAMBlock *block;
1788 int fd;
1789
1790 rcu_read_lock();
1791 block = qemu_get_ram_block(addr);
1792 fd = block->fd;
1793 rcu_read_unlock();
1794 return fd;
1795 }
1796
1797 void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
1798 {
1799 RAMBlock *block;
1800 void *ptr;
1801
1802 rcu_read_lock();
1803 block = qemu_get_ram_block(addr);
1804 ptr = ramblock_ptr(block, 0);
1805 rcu_read_unlock();
1806 return ptr;
1807 }
1808
1809 /* Return a host pointer to ram allocated with qemu_ram_alloc.
1810 * This should not be used for general purpose DMA. Use address_space_map
1811 * or address_space_rw instead. For local memory (e.g. video ram) that the
1812 * device owns, use memory_region_get_ram_ptr.
1813 *
1814 * By the time this function returns, the returned pointer is not protected
1815 * by RCU anymore. If the caller is not within an RCU critical section and
1816 * does not hold the iothread lock, it must have other means of protecting the
1817 * pointer, such as a reference to the region that includes the incoming
1818 * ram_addr_t.
1819 */
1820 void *qemu_get_ram_ptr(ram_addr_t addr)
1821 {
1822 RAMBlock *block;
1823 void *ptr;
1824
1825 rcu_read_lock();
1826 block = qemu_get_ram_block(addr);
1827
1828 if (xen_enabled() && block->host == NULL) {
1829 /* We need to check if the requested address is in the RAM
1830 * because we don't want to map the entire memory in QEMU.
1831 * In that case just map until the end of the page.
1832 */
1833 if (block->offset == 0) {
1834 ptr = xen_map_cache(addr, 0, 0);
1835 goto unlock;
1836 }
1837
1838 block->host = xen_map_cache(block->offset, block->max_length, 1);
1839 }
1840 ptr = ramblock_ptr(block, addr - block->offset);
1841
1842 unlock:
1843 rcu_read_unlock();
1844 return ptr;
1845 }
1846
1847 /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1848 * but takes a size argument.
1849 *
1850 * By the time this function returns, the returned pointer is not protected
1851 * by RCU anymore. If the caller is not within an RCU critical section and
1852 * does not hold the iothread lock, it must have other means of protecting the
1853 * pointer, such as a reference to the region that includes the incoming
1854 * ram_addr_t.
1855 */
1856 static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
1857 {
1858 void *ptr;
1859 if (*size == 0) {
1860 return NULL;
1861 }
1862 if (xen_enabled()) {
1863 return xen_map_cache(addr, *size, 1);
1864 } else {
1865 RAMBlock *block;
1866 rcu_read_lock();
1867 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1868 if (addr - block->offset < block->max_length) {
1869 if (addr - block->offset + *size > block->max_length)
1870 *size = block->max_length - addr + block->offset;
1871 ptr = ramblock_ptr(block, addr - block->offset);
1872 rcu_read_unlock();
1873 return ptr;
1874 }
1875 }
1876
1877 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1878 abort();
1879 }
1880 }
1881
1882 /*
1883 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
1884 * in that RAMBlock.
1885 *
1886 * ptr: Host pointer to look up
1887 * round_offset: If true round the result offset down to a page boundary
1888 * *ram_addr: set to result ram_addr
1889 * *offset: set to result offset within the RAMBlock
1890 *
1891 * Returns: RAMBlock (or NULL if not found)
1892 *
1893 * By the time this function returns, the returned pointer is not protected
1894 * by RCU anymore. If the caller is not within an RCU critical section and
1895 * does not hold the iothread lock, it must have other means of protecting the
1896 * pointer, such as a reference to the region that includes the incoming
1897 * ram_addr_t.
1898 */
1899 RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
1900 ram_addr_t *ram_addr,
1901 ram_addr_t *offset)
1902 {
1903 RAMBlock *block;
1904 uint8_t *host = ptr;
1905
1906 if (xen_enabled()) {
1907 rcu_read_lock();
1908 *ram_addr = xen_ram_addr_from_mapcache(ptr);
1909 block = qemu_get_ram_block(*ram_addr);
1910 if (block) {
1911 *offset = (host - block->host);
1912 }
1913 rcu_read_unlock();
1914 return block;
1915 }
1916
1917 rcu_read_lock();
1918 block = atomic_rcu_read(&ram_list.mru_block);
1919 if (block && block->host && host - block->host < block->max_length) {
1920 goto found;
1921 }
1922
1923 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1924 /* This case append when the block is not mapped. */
1925 if (block->host == NULL) {
1926 continue;
1927 }
1928 if (host - block->host < block->max_length) {
1929 goto found;
1930 }
1931 }
1932
1933 rcu_read_unlock();
1934 return NULL;
1935
1936 found:
1937 *offset = (host - block->host);
1938 if (round_offset) {
1939 *offset &= TARGET_PAGE_MASK;
1940 }
1941 *ram_addr = block->offset + *offset;
1942 rcu_read_unlock();
1943 return block;
1944 }
1945
1946 /*
1947 * Finds the named RAMBlock
1948 *
1949 * name: The name of RAMBlock to find
1950 *
1951 * Returns: RAMBlock (or NULL if not found)
1952 */
1953 RAMBlock *qemu_ram_block_by_name(const char *name)
1954 {
1955 RAMBlock *block;
1956
1957 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1958 if (!strcmp(name, block->idstr)) {
1959 return block;
1960 }
1961 }
1962
1963 return NULL;
1964 }
1965
1966 /* Some of the softmmu routines need to translate from a host pointer
1967 (typically a TLB entry) back to a ram offset. */
1968 MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
1969 {
1970 RAMBlock *block;
1971 ram_addr_t offset; /* Not used */
1972
1973 block = qemu_ram_block_from_host(ptr, false, ram_addr, &offset);
1974
1975 if (!block) {
1976 return NULL;
1977 }
1978
1979 return block->mr;
1980 }
1981
1982 static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
1983 uint64_t val, unsigned size)
1984 {
1985 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
1986 tb_invalidate_phys_page_fast(ram_addr, size);
1987 }
1988 switch (size) {
1989 case 1:
1990 stb_p(qemu_get_ram_ptr(ram_addr), val);
1991 break;
1992 case 2:
1993 stw_p(qemu_get_ram_ptr(ram_addr), val);
1994 break;
1995 case 4:
1996 stl_p(qemu_get_ram_ptr(ram_addr), val);
1997 break;
1998 default:
1999 abort();
2000 }
2001 /* Set both VGA and migration bits for simplicity and to remove
2002 * the notdirty callback faster.
2003 */
2004 cpu_physical_memory_set_dirty_range(ram_addr, size,
2005 DIRTY_CLIENTS_NOCODE);
2006 /* we remove the notdirty callback only if the code has been
2007 flushed */
2008 if (!cpu_physical_memory_is_clean(ram_addr)) {
2009 tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
2010 }
2011 }
2012
2013 static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
2014 unsigned size, bool is_write)
2015 {
2016 return is_write;
2017 }
2018
2019 static const MemoryRegionOps notdirty_mem_ops = {
2020 .write = notdirty_mem_write,
2021 .valid.accepts = notdirty_mem_accepts,
2022 .endianness = DEVICE_NATIVE_ENDIAN,
2023 };
2024
2025 /* Generate a debug exception if a watchpoint has been hit. */
2026 static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
2027 {
2028 CPUState *cpu = current_cpu;
2029 CPUArchState *env = cpu->env_ptr;
2030 target_ulong pc, cs_base;
2031 target_ulong vaddr;
2032 CPUWatchpoint *wp;
2033 int cpu_flags;
2034
2035 if (cpu->watchpoint_hit) {
2036 /* We re-entered the check after replacing the TB. Now raise
2037 * the debug interrupt so that is will trigger after the
2038 * current instruction. */
2039 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
2040 return;
2041 }
2042 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2043 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
2044 if (cpu_watchpoint_address_matches(wp, vaddr, len)
2045 && (wp->flags & flags)) {
2046 if (flags == BP_MEM_READ) {
2047 wp->flags |= BP_WATCHPOINT_HIT_READ;
2048 } else {
2049 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
2050 }
2051 wp->hitaddr = vaddr;
2052 wp->hitattrs = attrs;
2053 if (!cpu->watchpoint_hit) {
2054 cpu->watchpoint_hit = wp;
2055 tb_check_watchpoint(cpu);
2056 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2057 cpu->exception_index = EXCP_DEBUG;
2058 cpu_loop_exit(cpu);
2059 } else {
2060 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2061 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
2062 cpu_resume_from_signal(cpu, NULL);
2063 }
2064 }
2065 } else {
2066 wp->flags &= ~BP_WATCHPOINT_HIT;
2067 }
2068 }
2069 }
2070
2071 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2072 so these check for a hit then pass through to the normal out-of-line
2073 phys routines. */
2074 static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
2075 unsigned size, MemTxAttrs attrs)
2076 {
2077 MemTxResult res;
2078 uint64_t data;
2079
2080 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
2081 switch (size) {
2082 case 1:
2083 data = address_space_ldub(&address_space_memory, addr, attrs, &res);
2084 break;
2085 case 2:
2086 data = address_space_lduw(&address_space_memory, addr, attrs, &res);
2087 break;
2088 case 4:
2089 data = address_space_ldl(&address_space_memory, addr, attrs, &res);
2090 break;
2091 default: abort();
2092 }
2093 *pdata = data;
2094 return res;
2095 }
2096
2097 static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
2098 uint64_t val, unsigned size,
2099 MemTxAttrs attrs)
2100 {
2101 MemTxResult res;
2102
2103 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
2104 switch (size) {
2105 case 1:
2106 address_space_stb(&address_space_memory, addr, val, attrs, &res);
2107 break;
2108 case 2:
2109 address_space_stw(&address_space_memory, addr, val, attrs, &res);
2110 break;
2111 case 4:
2112 address_space_stl(&address_space_memory, addr, val, attrs, &res);
2113 break;
2114 default: abort();
2115 }
2116 return res;
2117 }
2118
2119 static const MemoryRegionOps watch_mem_ops = {
2120 .read_with_attrs = watch_mem_read,
2121 .write_with_attrs = watch_mem_write,
2122 .endianness = DEVICE_NATIVE_ENDIAN,
2123 };
2124
2125 static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
2126 unsigned len, MemTxAttrs attrs)
2127 {
2128 subpage_t *subpage = opaque;
2129 uint8_t buf[8];
2130 MemTxResult res;
2131
2132 #if defined(DEBUG_SUBPAGE)
2133 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
2134 subpage, len, addr);
2135 #endif
2136 res = address_space_read(subpage->as, addr + subpage->base,
2137 attrs, buf, len);
2138 if (res) {
2139 return res;
2140 }
2141 switch (len) {
2142 case 1:
2143 *data = ldub_p(buf);
2144 return MEMTX_OK;
2145 case 2:
2146 *data = lduw_p(buf);
2147 return MEMTX_OK;
2148 case 4:
2149 *data = ldl_p(buf);
2150 return MEMTX_OK;
2151 case 8:
2152 *data = ldq_p(buf);
2153 return MEMTX_OK;
2154 default:
2155 abort();
2156 }
2157 }
2158
2159 static MemTxResult subpage_write(void *opaque, hwaddr addr,
2160 uint64_t value, unsigned len, MemTxAttrs attrs)
2161 {
2162 subpage_t *subpage = opaque;
2163 uint8_t buf[8];
2164
2165 #if defined(DEBUG_SUBPAGE)
2166 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
2167 " value %"PRIx64"\n",
2168 __func__, subpage, len, addr, value);
2169 #endif
2170 switch (len) {
2171 case 1:
2172 stb_p(buf, value);
2173 break;
2174 case 2:
2175 stw_p(buf, value);
2176 break;
2177 case 4:
2178 stl_p(buf, value);
2179 break;
2180 case 8:
2181 stq_p(buf, value);
2182 break;
2183 default:
2184 abort();
2185 }
2186 return address_space_write(subpage->as, addr + subpage->base,
2187 attrs, buf, len);
2188 }
2189
2190 static bool subpage_accepts(void *opaque, hwaddr addr,
2191 unsigned len, bool is_write)
2192 {
2193 subpage_t *subpage = opaque;
2194 #if defined(DEBUG_SUBPAGE)
2195 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
2196 __func__, subpage, is_write ? 'w' : 'r', len, addr);
2197 #endif
2198
2199 return address_space_access_valid(subpage->as, addr + subpage->base,
2200 len, is_write);
2201 }
2202
2203 static const MemoryRegionOps subpage_ops = {
2204 .read_with_attrs = subpage_read,
2205 .write_with_attrs = subpage_write,
2206 .impl.min_access_size = 1,
2207 .impl.max_access_size = 8,
2208 .valid.min_access_size = 1,
2209 .valid.max_access_size = 8,
2210 .valid.accepts = subpage_accepts,
2211 .endianness = DEVICE_NATIVE_ENDIAN,
2212 };
2213
2214 static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
2215 uint16_t section)
2216 {
2217 int idx, eidx;
2218
2219 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2220 return -1;
2221 idx = SUBPAGE_IDX(start);
2222 eidx = SUBPAGE_IDX(end);
2223 #if defined(DEBUG_SUBPAGE)
2224 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2225 __func__, mmio, start, end, idx, eidx, section);
2226 #endif
2227 for (; idx <= eidx; idx++) {
2228 mmio->sub_section[idx] = section;
2229 }
2230
2231 return 0;
2232 }
2233
2234 static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
2235 {
2236 subpage_t *mmio;
2237
2238 mmio = g_malloc0(sizeof(subpage_t));
2239
2240 mmio->as = as;
2241 mmio->base = base;
2242 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
2243 NULL, TARGET_PAGE_SIZE);
2244 mmio->iomem.subpage = true;
2245 #if defined(DEBUG_SUBPAGE)
2246 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2247 mmio, base, TARGET_PAGE_SIZE);
2248 #endif
2249 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
2250
2251 return mmio;
2252 }
2253
2254 static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2255 MemoryRegion *mr)
2256 {
2257 assert(as);
2258 MemoryRegionSection section = {
2259 .address_space = as,
2260 .mr = mr,
2261 .offset_within_address_space = 0,
2262 .offset_within_region = 0,
2263 .size = int128_2_64(),
2264 };
2265
2266 return phys_section_add(map, &section);
2267 }
2268
2269 MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index)
2270 {
2271 CPUAddressSpace *cpuas = &cpu->cpu_ases[0];
2272 AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
2273 MemoryRegionSection *sections = d->map.sections;
2274
2275 return sections[index & ~TARGET_PAGE_MASK].mr;
2276 }
2277
2278 static void io_mem_init(void)
2279 {
2280 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
2281 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
2282 NULL, UINT64_MAX);
2283 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
2284 NULL, UINT64_MAX);
2285 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
2286 NULL, UINT64_MAX);
2287 }
2288
2289 static void mem_begin(MemoryListener *listener)
2290 {
2291 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
2292 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2293 uint16_t n;
2294
2295 n = dummy_section(&d->map, as, &io_mem_unassigned);
2296 assert(n == PHYS_SECTION_UNASSIGNED);
2297 n = dummy_section(&d->map, as, &io_mem_notdirty);
2298 assert(n == PHYS_SECTION_NOTDIRTY);
2299 n = dummy_section(&d->map, as, &io_mem_rom);
2300 assert(n == PHYS_SECTION_ROM);
2301 n = dummy_section(&d->map, as, &io_mem_watch);
2302 assert(n == PHYS_SECTION_WATCH);
2303
2304 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
2305 d->as = as;
2306 as->next_dispatch = d;
2307 }
2308
2309 static void address_space_dispatch_free(AddressSpaceDispatch *d)
2310 {
2311 phys_sections_free(&d->map);
2312 g_free(d);
2313 }
2314
2315 static void mem_commit(MemoryListener *listener)
2316 {
2317 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
2318 AddressSpaceDispatch *cur = as->dispatch;
2319 AddressSpaceDispatch *next = as->next_dispatch;
2320
2321 phys_page_compact_all(next, next->map.nodes_nb);
2322
2323 atomic_rcu_set(&as->dispatch, next);
2324 if (cur) {
2325 call_rcu(cur, address_space_dispatch_free, rcu);
2326 }
2327 }
2328
2329 static void tcg_commit(MemoryListener *listener)
2330 {
2331 CPUAddressSpace *cpuas;
2332 AddressSpaceDispatch *d;
2333
2334 /* since each CPU stores ram addresses in its TLB cache, we must
2335 reset the modified entries */
2336 cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
2337 cpu_reloading_memory_map();
2338 /* The CPU and TLB are protected by the iothread lock.
2339 * We reload the dispatch pointer now because cpu_reloading_memory_map()
2340 * may have split the RCU critical section.
2341 */
2342 d = atomic_rcu_read(&cpuas->as->dispatch);
2343 cpuas->memory_dispatch = d;
2344 tlb_flush(cpuas->cpu, 1);
2345 }
2346
2347 void address_space_init_dispatch(AddressSpace *as)
2348 {
2349 as->dispatch = NULL;
2350 as->dispatch_listener = (MemoryListener) {
2351 .begin = mem_begin,
2352 .commit = mem_commit,
2353 .region_add = mem_add,
2354 .region_nop = mem_add,
2355 .priority = 0,
2356 };
2357 memory_listener_register(&as->dispatch_listener, as);
2358 }
2359
2360 void address_space_unregister(AddressSpace *as)
2361 {
2362 memory_listener_unregister(&as->dispatch_listener);
2363 }
2364
2365 void address_space_destroy_dispatch(AddressSpace *as)
2366 {
2367 AddressSpaceDispatch *d = as->dispatch;
2368
2369 atomic_rcu_set(&as->dispatch, NULL);
2370 if (d) {
2371 call_rcu(d, address_space_dispatch_free, rcu);
2372 }
2373 }
2374
2375 static void memory_map_init(void)
2376 {
2377 system_memory = g_malloc(sizeof(*system_memory));
2378
2379 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
2380 address_space_init(&address_space_memory, system_memory, "memory");
2381
2382 system_io = g_malloc(sizeof(*system_io));
2383 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2384 65536);
2385 address_space_init(&address_space_io, system_io, "I/O");
2386 }
2387
2388 MemoryRegion *get_system_memory(void)
2389 {
2390 return system_memory;
2391 }
2392
2393 MemoryRegion *get_system_io(void)
2394 {
2395 return system_io;
2396 }
2397
2398 #endif /* !defined(CONFIG_USER_ONLY) */
2399
2400 /* physical memory access (slow version, mainly for debug) */
2401 #if defined(CONFIG_USER_ONLY)
2402 int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
2403 uint8_t *buf, int len, int is_write)
2404 {
2405 int l, flags;
2406 target_ulong page;
2407 void * p;
2408
2409 while (len > 0) {
2410 page = addr & TARGET_PAGE_MASK;
2411 l = (page + TARGET_PAGE_SIZE) - addr;
2412 if (l > len)
2413 l = len;
2414 flags = page_get_flags(page);
2415 if (!(flags & PAGE_VALID))
2416 return -1;
2417 if (is_write) {
2418 if (!(flags & PAGE_WRITE))
2419 return -1;
2420 /* XXX: this code should not depend on lock_user */
2421 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2422 return -1;
2423 memcpy(p, buf, l);
2424 unlock_user(p, addr, l);
2425 } else {
2426 if (!(flags & PAGE_READ))
2427 return -1;
2428 /* XXX: this code should not depend on lock_user */
2429 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2430 return -1;
2431 memcpy(buf, p, l);
2432 unlock_user(p, addr, 0);
2433 }
2434 len -= l;
2435 buf += l;
2436 addr += l;
2437 }
2438 return 0;
2439 }
2440
2441 #else
2442
2443 static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
2444 hwaddr length)
2445 {
2446 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
2447 /* No early return if dirty_log_mask is or becomes 0, because
2448 * cpu_physical_memory_set_dirty_range will still call
2449 * xen_modified_memory.
2450 */
2451 if (dirty_log_mask) {
2452 dirty_log_mask =
2453 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
2454 }
2455 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
2456 tb_invalidate_phys_range(addr, addr + length);
2457 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
2458 }
2459 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
2460 }
2461
2462 static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
2463 {
2464 unsigned access_size_max = mr->ops->valid.max_access_size;
2465
2466 /* Regions are assumed to support 1-4 byte accesses unless
2467 otherwise specified. */
2468 if (access_size_max == 0) {
2469 access_size_max = 4;
2470 }
2471
2472 /* Bound the maximum access by the alignment of the address. */
2473 if (!mr->ops->impl.unaligned) {
2474 unsigned align_size_max = addr & -addr;
2475 if (align_size_max != 0 && align_size_max < access_size_max) {
2476 access_size_max = align_size_max;
2477 }
2478 }
2479
2480 /* Don't attempt accesses larger than the maximum. */
2481 if (l > access_size_max) {
2482 l = access_size_max;
2483 }
2484 l = pow2floor(l);
2485
2486 return l;
2487 }
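
/*
 * Worked example (illustrative, not part of the original file): for a region
 * using the default 1-4 byte access sizes, a request of l = 8 at addr = 0x1002
 * is first capped to 4 by access_size_max, then to 2 because
 * addr & -addr == 2 (the address is only 2-byte aligned); pow2floor() keeps
 * it at 2, so the caller issues a 2-byte access and loops for the rest.
 */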
2488
2489 static bool prepare_mmio_access(MemoryRegion *mr)
2490 {
2491 bool unlocked = !qemu_mutex_iothread_locked();
2492 bool release_lock = false;
2493
2494 if (unlocked && mr->global_locking) {
2495 qemu_mutex_lock_iothread();
2496 unlocked = false;
2497 release_lock = true;
2498 }
2499 if (mr->flush_coalesced_mmio) {
2500 if (unlocked) {
2501 qemu_mutex_lock_iothread();
2502 }
2503 qemu_flush_coalesced_mmio_buffer();
2504 if (unlocked) {
2505 qemu_mutex_unlock_iothread();
2506 }
2507 }
2508
2509 return release_lock;
2510 }
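
/*
 * Illustrative caller pattern (not part of the original file), mirroring how
 * address_space_rw() below uses this helper: take the iothread lock only when
 * the region needs it, dispatch, then drop the lock.  "mr", "addr" and
 * "attrs" are hypothetical parameters supplied by the caller.
 */
#if 0
static void example_locked_mmio_read(MemoryRegion *mr, hwaddr addr,
                                     MemTxAttrs attrs)
{
    uint64_t val;
    bool release_lock = prepare_mmio_access(mr);

    memory_region_dispatch_read(mr, addr, &val, 4, attrs);
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
}
#endif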
2511
2512 MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2513 uint8_t *buf, int len, bool is_write)
2514 {
2515 hwaddr l;
2516 uint8_t *ptr;
2517 uint64_t val;
2518 hwaddr addr1;
2519 MemoryRegion *mr;
2520 MemTxResult result = MEMTX_OK;
2521 bool release_lock = false;
2522
2523 rcu_read_lock();
2524 while (len > 0) {
2525 l = len;
2526 mr = address_space_translate(as, addr, &addr1, &l, is_write);
2527
2528 if (is_write) {
2529 if (!memory_access_is_direct(mr, is_write)) {
2530 release_lock |= prepare_mmio_access(mr);
2531 l = memory_access_size(mr, l, addr1);
2532 /* XXX: could force current_cpu to NULL to avoid
2533 potential bugs */
2534 switch (l) {
2535 case 8:
2536 /* 64 bit write access */
2537 val = ldq_p(buf);
2538 result |= memory_region_dispatch_write(mr, addr1, val, 8,
2539 attrs);
2540 break;
2541 case 4:
2542 /* 32 bit write access */
2543 val = ldl_p(buf);
2544 result |= memory_region_dispatch_write(mr, addr1, val, 4,
2545 attrs);
2546 break;
2547 case 2:
2548 /* 16 bit write access */
2549 val = lduw_p(buf);
2550 result |= memory_region_dispatch_write(mr, addr1, val, 2,
2551 attrs);
2552 break;
2553 case 1:
2554 /* 8 bit write access */
2555 val = ldub_p(buf);
2556 result |= memory_region_dispatch_write(mr, addr1, val, 1,
2557 attrs);
2558 break;
2559 default:
2560 abort();
2561 }
2562 } else {
2563 addr1 += memory_region_get_ram_addr(mr);
2564 /* RAM case */
2565 ptr = qemu_get_ram_ptr(addr1);
2566 memcpy(ptr, buf, l);
2567 invalidate_and_set_dirty(mr, addr1, l);
2568 }
2569 } else {
2570 if (!memory_access_is_direct(mr, is_write)) {
2571 /* I/O case */
2572 release_lock |= prepare_mmio_access(mr);
2573 l = memory_access_size(mr, l, addr1);
2574 switch (l) {
2575 case 8:
2576 /* 64 bit read access */
2577 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
2578 attrs);
2579 stq_p(buf, val);
2580 break;
2581 case 4:
2582 /* 32 bit read access */
2583 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
2584 attrs);
2585 stl_p(buf, val);
2586 break;
2587 case 2:
2588 /* 16 bit read access */
2589 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
2590 attrs);
2591 stw_p(buf, val);
2592 break;
2593 case 1:
2594 /* 8 bit read access */
2595 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
2596 attrs);
2597 stb_p(buf, val);
2598 break;
2599 default:
2600 abort();
2601 }
2602 } else {
2603 /* RAM case */
2604 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
2605 memcpy(buf, ptr, l);
2606 }
2607 }
2608
2609 if (release_lock) {
2610 qemu_mutex_unlock_iothread();
2611 release_lock = false;
2612 }
2613
2614 len -= l;
2615 buf += l;
2616 addr += l;
2617 }
2618 rcu_read_unlock();
2619
2620 return result;
2621 }
2622
2623 MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2624 const uint8_t *buf, int len)
2625 {
2626 return address_space_rw(as, addr, attrs, (uint8_t *)buf, len, true);
2627 }
2628
2629 MemTxResult address_space_read(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2630 uint8_t *buf, int len)
2631 {
2632 return address_space_rw(as, addr, attrs, buf, len, false);
2633 }
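
/*
 * Illustrative usage sketch (not part of the original file): a device model
 * reading a guest buffer and checking the transaction result.
 * "guest_buf_addr" is a hypothetical guest-physical address.
 */
#if 0
static bool example_read_guest_buffer(AddressSpace *as, hwaddr guest_buf_addr)
{
    uint8_t buf[64];
    MemTxResult res;

    res = address_space_read(as, guest_buf_addr, MEMTXATTRS_UNSPECIFIED,
                             buf, sizeof(buf));
    return res == MEMTX_OK;
}
#endif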
2634
2635
2636 void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
2637 int len, int is_write)
2638 {
2639 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
2640 buf, len, is_write);
2641 }
2642
2643 enum write_rom_type {
2644 WRITE_DATA,
2645 FLUSH_CACHE,
2646 };
2647
2648 static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
2649 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
2650 {
2651 hwaddr l;
2652 uint8_t *ptr;
2653 hwaddr addr1;
2654 MemoryRegion *mr;
2655
2656 rcu_read_lock();
2657 while (len > 0) {
2658 l = len;
2659 mr = address_space_translate(as, addr, &addr1, &l, true);
2660
2661 if (!(memory_region_is_ram(mr) ||
2662 memory_region_is_romd(mr))) {
2663 l = memory_access_size(mr, l, addr1);
2664 } else {
2665 addr1 += memory_region_get_ram_addr(mr);
2666 /* ROM/RAM case */
2667 ptr = qemu_get_ram_ptr(addr1);
2668 switch (type) {
2669 case WRITE_DATA:
2670 memcpy(ptr, buf, l);
2671 invalidate_and_set_dirty(mr, addr1, l);
2672 break;
2673 case FLUSH_CACHE:
2674 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2675 break;
2676 }
2677 }
2678 len -= l;
2679 buf += l;
2680 addr += l;
2681 }
2682 rcu_read_unlock();
2683 }
2684
2685 /* used for ROM loading: can write in RAM and ROM */
2686 void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
2687 const uint8_t *buf, int len)
2688 {
2689 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
2690 }
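
/*
 * Illustrative usage sketch (not part of the original file): a firmware
 * loader writing a blob into ROM-backed memory, which an ordinary
 * address_space_write() would leave untouched.  "rom_base", "blob" and
 * "blob_size" are hypothetical.
 */
#if 0
static void example_load_rom(hwaddr rom_base, const uint8_t *blob,
                             int blob_size)
{
    cpu_physical_memory_write_rom(&address_space_memory, rom_base,
                                  blob, blob_size);
}
#endif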
2691
2692 void cpu_flush_icache_range(hwaddr start, int len)
2693 {
2694 /*
2695 * This function should do the same thing as an icache flush that was
2696 * triggered from within the guest. For TCG we are always cache coherent,
2697 * so there is no need to flush anything. For KVM / Xen we need to flush
2698 * the host's instruction cache at least.
2699 */
2700 if (tcg_enabled()) {
2701 return;
2702 }
2703
2704 cpu_physical_memory_write_rom_internal(&address_space_memory,
2705 start, NULL, len, FLUSH_CACHE);
2706 }
2707
2708 typedef struct {
2709 MemoryRegion *mr;
2710 void *buffer;
2711 hwaddr addr;
2712 hwaddr len;
2713 bool in_use;
2714 } BounceBuffer;
2715
2716 static BounceBuffer bounce;
2717
2718 typedef struct MapClient {
2719 QEMUBH *bh;
2720 QLIST_ENTRY(MapClient) link;
2721 } MapClient;
2722
2723 QemuMutex map_client_list_lock;
2724 static QLIST_HEAD(map_client_list, MapClient) map_client_list
2725 = QLIST_HEAD_INITIALIZER(map_client_list);
2726
2727 static void cpu_unregister_map_client_do(MapClient *client)
2728 {
2729 QLIST_REMOVE(client, link);
2730 g_free(client);
2731 }
2732
2733 static void cpu_notify_map_clients_locked(void)
2734 {
2735 MapClient *client;
2736
2737 while (!QLIST_EMPTY(&map_client_list)) {
2738 client = QLIST_FIRST(&map_client_list);
2739 qemu_bh_schedule(client->bh);
2740 cpu_unregister_map_client_do(client);
2741 }
2742 }
2743
2744 void cpu_register_map_client(QEMUBH *bh)
2745 {
2746 MapClient *client = g_malloc(sizeof(*client));
2747
2748 qemu_mutex_lock(&map_client_list_lock);
2749 client->bh = bh;
2750 QLIST_INSERT_HEAD(&map_client_list, client, link);
2751 if (!atomic_read(&bounce.in_use)) {
2752 cpu_notify_map_clients_locked();
2753 }
2754 qemu_mutex_unlock(&map_client_list_lock);
2755 }
2756
2757 void cpu_exec_init_all(void)
2758 {
2759 qemu_mutex_init(&ram_list.mutex);
2760 io_mem_init();
2761 memory_map_init();
2762 qemu_mutex_init(&map_client_list_lock);
2763 }
2764
2765 void cpu_unregister_map_client(QEMUBH *bh)
2766 {
2767 MapClient *client;
2768
2769 qemu_mutex_lock(&map_client_list_lock);
2770 QLIST_FOREACH(client, &map_client_list, link) {
2771 if (client->bh == bh) {
2772 cpu_unregister_map_client_do(client);
2773 break;
2774 }
2775 }
2776 qemu_mutex_unlock(&map_client_list_lock);
2777 }
2778
2779 static void cpu_notify_map_clients(void)
2780 {
2781 qemu_mutex_lock(&map_client_list_lock);
2782 cpu_notify_map_clients_locked();
2783 qemu_mutex_unlock(&map_client_list_lock);
2784 }
2785
2786 bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2787 {
2788 MemoryRegion *mr;
2789 hwaddr l, xlat;
2790
2791 rcu_read_lock();
2792 while (len > 0) {
2793 l = len;
2794 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2795 if (!memory_access_is_direct(mr, is_write)) {
2796 l = memory_access_size(mr, l, addr);
2797 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
rcu_read_unlock();
2798 return false;
2799 }
2800 }
2801
2802 len -= l;
2803 addr += l;
2804 }
2805 rcu_read_unlock();
2806 return true;
2807 }
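
/*
 * Illustrative usage sketch (not part of the original file): probing a DMA
 * window before committing to the transfer.  "dma_addr", "data" and
 * "dma_len" are hypothetical values taken from a device's registers.
 */
#if 0
static MemTxResult example_dma_write(AddressSpace *as, hwaddr dma_addr,
                                     const uint8_t *data, int dma_len)
{
    if (!address_space_access_valid(as, dma_addr, dma_len, true)) {
        return MEMTX_DECODE_ERROR;
    }
    return address_space_write(as, dma_addr, MEMTXATTRS_UNSPECIFIED,
                               data, dma_len);
}
#endif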
2808
2809 /* Map a physical memory region into a host virtual address.
2810 * May map a subset of the requested range, given by and returned in *plen.
2811 * May return NULL if resources needed to perform the mapping are exhausted.
2812 * Use only for reads OR writes - not for read-modify-write operations.
2813 * Use cpu_register_map_client() to know when retrying the map operation is
2814 * likely to succeed.
2815 */
2816 void *address_space_map(AddressSpace *as,
2817 hwaddr addr,
2818 hwaddr *plen,
2819 bool is_write)
2820 {
2821 hwaddr len = *plen;
2822 hwaddr done = 0;
2823 hwaddr l, xlat, base;
2824 MemoryRegion *mr, *this_mr;
2825 ram_addr_t raddr;
2826
2827 if (len == 0) {
2828 return NULL;
2829 }
2830
2831 l = len;
2832 rcu_read_lock();
2833 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2834
2835 if (!memory_access_is_direct(mr, is_write)) {
2836 if (atomic_xchg(&bounce.in_use, true)) {
2837 rcu_read_unlock();
2838 return NULL;
2839 }
2840 /* Avoid unbounded allocations */
2841 l = MIN(l, TARGET_PAGE_SIZE);
2842 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
2843 bounce.addr = addr;
2844 bounce.len = l;
2845
2846 memory_region_ref(mr);
2847 bounce.mr = mr;
2848 if (!is_write) {
2849 address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
2850 bounce.buffer, l);
2851 }
2852
2853 rcu_read_unlock();
2854 *plen = l;
2855 return bounce.buffer;
2856 }
2857
2858 base = xlat;
2859 raddr = memory_region_get_ram_addr(mr);
2860
2861 for (;;) {
2862 len -= l;
2863 addr += l;
2864 done += l;
2865 if (len == 0) {
2866 break;
2867 }
2868
2869 l = len;
2870 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2871 if (this_mr != mr || xlat != base + done) {
2872 break;
2873 }
2874 }
2875
2876 memory_region_ref(mr);
2877 rcu_read_unlock();
2878 *plen = done;
2879 return qemu_ram_ptr_length(raddr + base, plen);
2880 }
2881
2882 /* Unmaps a memory region previously mapped by address_space_map().
2883 * Will also mark the memory as dirty if is_write == 1. access_len gives
2884 * the amount of memory that was actually read or written by the caller.
2885 */
2886 void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2887 int is_write, hwaddr access_len)
2888 {
2889 if (buffer != bounce.buffer) {
2890 MemoryRegion *mr;
2891 ram_addr_t addr1;
2892
2893 mr = qemu_ram_addr_from_host(buffer, &addr1);
2894 assert(mr != NULL);
2895 if (is_write) {
2896 invalidate_and_set_dirty(mr, addr1, access_len);
2897 }
2898 if (xen_enabled()) {
2899 xen_invalidate_map_cache_entry(buffer);
2900 }
2901 memory_region_unref(mr);
2902 return;
2903 }
2904 if (is_write) {
2905 address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
2906 bounce.buffer, access_len);
2907 }
2908 qemu_vfree(bounce.buffer);
2909 bounce.buffer = NULL;
2910 memory_region_unref(bounce.mr);
2911 atomic_mb_set(&bounce.in_use, false);
2912 cpu_notify_map_clients();
2913 }
2914
2915 void *cpu_physical_memory_map(hwaddr addr,
2916 hwaddr *plen,
2917 int is_write)
2918 {
2919 return address_space_map(&address_space_memory, addr, plen, is_write);
2920 }
2921
2922 void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2923 int is_write, hwaddr access_len)
2924 {
2925 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2926 }
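
/*
 * Illustrative usage sketch (not part of the original file): the map/unmap
 * pairing used for zero-copy DMA.  When the target is not directly
 * accessible the map falls back to the single bounce buffer and may return
 * less than requested, or NULL while the bounce buffer is busy;
 * cpu_register_map_client() can then be used to retry once it is free.
 */
#if 0
static void example_dma_to_guest(AddressSpace *as, hwaddr addr, hwaddr len,
                                 const uint8_t *src)
{
    hwaddr plen = len;
    void *host = address_space_map(as, addr, &plen, true);

    if (!host) {
        return; /* resources exhausted; retry via cpu_register_map_client() */
    }
    /* a real caller would loop until all of len has been transferred */
    memcpy(host, src, plen);
    address_space_unmap(as, host, plen, true, plen);
}
#endif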
2927
2928 /* warning: addr must be aligned */
2929 static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
2930 MemTxAttrs attrs,
2931 MemTxResult *result,
2932 enum device_endian endian)
2933 {
2934 uint8_t *ptr;
2935 uint64_t val;
2936 MemoryRegion *mr;
2937 hwaddr l = 4;
2938 hwaddr addr1;
2939 MemTxResult r;
2940 bool release_lock = false;
2941
2942 rcu_read_lock();
2943 mr = address_space_translate(as, addr, &addr1, &l, false);
2944 if (l < 4 || !memory_access_is_direct(mr, false)) {
2945 release_lock |= prepare_mmio_access(mr);
2946
2947 /* I/O case */
2948 r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
2949 #if defined(TARGET_WORDS_BIGENDIAN)
2950 if (endian == DEVICE_LITTLE_ENDIAN) {
2951 val = bswap32(val);
2952 }
2953 #else
2954 if (endian == DEVICE_BIG_ENDIAN) {
2955 val = bswap32(val);
2956 }
2957 #endif
2958 } else {
2959 /* RAM case */
2960 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
2961 & TARGET_PAGE_MASK)
2962 + addr1);
2963 switch (endian) {
2964 case DEVICE_LITTLE_ENDIAN:
2965 val = ldl_le_p(ptr);
2966 break;
2967 case DEVICE_BIG_ENDIAN:
2968 val = ldl_be_p(ptr);
2969 break;
2970 default:
2971 val = ldl_p(ptr);
2972 break;
2973 }
2974 r = MEMTX_OK;
2975 }
2976 if (result) {
2977 *result = r;
2978 }
2979 if (release_lock) {
2980 qemu_mutex_unlock_iothread();
2981 }
2982 rcu_read_unlock();
2983 return val;
2984 }
2985
2986 uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
2987 MemTxAttrs attrs, MemTxResult *result)
2988 {
2989 return address_space_ldl_internal(as, addr, attrs, result,
2990 DEVICE_NATIVE_ENDIAN);
2991 }
2992
2993 uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
2994 MemTxAttrs attrs, MemTxResult *result)
2995 {
2996 return address_space_ldl_internal(as, addr, attrs, result,
2997 DEVICE_LITTLE_ENDIAN);
2998 }
2999
3000 uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
3001 MemTxAttrs attrs, MemTxResult *result)
3002 {
3003 return address_space_ldl_internal(as, addr, attrs, result,
3004 DEVICE_BIG_ENDIAN);
3005 }
3006
3007 uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
3008 {
3009 return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3010 }
3011
3012 uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
3013 {
3014 return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3015 }
3016
3017 uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
3018 {
3019 return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3020 }
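
/*
 * Illustrative usage sketch (not part of the original file): reading a
 * 32-bit descriptor field with an explicit endianness, as device models
 * commonly do.  "desc_addr" is a hypothetical guest-physical address.
 */
#if 0
static uint32_t example_read_le32_field(AddressSpace *as, hwaddr desc_addr)
{
    /* little-endian regardless of host and target byte order */
    return ldl_le_phys(as, desc_addr);
}
#endif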
3021
3022 /* warning: addr must be aligned */
3023 static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
3024 MemTxAttrs attrs,
3025 MemTxResult *result,
3026 enum device_endian endian)
3027 {
3028 uint8_t *ptr;
3029 uint64_t val;
3030 MemoryRegion *mr;
3031 hwaddr l = 8;
3032 hwaddr addr1;
3033 MemTxResult r;
3034 bool release_lock = false;
3035
3036 rcu_read_lock();
3037 mr = address_space_translate(as, addr, &addr1, &l,
3038 false);
3039 if (l < 8 || !memory_access_is_direct(mr, false)) {
3040 release_lock |= prepare_mmio_access(mr);
3041
3042 /* I/O case */
3043 r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
3044 #if defined(TARGET_WORDS_BIGENDIAN)
3045 if (endian == DEVICE_LITTLE_ENDIAN) {
3046 val = bswap64(val);
3047 }
3048 #else
3049 if (endian == DEVICE_BIG_ENDIAN) {
3050 val = bswap64(val);
3051 }
3052 #endif
3053 } else {
3054 /* RAM case */
3055 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
3056 & TARGET_PAGE_MASK)
3057 + addr1);
3058 switch (endian) {
3059 case DEVICE_LITTLE_ENDIAN:
3060 val = ldq_le_p(ptr);
3061 break;
3062 case DEVICE_BIG_ENDIAN:
3063 val = ldq_be_p(ptr);
3064 break;
3065 default:
3066 val = ldq_p(ptr);
3067 break;
3068 }
3069 r = MEMTX_OK;
3070 }
3071 if (result) {
3072 *result = r;
3073 }
3074 if (release_lock) {
3075 qemu_mutex_unlock_iothread();
3076 }
3077 rcu_read_unlock();
3078 return val;
3079 }
3080
3081 uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
3082 MemTxAttrs attrs, MemTxResult *result)
3083 {
3084 return address_space_ldq_internal(as, addr, attrs, result,
3085 DEVICE_NATIVE_ENDIAN);
3086 }
3087
3088 uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
3089 MemTxAttrs attrs, MemTxResult *result)
3090 {
3091 return address_space_ldq_internal(as, addr, attrs, result,
3092 DEVICE_LITTLE_ENDIAN);
3093 }
3094
3095 uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
3096 MemTxAttrs attrs, MemTxResult *result)
3097 {
3098 return address_space_ldq_internal(as, addr, attrs, result,
3099 DEVICE_BIG_ENDIAN);
3100 }
3101
3102 uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
3103 {
3104 return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3105 }
3106
3107 uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
3108 {
3109 return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3110 }
3111
3112 uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
3113 {
3114 return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3115 }
3116
3117 /* XXX: optimize */
3118 uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
3119 MemTxAttrs attrs, MemTxResult *result)
3120 {
3121 uint8_t val;
3122 MemTxResult r;
3123
3124 r = address_space_rw(as, addr, attrs, &val, 1, 0);
3125 if (result) {
3126 *result = r;
3127 }
3128 return val;
3129 }
3130
3131 uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
3132 {
3133 return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3134 }
3135
3136 /* warning: addr must be aligned */
3137 static inline uint32_t address_space_lduw_internal(AddressSpace *as,
3138 hwaddr addr,
3139 MemTxAttrs attrs,
3140 MemTxResult *result,
3141 enum device_endian endian)
3142 {
3143 uint8_t *ptr;
3144 uint64_t val;
3145 MemoryRegion *mr;
3146 hwaddr l = 2;
3147 hwaddr addr1;
3148 MemTxResult r;
3149 bool release_lock = false;
3150
3151 rcu_read_lock();
3152 mr = address_space_translate(as, addr, &addr1, &l,
3153 false);
3154 if (l < 2 || !memory_access_is_direct(mr, false)) {
3155 release_lock |= prepare_mmio_access(mr);
3156
3157 /* I/O case */
3158 r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
3159 #if defined(TARGET_WORDS_BIGENDIAN)
3160 if (endian == DEVICE_LITTLE_ENDIAN) {
3161 val = bswap16(val);
3162 }
3163 #else
3164 if (endian == DEVICE_BIG_ENDIAN) {
3165 val = bswap16(val);
3166 }
3167 #endif
3168 } else {
3169 /* RAM case */
3170 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
3171 & TARGET_PAGE_MASK)
3172 + addr1);
3173 switch (endian) {
3174 case DEVICE_LITTLE_ENDIAN:
3175 val = lduw_le_p(ptr);
3176 break;
3177 case DEVICE_BIG_ENDIAN:
3178 val = lduw_be_p(ptr);
3179 break;
3180 default:
3181 val = lduw_p(ptr);
3182 break;
3183 }
3184 r = MEMTX_OK;
3185 }
3186 if (result) {
3187 *result = r;
3188 }
3189 if (release_lock) {
3190 qemu_mutex_unlock_iothread();
3191 }
3192 rcu_read_unlock();
3193 return val;
3194 }
3195
3196 uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
3197 MemTxAttrs attrs, MemTxResult *result)
3198 {
3199 return address_space_lduw_internal(as, addr, attrs, result,
3200 DEVICE_NATIVE_ENDIAN);
3201 }
3202
3203 uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
3204 MemTxAttrs attrs, MemTxResult *result)
3205 {
3206 return address_space_lduw_internal(as, addr, attrs, result,
3207 DEVICE_LITTLE_ENDIAN);
3208 }
3209
3210 uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
3211 MemTxAttrs attrs, MemTxResult *result)
3212 {
3213 return address_space_lduw_internal(as, addr, attrs, result,
3214 DEVICE_BIG_ENDIAN);
3215 }
3216
3217 uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
3218 {
3219 return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3220 }
3221
3222 uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
3223 {
3224 return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3225 }
3226
3227 uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
3228 {
3229 return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3230 }
3231
3232 /* warning: addr must be aligned. The ram page is not marked as dirty
3233 and the code inside is not invalidated. It is useful if the dirty
3234 bits are used to track modified PTEs */
3235 void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
3236 MemTxAttrs attrs, MemTxResult *result)
3237 {
3238 uint8_t *ptr;
3239 MemoryRegion *mr;
3240 hwaddr l = 4;
3241 hwaddr addr1;
3242 MemTxResult r;
3243 uint8_t dirty_log_mask;
3244 bool release_lock = false;
3245
3246 rcu_read_lock();
3247 mr = address_space_translate(as, addr, &addr1, &l,
3248 true);
3249 if (l < 4 || !memory_access_is_direct(mr, true)) {
3250 release_lock |= prepare_mmio_access(mr);
3251
3252 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
3253 } else {
3254 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
3255 ptr = qemu_get_ram_ptr(addr1);
3256 stl_p(ptr, val);
3257
3258 dirty_log_mask = memory_region_get_dirty_log_mask(mr);
3259 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
3260 cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
3261 r = MEMTX_OK;
3262 }
3263 if (result) {
3264 *result = r;
3265 }
3266 if (release_lock) {
3267 qemu_mutex_unlock_iothread();
3268 }
3269 rcu_read_unlock();
3270 }
3271
3272 void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
3273 {
3274 address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3275 }
3276
3277 /* warning: addr must be aligned */
3278 static inline void address_space_stl_internal(AddressSpace *as,
3279 hwaddr addr, uint32_t val,
3280 MemTxAttrs attrs,
3281 MemTxResult *result,
3282 enum device_endian endian)
3283 {
3284 uint8_t *ptr;
3285 MemoryRegion *mr;
3286 hwaddr l = 4;
3287 hwaddr addr1;
3288 MemTxResult r;
3289 bool release_lock = false;
3290
3291 rcu_read_lock();
3292 mr = address_space_translate(as, addr, &addr1, &l,
3293 true);
3294 if (l < 4 || !memory_access_is_direct(mr, true)) {
3295 release_lock |= prepare_mmio_access(mr);
3296
3297 #if defined(TARGET_WORDS_BIGENDIAN)
3298 if (endian == DEVICE_LITTLE_ENDIAN) {
3299 val = bswap32(val);
3300 }
3301 #else
3302 if (endian == DEVICE_BIG_ENDIAN) {
3303 val = bswap32(val);
3304 }
3305 #endif
3306 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
3307 } else {
3308 /* RAM case */
3309 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
3310 ptr = qemu_get_ram_ptr(addr1);
3311 switch (endian) {
3312 case DEVICE_LITTLE_ENDIAN:
3313 stl_le_p(ptr, val);
3314 break;
3315 case DEVICE_BIG_ENDIAN:
3316 stl_be_p(ptr, val);
3317 break;
3318 default:
3319 stl_p(ptr, val);
3320 break;
3321 }
3322 invalidate_and_set_dirty(mr, addr1, 4);
3323 r = MEMTX_OK;
3324 }
3325 if (result) {
3326 *result = r;
3327 }
3328 if (release_lock) {
3329 qemu_mutex_unlock_iothread();
3330 }
3331 rcu_read_unlock();
3332 }
3333
3334 void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
3335 MemTxAttrs attrs, MemTxResult *result)
3336 {
3337 address_space_stl_internal(as, addr, val, attrs, result,
3338 DEVICE_NATIVE_ENDIAN);
3339 }
3340
3341 void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
3342 MemTxAttrs attrs, MemTxResult *result)
3343 {
3344 address_space_stl_internal(as, addr, val, attrs, result,
3345 DEVICE_LITTLE_ENDIAN);
3346 }
3347
3348 void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
3349 MemTxAttrs attrs, MemTxResult *result)
3350 {
3351 address_space_stl_internal(as, addr, val, attrs, result,
3352 DEVICE_BIG_ENDIAN);
3353 }
3354
3355 void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3356 {
3357 address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3358 }
3359
3360 void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3361 {
3362 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3363 }
3364
3365 void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3366 {
3367 address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3368 }
3369
3370 /* XXX: optimize */
3371 void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
3372 MemTxAttrs attrs, MemTxResult *result)
3373 {
3374 uint8_t v = val;
3375 MemTxResult r;
3376
3377 r = address_space_rw(as, addr, attrs, &v, 1, 1);
3378 if (result) {
3379 *result = r;
3380 }
3381 }
3382
3383 void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3384 {
3385 address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3386 }
3387
3388 /* warning: addr must be aligned */
3389 static inline void address_space_stw_internal(AddressSpace *as,
3390 hwaddr addr, uint32_t val,
3391 MemTxAttrs attrs,
3392 MemTxResult *result,
3393 enum device_endian endian)
3394 {
3395 uint8_t *ptr;
3396 MemoryRegion *mr;
3397 hwaddr l = 2;
3398 hwaddr addr1;
3399 MemTxResult r;
3400 bool release_lock = false;
3401
3402 rcu_read_lock();
3403 mr = address_space_translate(as, addr, &addr1, &l, true);
3404 if (l < 2 || !memory_access_is_direct(mr, true)) {
3405 release_lock |= prepare_mmio_access(mr);
3406
3407 #if defined(TARGET_WORDS_BIGENDIAN)
3408 if (endian == DEVICE_LITTLE_ENDIAN) {
3409 val = bswap16(val);
3410 }
3411 #else
3412 if (endian == DEVICE_BIG_ENDIAN) {
3413 val = bswap16(val);
3414 }
3415 #endif
3416 r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
3417 } else {
3418 /* RAM case */
3419 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
3420 ptr = qemu_get_ram_ptr(addr1);
3421 switch (endian) {
3422 case DEVICE_LITTLE_ENDIAN:
3423 stw_le_p(ptr, val);
3424 break;
3425 case DEVICE_BIG_ENDIAN:
3426 stw_be_p(ptr, val);
3427 break;
3428 default:
3429 stw_p(ptr, val);
3430 break;
3431 }
3432 invalidate_and_set_dirty(mr, addr1, 2);
3433 r = MEMTX_OK;
3434 }
3435 if (result) {
3436 *result = r;
3437 }
3438 if (release_lock) {
3439 qemu_mutex_unlock_iothread();
3440 }
3441 rcu_read_unlock();
3442 }
3443
3444 void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
3445 MemTxAttrs attrs, MemTxResult *result)
3446 {
3447 address_space_stw_internal(as, addr, val, attrs, result,
3448 DEVICE_NATIVE_ENDIAN);
3449 }
3450
3451 void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
3452 MemTxAttrs attrs, MemTxResult *result)
3453 {
3454 address_space_stw_internal(as, addr, val, attrs, result,
3455 DEVICE_LITTLE_ENDIAN);
3456 }
3457
3458 void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
3459 MemTxAttrs attrs, MemTxResult *result)
3460 {
3461 address_space_stw_internal(as, addr, val, attrs, result,
3462 DEVICE_BIG_ENDIAN);
3463 }
3464
3465 void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3466 {
3467 address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3468 }
3469
3470 void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3471 {
3472 address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3473 }
3474
3475 void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3476 {
3477 address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3478 }
3479
3480 /* XXX: optimize */
3481 void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
3482 MemTxAttrs attrs, MemTxResult *result)
3483 {
3484 MemTxResult r;
3485 val = tswap64(val);
3486 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3487 if (result) {
3488 *result = r;
3489 }
3490 }
3491
3492 void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
3493 MemTxAttrs attrs, MemTxResult *result)
3494 {
3495 MemTxResult r;
3496 val = cpu_to_le64(val);
3497 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3498 if (result) {
3499 *result = r;
3500 }
3501 }

3502 void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
3503 MemTxAttrs attrs, MemTxResult *result)
3504 {
3505 MemTxResult r;
3506 val = cpu_to_be64(val);
3507 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3508 if (result) {
3509 *result = r;
3510 }
3511 }
3512
3513 void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
3514 {
3515 address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3516 }
3517
3518 void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
3519 {
3520 address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3521 }
3522
3523 void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
3524 {
3525 address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3526 }
3527
3528 /* virtual memory access for debug (includes writing to ROM) */
3529 int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
3530 uint8_t *buf, int len, int is_write)
3531 {
3532 int l;
3533 hwaddr phys_addr;
3534 target_ulong page;
3535
3536 while (len > 0) {
3537 page = addr & TARGET_PAGE_MASK;
3538 phys_addr = cpu_get_phys_page_debug(cpu, page);
3539 /* if no physical page is mapped, return an error */
3540 if (phys_addr == -1)
3541 return -1;
3542 l = (page + TARGET_PAGE_SIZE) - addr;
3543 if (l > len)
3544 l = len;
3545 phys_addr += (addr & ~TARGET_PAGE_MASK);
3546 if (is_write) {
3547 cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
3548 } else {
3549 address_space_rw(cpu->as, phys_addr, MEMTXATTRS_UNSPECIFIED,
3550 buf, l, 0);
3551 }
3552 len -= l;
3553 buf += l;
3554 addr += l;
3555 }
3556 return 0;
3557 }
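
/*
 * Illustrative usage sketch (not part of the original file): this is the
 * path a debugger stub takes to peek at guest virtual memory; writes go
 * through cpu_physical_memory_write_rom() above so that breakpoints can be
 * planted even in ROM.
 */
#if 0
static uint32_t example_debug_peek_u32(CPUState *cpu, target_ulong vaddr)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(cpu, vaddr, buf, sizeof(buf), 0) < 0) {
        return 0;
    }
    return ldl_p(buf);
}
#endif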
3558
3559 /*
3560 * Allows code that needs to deal with migration bitmaps etc to still be built
3561 * target independent.
3562 */
3563 size_t qemu_target_page_bits(void)
3564 {
3565 return TARGET_PAGE_BITS;
3566 }
3567
3568 #endif
3569
3570 /*
3571 * A helper function for the _utterly broken_ virtio device model to find out if
3572 * it's running on a big endian machine. Don't do this at home kids!
3573 */
3574 bool target_words_bigendian(void);
3575 bool target_words_bigendian(void)
3576 {
3577 #if defined(TARGET_WORDS_BIGENDIAN)
3578 return true;
3579 #else
3580 return false;
3581 #endif
3582 }
3583
3584 #ifndef CONFIG_USER_ONLY
3585 bool cpu_physical_memory_is_io(hwaddr phys_addr)
3586 {
3587 MemoryRegion *mr;
3588 hwaddr l = 1;
3589 bool res;
3590
3591 rcu_read_lock();
3592 mr = address_space_translate(&address_space_memory,
3593 phys_addr, &phys_addr, &l, false);
3594
3595 res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
3596 rcu_read_unlock();
3597 return res;
3598 }
3599
3600 int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
3601 {
3602 RAMBlock *block;
3603 int ret = 0;
3604
3605 rcu_read_lock();
3606 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
3607 ret = func(block->idstr, block->host, block->offset,
3608 block->used_length, opaque);
3609 if (ret) {
3610 break;
3611 }
3612 }
3613 rcu_read_unlock();
3614 return ret;
3615 }
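
/*
 * Illustrative usage sketch (not part of the original file): a callback that
 * totals the used length of every RAM block; iteration stops early as soon
 * as the callback returns a non-zero value.
 */
#if 0
static int example_sum_block_cb(const char *block_name, void *host_addr,
                                ram_addr_t offset, ram_addr_t length,
                                void *opaque)
{
    uint64_t *total = opaque;

    *total += length;
    return 0;
}

static uint64_t example_total_ram(void)
{
    uint64_t total = 0;

    qemu_ram_foreach_block(example_sum_block_cb, &total);
    return total;
}
#endif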
3616 #endif