/*
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/types.h>

#include "qemu-common.h"

#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#endif

#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

#include "qemu/range.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
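
/* Illustrative reader-side sketch (not part of the original file): walking
 * ram_list.blocks only requires an RCU read-side critical section, while
 * writers must additionally take the ramlist lock:
 *
 *     RAMBlock *block;
 *
 *     rcu_read_lock();
 *     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
 *         // inspect block->offset, block->used_length, ...
 *     }
 *     rcu_read_unlock();
 */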

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];
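
/* Illustrative worked example (not part of the original file): with the usual
 * TARGET_PAGE_BITS of 12 and P_L2_BITS of 9, the physical-address radix tree
 * needs P_L2_LEVELS = ((64 - 12 - 1) / 9) + 1 = 6 levels, each level indexing
 * P_L2_SIZE = 512 PhysPageEntry slots.
 */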

typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    }
    return &sections[PHYS_SECTION_UNASSIGNED];
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}
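
/* Illustrative worked example (not part of the original file): assume a RAM
 * section whose offset_within_address_space is 0x100000, offset_within_region
 * is 0 and size is 0x10000.  Translating addr = 0x100010 with *plen = 0x20000
 * yields *xlat = 0x10, and because the region is RAM the length is clamped to
 * the section: *plen = min(0x10000 - 0x10, 0x20000) = 0xfff0.  For MMIO the
 * clamping is deliberately left to memory_access_size(), as explained above.
 */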

static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}

/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(cpu->memory_dispatch,
                                               addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
440 #if !defined(CONFIG_USER_ONLY)
442 static int cpu_common_post_load(void *opaque
, int version_id
)
444 CPUState
*cpu
= opaque
;
446 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
447 version_id is increased. */
448 cpu
->interrupt_request
&= ~0x01;
454 static int cpu_common_pre_load(void *opaque
)
456 CPUState
*cpu
= opaque
;
458 cpu
->exception_index
= -1;
463 static bool cpu_common_exception_index_needed(void *opaque
)
465 CPUState
*cpu
= opaque
;
467 return tcg_enabled() && cpu
->exception_index
!= -1;
470 static const VMStateDescription vmstate_cpu_common_exception_index
= {
471 .name
= "cpu_common/exception_index",
473 .minimum_version_id
= 1,
474 .needed
= cpu_common_exception_index_needed
,
475 .fields
= (VMStateField
[]) {
476 VMSTATE_INT32(exception_index
, CPUState
),
477 VMSTATE_END_OF_LIST()
481 const VMStateDescription vmstate_cpu_common
= {
482 .name
= "cpu_common",
484 .minimum_version_id
= 1,
485 .pre_load
= cpu_common_pre_load
,
486 .post_load
= cpu_common_post_load
,
487 .fields
= (VMStateField
[]) {
488 VMSTATE_UINT32(halted
, CPUState
),
489 VMSTATE_UINT32(interrupt_request
, CPUState
),
490 VMSTATE_END_OF_LIST()
492 .subsections
= (const VMStateDescription
*[]) {
493 &vmstate_cpu_common_exception_index
,
500 CPUState
*qemu_get_cpu(int index
)
505 if (cpu
->cpu_index
== index
) {
513 #if !defined(CONFIG_USER_ONLY)
514 void tcg_cpu_address_space_init(CPUState
*cpu
, AddressSpace
*as
)
516 /* We only support one address space per cpu at the moment. */
517 assert(cpu
->as
== as
);
519 if (cpu
->tcg_as_listener
) {
520 memory_listener_unregister(cpu
->tcg_as_listener
);
522 cpu
->tcg_as_listener
= g_new0(MemoryListener
, 1);
524 cpu
->tcg_as_listener
->commit
= tcg_commit
;
525 memory_listener_register(cpu
->tcg_as_listener
, as
);
529 #ifndef CONFIG_USER_ONLY
530 static DECLARE_BITMAP(cpu_index_map
, MAX_CPUMASK_BITS
);
532 static int cpu_get_free_index(Error
**errp
)
534 int cpu
= find_first_zero_bit(cpu_index_map
, MAX_CPUMASK_BITS
);
536 if (cpu
>= MAX_CPUMASK_BITS
) {
537 error_setg(errp
, "Trying to use more CPUs than max of %d",
542 bitmap_set(cpu_index_map
, cpu
, 1);
546 void cpu_exec_exit(CPUState
*cpu
)
548 if (cpu
->cpu_index
== -1) {
549 /* cpu_index was never allocated by this @cpu or was already freed. */
553 bitmap_clear(cpu_index_map
, cpu
->cpu_index
, 1);
558 static int cpu_get_free_index(Error
**errp
)
563 CPU_FOREACH(some_cpu
) {
569 void cpu_exec_exit(CPUState
*cpu
)
574 void cpu_exec_init(CPUArchState
*env
, Error
**errp
)
576 CPUState
*cpu
= ENV_GET_CPU(env
);
577 CPUClass
*cc
= CPU_GET_CLASS(cpu
);
579 Error
*local_err
= NULL
;
581 #ifndef CONFIG_USER_ONLY
582 cpu
->as
= &address_space_memory
;
583 cpu
->thread_id
= qemu_get_thread_id();
584 cpu_reload_memory_map(cpu
);
587 #if defined(CONFIG_USER_ONLY)
590 cpu_index
= cpu
->cpu_index
= cpu_get_free_index(&local_err
);
592 error_propagate(errp
, local_err
);
593 #if defined(CONFIG_USER_ONLY)
598 QTAILQ_INSERT_TAIL(&cpus
, cpu
, node
);
599 #if defined(CONFIG_USER_ONLY)
602 if (qdev_get_vmsd(DEVICE(cpu
)) == NULL
) {
603 vmstate_register(NULL
, cpu_index
, &vmstate_cpu_common
, cpu
);
605 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
606 register_savevm(NULL
, "cpu", cpu_index
, CPU_SAVE_VERSION
,
607 cpu_save
, cpu_load
, env
);
608 assert(cc
->vmsd
== NULL
);
609 assert(qdev_get_vmsd(DEVICE(cpu
)) == NULL
);
611 if (cc
->vmsd
!= NULL
) {
612 vmstate_register(NULL
, cpu_index
, cc
->vmsd
, cpu
);
616 #if defined(CONFIG_USER_ONLY)
617 static void breakpoint_invalidate(CPUState
*cpu
, target_ulong pc
)
619 tb_invalidate_phys_page_range(pc
, pc
+ 1, 0);
622 static void breakpoint_invalidate(CPUState
*cpu
, target_ulong pc
)
624 hwaddr phys
= cpu_get_phys_page_debug(cpu
, pc
);
626 tb_invalidate_phys_addr(cpu
->as
,
627 phys
| (pc
& ~TARGET_PAGE_MASK
));
632 #if defined(CONFIG_USER_ONLY)
633 void cpu_watchpoint_remove_all(CPUState
*cpu
, int mask
)
638 int cpu_watchpoint_remove(CPUState
*cpu
, vaddr addr
, vaddr len
,
644 void cpu_watchpoint_remove_by_ref(CPUState
*cpu
, CPUWatchpoint
*watchpoint
)
648 int cpu_watchpoint_insert(CPUState
*cpu
, vaddr addr
, vaddr len
,
649 int flags
, CPUWatchpoint
**watchpoint
)
654 /* Add a watchpoint. */
655 int cpu_watchpoint_insert(CPUState
*cpu
, vaddr addr
, vaddr len
,
656 int flags
, CPUWatchpoint
**watchpoint
)
660 /* forbid ranges which are empty or run off the end of the address space */
661 if (len
== 0 || (addr
+ len
- 1) < addr
) {
662 error_report("tried to set invalid watchpoint at %"
663 VADDR_PRIx
", len=%" VADDR_PRIu
, addr
, len
);
666 wp
= g_malloc(sizeof(*wp
));
672 /* keep all GDB-injected watchpoints in front */
673 if (flags
& BP_GDB
) {
674 QTAILQ_INSERT_HEAD(&cpu
->watchpoints
, wp
, entry
);
676 QTAILQ_INSERT_TAIL(&cpu
->watchpoints
, wp
, entry
);
679 tlb_flush_page(cpu
, addr
);
686 /* Remove a specific watchpoint. */
687 int cpu_watchpoint_remove(CPUState
*cpu
, vaddr addr
, vaddr len
,
692 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
693 if (addr
== wp
->vaddr
&& len
== wp
->len
694 && flags
== (wp
->flags
& ~BP_WATCHPOINT_HIT
)) {
695 cpu_watchpoint_remove_by_ref(cpu
, wp
);
702 /* Remove a specific watchpoint by reference. */
703 void cpu_watchpoint_remove_by_ref(CPUState
*cpu
, CPUWatchpoint
*watchpoint
)
705 QTAILQ_REMOVE(&cpu
->watchpoints
, watchpoint
, entry
);
707 tlb_flush_page(cpu
, watchpoint
->vaddr
);
712 /* Remove all matching watchpoints. */
713 void cpu_watchpoint_remove_all(CPUState
*cpu
, int mask
)
715 CPUWatchpoint
*wp
, *next
;
717 QTAILQ_FOREACH_SAFE(wp
, &cpu
->watchpoints
, entry
, next
) {
718 if (wp
->flags
& mask
) {
719 cpu_watchpoint_remove_by_ref(cpu
, wp
);

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}
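
/* Illustrative example (not part of the original file): a watchpoint at
 * vaddr 0x1000 with len 8 covers bytes 0x1000..0x1007, so it matches a
 * 4-byte access at 0x1004 (the ranges overlap) but not one at 0x1008.  The
 * "- 1" endpoints keep the test correct even when a range ends at the very
 * top of the address space and addr + len would wrap around to zero.
 */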
746 /* Add a breakpoint. */
747 int cpu_breakpoint_insert(CPUState
*cpu
, vaddr pc
, int flags
,
748 CPUBreakpoint
**breakpoint
)
752 bp
= g_malloc(sizeof(*bp
));
757 /* keep all GDB-injected breakpoints in front */
758 if (flags
& BP_GDB
) {
759 QTAILQ_INSERT_HEAD(&cpu
->breakpoints
, bp
, entry
);
761 QTAILQ_INSERT_TAIL(&cpu
->breakpoints
, bp
, entry
);
764 breakpoint_invalidate(cpu
, pc
);
772 /* Remove a specific breakpoint. */
773 int cpu_breakpoint_remove(CPUState
*cpu
, vaddr pc
, int flags
)
777 QTAILQ_FOREACH(bp
, &cpu
->breakpoints
, entry
) {
778 if (bp
->pc
== pc
&& bp
->flags
== flags
) {
779 cpu_breakpoint_remove_by_ref(cpu
, bp
);
786 /* Remove a specific breakpoint by reference. */
787 void cpu_breakpoint_remove_by_ref(CPUState
*cpu
, CPUBreakpoint
*breakpoint
)
789 QTAILQ_REMOVE(&cpu
->breakpoints
, breakpoint
, entry
);
791 breakpoint_invalidate(cpu
, breakpoint
->pc
);
796 /* Remove all matching breakpoints. */
797 void cpu_breakpoint_remove_all(CPUState
*cpu
, int mask
)
799 CPUBreakpoint
*bp
, *next
;
801 QTAILQ_FOREACH_SAFE(bp
, &cpu
->breakpoints
, entry
, next
) {
802 if (bp
->flags
& mask
) {
803 cpu_breakpoint_remove_by_ref(cpu
, bp
);
808 /* enable or disable single step mode. EXCP_DEBUG is returned by the
809 CPU loop after each instruction */
810 void cpu_single_step(CPUState
*cpu
, int enabled
)
812 if (cpu
->singlestep_enabled
!= enabled
) {
813 cpu
->singlestep_enabled
= enabled
;
815 kvm_update_guest_debug(cpu
, 0);
817 /* must flush all the translated code to avoid inconsistencies */
818 /* XXX: only flush what is necessary */
819 CPUArchState
*env
= cpu
->env_ptr
;
825 void cpu_abort(CPUState
*cpu
, const char *fmt
, ...)
832 fprintf(stderr
, "qemu: fatal: ");
833 vfprintf(stderr
, fmt
, ap
);
834 fprintf(stderr
, "\n");
835 cpu_dump_state(cpu
, stderr
, fprintf
, CPU_DUMP_FPU
| CPU_DUMP_CCOP
);
836 if (qemu_log_enabled()) {
837 qemu_log("qemu: fatal: ");
838 qemu_log_vprintf(fmt
, ap2
);
840 log_cpu_state(cpu
, CPU_DUMP_FPU
| CPU_DUMP_CCOP
);
846 #if defined(CONFIG_USER_ONLY)
848 struct sigaction act
;
849 sigfillset(&act
.sa_mask
);
850 act
.sa_handler
= SIG_DFL
;
851 sigaction(SIGABRT
, &act
, NULL
);
857 #if !defined(CONFIG_USER_ONLY)
858 /* Called from RCU critical section */
859 static RAMBlock
*qemu_get_ram_block(ram_addr_t addr
)
863 block
= atomic_rcu_read(&ram_list
.mru_block
);
864 if (block
&& addr
- block
->offset
< block
->max_length
) {
867 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
868 if (addr
- block
->offset
< block
->max_length
) {
873 fprintf(stderr
, "Bad ram offset %" PRIx64
"\n", (uint64_t)addr
);
877 /* It is safe to write mru_block outside the iothread lock. This
882 * xxx removed from list
886 * call_rcu(reclaim_ramblock, xxx);
889 * atomic_rcu_set is not needed here. The block was already published
890 * when it was placed into the list. Here we're just making an extra
891 * copy of the pointer.
893 ram_list
.mru_block
= block
;
897 static void tlb_reset_dirty_range_all(ram_addr_t start
, ram_addr_t length
)
903 end
= TARGET_PAGE_ALIGN(start
+ length
);
904 start
&= TARGET_PAGE_MASK
;
907 block
= qemu_get_ram_block(start
);
908 assert(block
== qemu_get_ram_block(end
- 1));
909 start1
= (uintptr_t)ramblock_ptr(block
, start
- block
->offset
);
910 cpu_tlb_reset_dirty_all(start1
, length
);
914 /* Note: start and end must be within the same ram block. */
915 bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start
,
919 unsigned long end
, page
;
926 end
= TARGET_PAGE_ALIGN(start
+ length
) >> TARGET_PAGE_BITS
;
927 page
= start
>> TARGET_PAGE_BITS
;
928 dirty
= bitmap_test_and_clear_atomic(ram_list
.dirty_memory
[client
],
931 if (dirty
&& tcg_enabled()) {
932 tlb_reset_dirty_range_all(start
, length
);
938 /* Called from RCU critical section */
939 hwaddr
memory_region_section_get_iotlb(CPUState
*cpu
,
940 MemoryRegionSection
*section
,
942 hwaddr paddr
, hwaddr xlat
,
944 target_ulong
*address
)
949 if (memory_region_is_ram(section
->mr
)) {
951 iotlb
= (memory_region_get_ram_addr(section
->mr
) & TARGET_PAGE_MASK
)
953 if (!section
->readonly
) {
954 iotlb
|= PHYS_SECTION_NOTDIRTY
;
956 iotlb
|= PHYS_SECTION_ROM
;
959 iotlb
= section
- section
->address_space
->dispatch
->map
.sections
;
963 /* Make accesses to pages with watchpoints go via the
964 watchpoint trap routines. */
965 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
966 if (cpu_watchpoint_address_matches(wp
, vaddr
, TARGET_PAGE_SIZE
)) {
967 /* Avoid trapping reads of pages with a write breakpoint. */
968 if ((prot
& PAGE_WRITE
) || (wp
->flags
& BP_MEM_READ
)) {
969 iotlb
= PHYS_SECTION_WATCH
+ paddr
;
970 *address
|= TLB_MMIO
;
978 #endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}
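
/* Illustrative sketch (not part of the original file): an accelerator with
 * special allocation needs could install its own allocator; the function
 * name below is hypothetical.
 *
 *     static void *my_accel_ram_alloc(size_t size, uint64_t *align)
 *     {
 *         // e.g. adjust size/alignment, then fall back to the default
 *         return qemu_anon_ram_alloc(size, align);
 *     }
 *
 *     phys_mem_set_alloc(my_accel_ram_alloc);
 */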
999 static uint16_t phys_section_add(PhysPageMap
*map
,
1000 MemoryRegionSection
*section
)
1002 /* The physical section number is ORed with a page-aligned
1003 * pointer to produce the iotlb entries. Thus it should
1004 * never overflow into the page-aligned value.
1006 assert(map
->sections_nb
< TARGET_PAGE_SIZE
);
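
    /* Illustrative note (not part of the original file): as the comment above
     * says, iotlb entries are formed as (page-aligned address | section
     * number), so the section number must stay below TARGET_PAGE_SIZE.  With
     * 4 KiB pages that leaves the low 12 bits for up to 4096 sections, e.g.
     * iotlb = 0xabcd000 | 3 encodes section 3 for the page at 0xabcd000.
     */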
1008 if (map
->sections_nb
== map
->sections_nb_alloc
) {
1009 map
->sections_nb_alloc
= MAX(map
->sections_nb_alloc
* 2, 16);
1010 map
->sections
= g_renew(MemoryRegionSection
, map
->sections
,
1011 map
->sections_nb_alloc
);
1013 map
->sections
[map
->sections_nb
] = *section
;
1014 memory_region_ref(section
->mr
);
1015 return map
->sections_nb
++;
1018 static void phys_section_destroy(MemoryRegion
*mr
)
1020 memory_region_unref(mr
);
1023 subpage_t
*subpage
= container_of(mr
, subpage_t
, iomem
);
1024 object_unref(OBJECT(&subpage
->iomem
));
1029 static void phys_sections_free(PhysPageMap
*map
)
1031 while (map
->sections_nb
> 0) {
1032 MemoryRegionSection
*section
= &map
->sections
[--map
->sections_nb
];
1033 phys_section_destroy(section
->mr
);
1035 g_free(map
->sections
);
1039 static void register_subpage(AddressSpaceDispatch
*d
, MemoryRegionSection
*section
)
1042 hwaddr base
= section
->offset_within_address_space
1044 MemoryRegionSection
*existing
= phys_page_find(d
->phys_map
, base
,
1045 d
->map
.nodes
, d
->map
.sections
);
1046 MemoryRegionSection subsection
= {
1047 .offset_within_address_space
= base
,
1048 .size
= int128_make64(TARGET_PAGE_SIZE
),
1052 assert(existing
->mr
->subpage
|| existing
->mr
== &io_mem_unassigned
);
1054 if (!(existing
->mr
->subpage
)) {
1055 subpage
= subpage_init(d
->as
, base
);
1056 subsection
.address_space
= d
->as
;
1057 subsection
.mr
= &subpage
->iomem
;
1058 phys_page_set(d
, base
>> TARGET_PAGE_BITS
, 1,
1059 phys_section_add(&d
->map
, &subsection
));
1061 subpage
= container_of(existing
->mr
, subpage_t
, iomem
);
1063 start
= section
->offset_within_address_space
& ~TARGET_PAGE_MASK
;
1064 end
= start
+ int128_get64(section
->size
) - 1;
1065 subpage_register(subpage
, start
, end
,
1066 phys_section_add(&d
->map
, section
));
1070 static void register_multipage(AddressSpaceDispatch
*d
,
1071 MemoryRegionSection
*section
)
1073 hwaddr start_addr
= section
->offset_within_address_space
;
1074 uint16_t section_index
= phys_section_add(&d
->map
, section
);
1075 uint64_t num_pages
= int128_get64(int128_rshift(section
->size
,
1079 phys_page_set(d
, start_addr
>> TARGET_PAGE_BITS
, num_pages
, section_index
);
1082 static void mem_add(MemoryListener
*listener
, MemoryRegionSection
*section
)
1084 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
1085 AddressSpaceDispatch
*d
= as
->next_dispatch
;
1086 MemoryRegionSection now
= *section
, remain
= *section
;
1087 Int128 page_size
= int128_make64(TARGET_PAGE_SIZE
);
1089 if (now
.offset_within_address_space
& ~TARGET_PAGE_MASK
) {
1090 uint64_t left
= TARGET_PAGE_ALIGN(now
.offset_within_address_space
)
1091 - now
.offset_within_address_space
;
1093 now
.size
= int128_min(int128_make64(left
), now
.size
);
1094 register_subpage(d
, &now
);
1096 now
.size
= int128_zero();
1098 while (int128_ne(remain
.size
, now
.size
)) {
1099 remain
.size
= int128_sub(remain
.size
, now
.size
);
1100 remain
.offset_within_address_space
+= int128_get64(now
.size
);
1101 remain
.offset_within_region
+= int128_get64(now
.size
);
1103 if (int128_lt(remain
.size
, page_size
)) {
1104 register_subpage(d
, &now
);
1105 } else if (remain
.offset_within_address_space
& ~TARGET_PAGE_MASK
) {
1106 now
.size
= page_size
;
1107 register_subpage(d
, &now
);
1109 now
.size
= int128_and(now
.size
, int128_neg(page_size
));
1110 register_multipage(d
, &now
);
1115 void qemu_flush_coalesced_mmio_buffer(void)
1118 kvm_flush_coalesced_mmio_buffer();
1121 void qemu_mutex_lock_ramlist(void)
1123 qemu_mutex_lock(&ram_list
.mutex
);
1126 void qemu_mutex_unlock_ramlist(void)
1128 qemu_mutex_unlock(&ram_list
.mutex
);
1133 #include <sys/vfs.h>
1135 #define HUGETLBFS_MAGIC 0x958458f6
1137 static long gethugepagesize(const char *path
, Error
**errp
)
1143 ret
= statfs(path
, &fs
);
1144 } while (ret
!= 0 && errno
== EINTR
);
1147 error_setg_errno(errp
, errno
, "failed to get page size of file %s",
1152 if (fs
.f_type
!= HUGETLBFS_MAGIC
)
1153 fprintf(stderr
, "Warning: path not on HugeTLBFS: %s\n", path
);
1158 static void *file_ram_alloc(RAMBlock
*block
,
1164 char *sanitized_name
;
1169 Error
*local_err
= NULL
;
1171 hpagesize
= gethugepagesize(path
, &local_err
);
1173 error_propagate(errp
, local_err
);
1176 block
->mr
->align
= hpagesize
;
1178 if (memory
< hpagesize
) {
1179 error_setg(errp
, "memory size 0x" RAM_ADDR_FMT
" must be equal to "
1180 "or larger than huge page size 0x%" PRIx64
,
1185 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1187 "host lacks kvm mmu notifiers, -mem-path unsupported");
1191 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1192 sanitized_name
= g_strdup(memory_region_name(block
->mr
));
1193 for (c
= sanitized_name
; *c
!= '\0'; c
++) {
1198 filename
= g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path
,
1200 g_free(sanitized_name
);
1202 fd
= mkstemp(filename
);
1204 error_setg_errno(errp
, errno
,
1205 "unable to create backing store for hugepages");
1212 memory
= (memory
+hpagesize
-1) & ~(hpagesize
-1);
1215 * ftruncate is not supported by hugetlbfs in older
1216 * hosts, so don't bother bailing out on errors.
1217 * If anything goes wrong with it under other filesystems,
1220 if (ftruncate(fd
, memory
)) {
1221 perror("ftruncate");
1224 area
= mmap(0, memory
, PROT_READ
| PROT_WRITE
,
1225 (block
->flags
& RAM_SHARED
? MAP_SHARED
: MAP_PRIVATE
),
1227 if (area
== MAP_FAILED
) {
1228 error_setg_errno(errp
, errno
,
1229 "unable to map backing store for hugepages");
1235 os_mem_prealloc(fd
, area
, memory
);
1243 error_report("%s", error_get_pretty(*errp
));
1250 /* Called with the ramlist lock held. */
1251 static ram_addr_t
find_ram_offset(ram_addr_t size
)
1253 RAMBlock
*block
, *next_block
;
1254 ram_addr_t offset
= RAM_ADDR_MAX
, mingap
= RAM_ADDR_MAX
;
1256 assert(size
!= 0); /* it would hand out same offset multiple times */
1258 if (QLIST_EMPTY_RCU(&ram_list
.blocks
)) {
1262 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1263 ram_addr_t end
, next
= RAM_ADDR_MAX
;
1265 end
= block
->offset
+ block
->max_length
;
1267 QLIST_FOREACH_RCU(next_block
, &ram_list
.blocks
, next
) {
1268 if (next_block
->offset
>= end
) {
1269 next
= MIN(next
, next_block
->offset
);
1272 if (next
- end
>= size
&& next
- end
< mingap
) {
1274 mingap
= next
- end
;
1278 if (offset
== RAM_ADDR_MAX
) {
1279 fprintf(stderr
, "Failed to find gap of requested size: %" PRIu64
"\n",
1287 ram_addr_t
last_ram_offset(void)
1290 ram_addr_t last
= 0;
1293 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1294 last
= MAX(last
, block
->offset
+ block
->max_length
);
1300 static void qemu_ram_setup_dump(void *addr
, ram_addr_t size
)
1304 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
1305 if (!machine_dump_guest_core(current_machine
)) {
1306 ret
= qemu_madvise(addr
, size
, QEMU_MADV_DONTDUMP
);
1308 perror("qemu_madvise");
1309 fprintf(stderr
, "madvise doesn't support MADV_DONTDUMP, "
1310 "but dump_guest_core=off specified\n");
1315 /* Called within an RCU critical section, or while the ramlist lock
1318 static RAMBlock
*find_ram_block(ram_addr_t addr
)
1322 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1323 if (block
->offset
== addr
) {
1331 /* Called with iothread lock held. */
1332 void qemu_ram_set_idstr(ram_addr_t addr
, const char *name
, DeviceState
*dev
)
1334 RAMBlock
*new_block
, *block
;
1337 new_block
= find_ram_block(addr
);
1339 assert(!new_block
->idstr
[0]);
1342 char *id
= qdev_get_dev_path(dev
);
1344 snprintf(new_block
->idstr
, sizeof(new_block
->idstr
), "%s/", id
);
1348 pstrcat(new_block
->idstr
, sizeof(new_block
->idstr
), name
);
1350 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1351 if (block
!= new_block
&& !strcmp(block
->idstr
, new_block
->idstr
)) {
1352 fprintf(stderr
, "RAMBlock \"%s\" already registered, abort!\n",
1360 /* Called with iothread lock held. */
1361 void qemu_ram_unset_idstr(ram_addr_t addr
)
1365 /* FIXME: arch_init.c assumes that this is not called throughout
1366 * migration. Ignore the problem since hot-unplug during migration
1367 * does not work anyway.
1371 block
= find_ram_block(addr
);
1373 memset(block
->idstr
, 0, sizeof(block
->idstr
));
1378 static int memory_try_enable_merging(void *addr
, size_t len
)
1380 if (!machine_mem_merge(current_machine
)) {
1381 /* disabled by the user */
1385 return qemu_madvise(addr
, len
, QEMU_MADV_MERGEABLE
);
1388 /* Only legal before guest might have detected the memory size: e.g. on
1389 * incoming migration, or right after reset.
1391 * As memory core doesn't know how is memory accessed, it is up to
1392 * resize callback to update device state and/or add assertions to detect
1393 * misuse, if necessary.
1395 int qemu_ram_resize(ram_addr_t base
, ram_addr_t newsize
, Error
**errp
)
1397 RAMBlock
*block
= find_ram_block(base
);
1401 newsize
= TARGET_PAGE_ALIGN(newsize
);
1403 if (block
->used_length
== newsize
) {
1407 if (!(block
->flags
& RAM_RESIZEABLE
)) {
1408 error_setg_errno(errp
, EINVAL
,
1409 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1410 " in != 0x" RAM_ADDR_FMT
, block
->idstr
,
1411 newsize
, block
->used_length
);
1415 if (block
->max_length
< newsize
) {
1416 error_setg_errno(errp
, EINVAL
,
1417 "Length too large: %s: 0x" RAM_ADDR_FMT
1418 " > 0x" RAM_ADDR_FMT
, block
->idstr
,
1419 newsize
, block
->max_length
);
1423 cpu_physical_memory_clear_dirty_range(block
->offset
, block
->used_length
);
1424 block
->used_length
= newsize
;
1425 cpu_physical_memory_set_dirty_range(block
->offset
, block
->used_length
,
1427 memory_region_set_size(block
->mr
, newsize
);
1428 if (block
->resized
) {
1429 block
->resized(block
->idstr
, newsize
, block
->host
);
1434 static ram_addr_t
ram_block_add(RAMBlock
*new_block
, Error
**errp
)
1437 RAMBlock
*last_block
= NULL
;
1438 ram_addr_t old_ram_size
, new_ram_size
;
1440 old_ram_size
= last_ram_offset() >> TARGET_PAGE_BITS
;
1442 qemu_mutex_lock_ramlist();
1443 new_block
->offset
= find_ram_offset(new_block
->max_length
);
1445 if (!new_block
->host
) {
1446 if (xen_enabled()) {
1447 xen_ram_alloc(new_block
->offset
, new_block
->max_length
,
1450 new_block
->host
= phys_mem_alloc(new_block
->max_length
,
1451 &new_block
->mr
->align
);
1452 if (!new_block
->host
) {
1453 error_setg_errno(errp
, errno
,
1454 "cannot set up guest memory '%s'",
1455 memory_region_name(new_block
->mr
));
1456 qemu_mutex_unlock_ramlist();
1459 memory_try_enable_merging(new_block
->host
, new_block
->max_length
);
1463 new_ram_size
= MAX(old_ram_size
,
1464 (new_block
->offset
+ new_block
->max_length
) >> TARGET_PAGE_BITS
);
1465 if (new_ram_size
> old_ram_size
) {
1466 migration_bitmap_extend(old_ram_size
, new_ram_size
);
1468 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1469 * QLIST (which has an RCU-friendly variant) does not have insertion at
1470 * tail, so save the last element in last_block.
1472 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1474 if (block
->max_length
< new_block
->max_length
) {
1479 QLIST_INSERT_BEFORE_RCU(block
, new_block
, next
);
1480 } else if (last_block
) {
1481 QLIST_INSERT_AFTER_RCU(last_block
, new_block
, next
);
1482 } else { /* list is empty */
1483 QLIST_INSERT_HEAD_RCU(&ram_list
.blocks
, new_block
, next
);
1485 ram_list
.mru_block
= NULL
;
1487 /* Write list before version */
1490 qemu_mutex_unlock_ramlist();
1492 new_ram_size
= last_ram_offset() >> TARGET_PAGE_BITS
;
1494 if (new_ram_size
> old_ram_size
) {
1497 /* ram_list.dirty_memory[] is protected by the iothread lock. */
1498 for (i
= 0; i
< DIRTY_MEMORY_NUM
; i
++) {
1499 ram_list
.dirty_memory
[i
] =
1500 bitmap_zero_extend(ram_list
.dirty_memory
[i
],
1501 old_ram_size
, new_ram_size
);
1504 cpu_physical_memory_set_dirty_range(new_block
->offset
,
1505 new_block
->used_length
,
1508 if (new_block
->host
) {
1509 qemu_ram_setup_dump(new_block
->host
, new_block
->max_length
);
1510 qemu_madvise(new_block
->host
, new_block
->max_length
, QEMU_MADV_HUGEPAGE
);
1511 qemu_madvise(new_block
->host
, new_block
->max_length
, QEMU_MADV_DONTFORK
);
1512 if (kvm_enabled()) {
1513 kvm_setup_guest_memory(new_block
->host
, new_block
->max_length
);
1517 return new_block
->offset
;
1521 ram_addr_t
qemu_ram_alloc_from_file(ram_addr_t size
, MemoryRegion
*mr
,
1522 bool share
, const char *mem_path
,
1525 RAMBlock
*new_block
;
1527 Error
*local_err
= NULL
;
1529 if (xen_enabled()) {
1530 error_setg(errp
, "-mem-path not supported with Xen");
1534 if (phys_mem_alloc
!= qemu_anon_ram_alloc
) {
1536 * file_ram_alloc() needs to allocate just like
1537 * phys_mem_alloc, but we haven't bothered to provide
1541 "-mem-path not supported with this accelerator");
1545 size
= TARGET_PAGE_ALIGN(size
);
1546 new_block
= g_malloc0(sizeof(*new_block
));
1548 new_block
->used_length
= size
;
1549 new_block
->max_length
= size
;
1550 new_block
->flags
= share
? RAM_SHARED
: 0;
1551 new_block
->host
= file_ram_alloc(new_block
, size
,
1553 if (!new_block
->host
) {
1558 addr
= ram_block_add(new_block
, &local_err
);
1561 error_propagate(errp
, local_err
);
1569 ram_addr_t
qemu_ram_alloc_internal(ram_addr_t size
, ram_addr_t max_size
,
1570 void (*resized
)(const char*,
1573 void *host
, bool resizeable
,
1574 MemoryRegion
*mr
, Error
**errp
)
1576 RAMBlock
*new_block
;
1578 Error
*local_err
= NULL
;
1580 size
= TARGET_PAGE_ALIGN(size
);
1581 max_size
= TARGET_PAGE_ALIGN(max_size
);
1582 new_block
= g_malloc0(sizeof(*new_block
));
1584 new_block
->resized
= resized
;
1585 new_block
->used_length
= size
;
1586 new_block
->max_length
= max_size
;
1587 assert(max_size
>= size
);
1589 new_block
->host
= host
;
1591 new_block
->flags
|= RAM_PREALLOC
;
1594 new_block
->flags
|= RAM_RESIZEABLE
;
1596 addr
= ram_block_add(new_block
, &local_err
);
1599 error_propagate(errp
, local_err
);
1605 ram_addr_t
qemu_ram_alloc_from_ptr(ram_addr_t size
, void *host
,
1606 MemoryRegion
*mr
, Error
**errp
)
1608 return qemu_ram_alloc_internal(size
, size
, NULL
, host
, false, mr
, errp
);
1611 ram_addr_t
qemu_ram_alloc(ram_addr_t size
, MemoryRegion
*mr
, Error
**errp
)
1613 return qemu_ram_alloc_internal(size
, size
, NULL
, NULL
, false, mr
, errp
);
1616 ram_addr_t
qemu_ram_alloc_resizeable(ram_addr_t size
, ram_addr_t maxsz
,
1617 void (*resized
)(const char*,
1620 MemoryRegion
*mr
, Error
**errp
)
1622 return qemu_ram_alloc_internal(size
, maxsz
, resized
, NULL
, true, mr
, errp
);
1625 void qemu_ram_free_from_ptr(ram_addr_t addr
)
1629 qemu_mutex_lock_ramlist();
1630 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1631 if (addr
== block
->offset
) {
1632 QLIST_REMOVE_RCU(block
, next
);
1633 ram_list
.mru_block
= NULL
;
1634 /* Write list before version */
1637 g_free_rcu(block
, rcu
);
1641 qemu_mutex_unlock_ramlist();
1644 static void reclaim_ramblock(RAMBlock
*block
)
1646 if (block
->flags
& RAM_PREALLOC
) {
1648 } else if (xen_enabled()) {
1649 xen_invalidate_map_cache_entry(block
->host
);
1651 } else if (block
->fd
>= 0) {
1652 munmap(block
->host
, block
->max_length
);
1656 qemu_anon_ram_free(block
->host
, block
->max_length
);
1661 void qemu_ram_free(ram_addr_t addr
)
1665 qemu_mutex_lock_ramlist();
1666 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1667 if (addr
== block
->offset
) {
1668 QLIST_REMOVE_RCU(block
, next
);
1669 ram_list
.mru_block
= NULL
;
1670 /* Write list before version */
1673 call_rcu(block
, reclaim_ramblock
, rcu
);
1677 qemu_mutex_unlock_ramlist();
1681 void qemu_ram_remap(ram_addr_t addr
, ram_addr_t length
)
1688 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1689 offset
= addr
- block
->offset
;
1690 if (offset
< block
->max_length
) {
1691 vaddr
= ramblock_ptr(block
, offset
);
1692 if (block
->flags
& RAM_PREALLOC
) {
1694 } else if (xen_enabled()) {
1698 if (block
->fd
>= 0) {
1699 flags
|= (block
->flags
& RAM_SHARED
?
1700 MAP_SHARED
: MAP_PRIVATE
);
1701 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
1702 flags
, block
->fd
, offset
);
1705 * Remap needs to match alloc. Accelerators that
1706 * set phys_mem_alloc never remap. If they did,
1707 * we'd need a remap hook here.
1709 assert(phys_mem_alloc
== qemu_anon_ram_alloc
);
1711 flags
|= MAP_PRIVATE
| MAP_ANONYMOUS
;
1712 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
1715 if (area
!= vaddr
) {
1716 fprintf(stderr
, "Could not remap addr: "
1717 RAM_ADDR_FMT
"@" RAM_ADDR_FMT
"\n",
1721 memory_try_enable_merging(vaddr
, length
);
1722 qemu_ram_setup_dump(vaddr
, length
);
1727 #endif /* !_WIN32 */
1729 int qemu_get_ram_fd(ram_addr_t addr
)
1735 block
= qemu_get_ram_block(addr
);
1741 void *qemu_get_ram_block_host_ptr(ram_addr_t addr
)
1747 block
= qemu_get_ram_block(addr
);
1748 ptr
= ramblock_ptr(block
, 0);
/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * This should not be used for general purpose DMA.  Use address_space_map
 * or address_space_rw instead. For local memory (e.g. video ram) that the
 * device owns, use memory_region_get_ram_ptr.
 *
 * By the time this function returns, the returned pointer is not protected
 * by RCU anymore.  If the caller is not within an RCU critical section and
 * does not hold the iothread lock, it must have other means of protecting the
 * pointer, such as a reference to the region that includes the incoming
 * ram_addr_t.
 */
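
/* Illustrative usage sketch (not part of the original file): a caller with no
 * other protection should bracket the lookup and the access in an RCU
 * critical section, or hold a reference on the enclosing memory region:
 *
 *     rcu_read_lock();
 *     void *host = qemu_get_ram_ptr(ram_addr);
 *     memcpy(host, data, len);          // access while still protected
 *     rcu_read_unlock();
 */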
1764 void *qemu_get_ram_ptr(ram_addr_t addr
)
1770 block
= qemu_get_ram_block(addr
);
1772 if (xen_enabled() && block
->host
== NULL
) {
1773 /* We need to check if the requested address is in the RAM
1774 * because we don't want to map the entire memory in QEMU.
1775 * In that case just map until the end of the page.
1777 if (block
->offset
== 0) {
1778 ptr
= xen_map_cache(addr
, 0, 0);
1782 block
->host
= xen_map_cache(block
->offset
, block
->max_length
, 1);
1784 ptr
= ramblock_ptr(block
, addr
- block
->offset
);
1791 /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1792 * but takes a size argument.
1794 * By the time this function returns, the returned pointer is not protected
1795 * by RCU anymore. If the caller is not within an RCU critical section and
1796 * does not hold the iothread lock, it must have other means of protecting the
1797 * pointer, such as a reference to the region that includes the incoming
1800 static void *qemu_ram_ptr_length(ram_addr_t addr
, hwaddr
*size
)
1806 if (xen_enabled()) {
1807 return xen_map_cache(addr
, *size
, 1);
1811 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
1812 if (addr
- block
->offset
< block
->max_length
) {
1813 if (addr
- block
->offset
+ *size
> block
->max_length
)
1814 *size
= block
->max_length
- addr
+ block
->offset
;
1815 ptr
= ramblock_ptr(block
, addr
- block
->offset
);
1821 fprintf(stderr
, "Bad ram offset %" PRIx64
"\n", (uint64_t)addr
);
1826 /* Some of the softmmu routines need to translate from a host pointer
1827 * (typically a TLB entry) back to a ram offset.
1829 * By the time this function returns, the returned pointer is not protected
1830 * by RCU anymore. If the caller is not within an RCU critical section and
1831 * does not hold the iothread lock, it must have other means of protecting the
1832 * pointer, such as a reference to the region that includes the incoming
1835 MemoryRegion
*qemu_ram_addr_from_host(void *ptr
, ram_addr_t
*ram_addr
)
1838 uint8_t *host
= ptr
;
1841 if (xen_enabled()) {
1843 *ram_addr
= xen_ram_addr_from_mapcache(ptr
);
1844 mr
= qemu_get_ram_block(*ram_addr
)->mr
;
1850 block
= atomic_rcu_read(&ram_list
.mru_block
);
1851 if (block
&& block
->host
&& host
- block
->host
< block
->max_length
) {
1855 QLIST_FOREACH_RCU(block
, &ram_list
.blocks
, next
) {
        /* This case appears when the block is not mapped.  */
1857 if (block
->host
== NULL
) {
1860 if (host
- block
->host
< block
->max_length
) {
1869 *ram_addr
= block
->offset
+ (host
- block
->host
);
1875 static void notdirty_mem_write(void *opaque
, hwaddr ram_addr
,
1876 uint64_t val
, unsigned size
)
1878 if (!cpu_physical_memory_get_dirty_flag(ram_addr
, DIRTY_MEMORY_CODE
)) {
1879 tb_invalidate_phys_page_fast(ram_addr
, size
);
1883 stb_p(qemu_get_ram_ptr(ram_addr
), val
);
1886 stw_p(qemu_get_ram_ptr(ram_addr
), val
);
1889 stl_p(qemu_get_ram_ptr(ram_addr
), val
);
1894 /* Set both VGA and migration bits for simplicity and to remove
1895 * the notdirty callback faster.
1897 cpu_physical_memory_set_dirty_range(ram_addr
, size
,
1898 DIRTY_CLIENTS_NOCODE
);
1899 /* we remove the notdirty callback only if the code has been
1901 if (!cpu_physical_memory_is_clean(ram_addr
)) {
1902 CPUArchState
*env
= current_cpu
->env_ptr
;
1903 tlb_set_dirty(env
, current_cpu
->mem_io_vaddr
);
1907 static bool notdirty_mem_accepts(void *opaque
, hwaddr addr
,
1908 unsigned size
, bool is_write
)
1913 static const MemoryRegionOps notdirty_mem_ops
= {
1914 .write
= notdirty_mem_write
,
1915 .valid
.accepts
= notdirty_mem_accepts
,
1916 .endianness
= DEVICE_NATIVE_ENDIAN
,
1919 /* Generate a debug exception if a watchpoint has been hit. */
1920 static void check_watchpoint(int offset
, int len
, MemTxAttrs attrs
, int flags
)
1922 CPUState
*cpu
= current_cpu
;
1923 CPUArchState
*env
= cpu
->env_ptr
;
1924 target_ulong pc
, cs_base
;
1929 if (cpu
->watchpoint_hit
) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
1933 cpu_interrupt(cpu
, CPU_INTERRUPT_DEBUG
);
1936 vaddr
= (cpu
->mem_io_vaddr
& TARGET_PAGE_MASK
) + offset
;
1937 QTAILQ_FOREACH(wp
, &cpu
->watchpoints
, entry
) {
1938 if (cpu_watchpoint_address_matches(wp
, vaddr
, len
)
1939 && (wp
->flags
& flags
)) {
1940 if (flags
== BP_MEM_READ
) {
1941 wp
->flags
|= BP_WATCHPOINT_HIT_READ
;
1943 wp
->flags
|= BP_WATCHPOINT_HIT_WRITE
;
1945 wp
->hitaddr
= vaddr
;
1946 wp
->hitattrs
= attrs
;
1947 if (!cpu
->watchpoint_hit
) {
1948 cpu
->watchpoint_hit
= wp
;
1949 tb_check_watchpoint(cpu
);
1950 if (wp
->flags
& BP_STOP_BEFORE_ACCESS
) {
1951 cpu
->exception_index
= EXCP_DEBUG
;
1954 cpu_get_tb_cpu_state(env
, &pc
, &cs_base
, &cpu_flags
);
1955 tb_gen_code(cpu
, pc
, cs_base
, cpu_flags
, 1);
1956 cpu_resume_from_signal(cpu
, NULL
);
1960 wp
->flags
&= ~BP_WATCHPOINT_HIT
;
1965 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1966 so these check for a hit then pass through to the normal out-of-line
1968 static MemTxResult
watch_mem_read(void *opaque
, hwaddr addr
, uint64_t *pdata
,
1969 unsigned size
, MemTxAttrs attrs
)
1974 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, size
, attrs
, BP_MEM_READ
);
1977 data
= address_space_ldub(&address_space_memory
, addr
, attrs
, &res
);
1980 data
= address_space_lduw(&address_space_memory
, addr
, attrs
, &res
);
1983 data
= address_space_ldl(&address_space_memory
, addr
, attrs
, &res
);
1991 static MemTxResult
watch_mem_write(void *opaque
, hwaddr addr
,
1992 uint64_t val
, unsigned size
,
1997 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, size
, attrs
, BP_MEM_WRITE
);
2000 address_space_stb(&address_space_memory
, addr
, val
, attrs
, &res
);
2003 address_space_stw(&address_space_memory
, addr
, val
, attrs
, &res
);
2006 address_space_stl(&address_space_memory
, addr
, val
, attrs
, &res
);
2013 static const MemoryRegionOps watch_mem_ops
= {
2014 .read_with_attrs
= watch_mem_read
,
2015 .write_with_attrs
= watch_mem_write
,
2016 .endianness
= DEVICE_NATIVE_ENDIAN
,
2019 static MemTxResult
subpage_read(void *opaque
, hwaddr addr
, uint64_t *data
,
2020 unsigned len
, MemTxAttrs attrs
)
2022 subpage_t
*subpage
= opaque
;
2026 #if defined(DEBUG_SUBPAGE)
2027 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
"\n", __func__
,
2028 subpage
, len
, addr
);
2030 res
= address_space_read(subpage
->as
, addr
+ subpage
->base
,
2037 *data
= ldub_p(buf
);
2040 *data
= lduw_p(buf
);
2053 static MemTxResult
subpage_write(void *opaque
, hwaddr addr
,
2054 uint64_t value
, unsigned len
, MemTxAttrs attrs
)
2056 subpage_t
*subpage
= opaque
;
2059 #if defined(DEBUG_SUBPAGE)
2060 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
2061 " value %"PRIx64
"\n",
2062 __func__
, subpage
, len
, addr
, value
);
2080 return address_space_write(subpage
->as
, addr
+ subpage
->base
,
2084 static bool subpage_accepts(void *opaque
, hwaddr addr
,
2085 unsigned len
, bool is_write
)
2087 subpage_t
*subpage
= opaque
;
2088 #if defined(DEBUG_SUBPAGE)
2089 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx
"\n",
2090 __func__
, subpage
, is_write
? 'w' : 'r', len
, addr
);
2093 return address_space_access_valid(subpage
->as
, addr
+ subpage
->base
,
2097 static const MemoryRegionOps subpage_ops
= {
2098 .read_with_attrs
= subpage_read
,
2099 .write_with_attrs
= subpage_write
,
2100 .impl
.min_access_size
= 1,
2101 .impl
.max_access_size
= 8,
2102 .valid
.min_access_size
= 1,
2103 .valid
.max_access_size
= 8,
2104 .valid
.accepts
= subpage_accepts
,
2105 .endianness
= DEVICE_NATIVE_ENDIAN
,
2108 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
2113 if (start
>= TARGET_PAGE_SIZE
|| end
>= TARGET_PAGE_SIZE
)
2115 idx
= SUBPAGE_IDX(start
);
2116 eidx
= SUBPAGE_IDX(end
);
2117 #if defined(DEBUG_SUBPAGE)
2118 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2119 __func__
, mmio
, start
, end
, idx
, eidx
, section
);
2121 for (; idx
<= eidx
; idx
++) {
2122 mmio
->sub_section
[idx
] = section
;
2128 static subpage_t
*subpage_init(AddressSpace
*as
, hwaddr base
)
2132 mmio
= g_malloc0(sizeof(subpage_t
));
2136 memory_region_init_io(&mmio
->iomem
, NULL
, &subpage_ops
, mmio
,
2137 NULL
, TARGET_PAGE_SIZE
);
2138 mmio
->iomem
.subpage
= true;
2139 #if defined(DEBUG_SUBPAGE)
2140 printf("%s: %p base " TARGET_FMT_plx
" len %08x\n", __func__
,
2141 mmio
, base
, TARGET_PAGE_SIZE
);
2143 subpage_register(mmio
, 0, TARGET_PAGE_SIZE
-1, PHYS_SECTION_UNASSIGNED
);
2148 static uint16_t dummy_section(PhysPageMap
*map
, AddressSpace
*as
,
2152 MemoryRegionSection section
= {
2153 .address_space
= as
,
2155 .offset_within_address_space
= 0,
2156 .offset_within_region
= 0,
2157 .size
= int128_2_64(),
2160 return phys_section_add(map
, §ion
);
2163 MemoryRegion
*iotlb_to_region(CPUState
*cpu
, hwaddr index
)
2165 AddressSpaceDispatch
*d
= atomic_rcu_read(&cpu
->memory_dispatch
);
2166 MemoryRegionSection
*sections
= d
->map
.sections
;
2168 return sections
[index
& ~TARGET_PAGE_MASK
].mr
;
2171 static void io_mem_init(void)
2173 memory_region_init_io(&io_mem_rom
, NULL
, &unassigned_mem_ops
, NULL
, NULL
, UINT64_MAX
);
2174 memory_region_init_io(&io_mem_unassigned
, NULL
, &unassigned_mem_ops
, NULL
,
2176 memory_region_init_io(&io_mem_notdirty
, NULL
, ¬dirty_mem_ops
, NULL
,
2178 memory_region_init_io(&io_mem_watch
, NULL
, &watch_mem_ops
, NULL
,
2182 static void mem_begin(MemoryListener
*listener
)
2184 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
2185 AddressSpaceDispatch
*d
= g_new0(AddressSpaceDispatch
, 1);
2188 n
= dummy_section(&d
->map
, as
, &io_mem_unassigned
);
2189 assert(n
== PHYS_SECTION_UNASSIGNED
);
2190 n
= dummy_section(&d
->map
, as
, &io_mem_notdirty
);
2191 assert(n
== PHYS_SECTION_NOTDIRTY
);
2192 n
= dummy_section(&d
->map
, as
, &io_mem_rom
);
2193 assert(n
== PHYS_SECTION_ROM
);
2194 n
= dummy_section(&d
->map
, as
, &io_mem_watch
);
2195 assert(n
== PHYS_SECTION_WATCH
);
2197 d
->phys_map
= (PhysPageEntry
) { .ptr
= PHYS_MAP_NODE_NIL
, .skip
= 1 };
2199 as
->next_dispatch
= d
;
2202 static void address_space_dispatch_free(AddressSpaceDispatch
*d
)
2204 phys_sections_free(&d
->map
);
2208 static void mem_commit(MemoryListener
*listener
)
2210 AddressSpace
*as
= container_of(listener
, AddressSpace
, dispatch_listener
);
2211 AddressSpaceDispatch
*cur
= as
->dispatch
;
2212 AddressSpaceDispatch
*next
= as
->next_dispatch
;
2214 phys_page_compact_all(next
, next
->map
.nodes_nb
);
2216 atomic_rcu_set(&as
->dispatch
, next
);
2218 call_rcu(cur
, address_space_dispatch_free
, rcu
);
2222 static void tcg_commit(MemoryListener
*listener
)
2226 /* since each CPU stores ram addresses in its TLB cache, we must
2227 reset the modified entries */
2230 /* FIXME: Disentangle the cpu.h circular files deps so we can
2231 directly get the right CPU from listener. */
2232 if (cpu
->tcg_as_listener
!= listener
) {
2235 cpu_reload_memory_map(cpu
);
2239 void address_space_init_dispatch(AddressSpace
*as
)
2241 as
->dispatch
= NULL
;
2242 as
->dispatch_listener
= (MemoryListener
) {
2244 .commit
= mem_commit
,
2245 .region_add
= mem_add
,
2246 .region_nop
= mem_add
,
2249 memory_listener_register(&as
->dispatch_listener
, as
);
2252 void address_space_unregister(AddressSpace
*as
)
2254 memory_listener_unregister(&as
->dispatch_listener
);
2257 void address_space_destroy_dispatch(AddressSpace
*as
)
2259 AddressSpaceDispatch
*d
= as
->dispatch
;
2261 atomic_rcu_set(&as
->dispatch
, NULL
);
2263 call_rcu(d
, address_space_dispatch_free
, rcu
);
2267 static void memory_map_init(void)
2269 system_memory
= g_malloc(sizeof(*system_memory
));
2271 memory_region_init(system_memory
, NULL
, "system", UINT64_MAX
);
2272 address_space_init(&address_space_memory
, system_memory
, "memory");
2274 system_io
= g_malloc(sizeof(*system_io
));
2275 memory_region_init_io(system_io
, NULL
, &unassigned_io_ops
, NULL
, "io",
2277 address_space_init(&address_space_io
, system_io
, "I/O");
2280 MemoryRegion
*get_system_memory(void)
2282 return system_memory
;
2285 MemoryRegion
*get_system_io(void)
2290 #endif /* !defined(CONFIG_USER_ONLY) */
2292 /* physical memory access (slow version, mainly for debug) */
2293 #if defined(CONFIG_USER_ONLY)
2294 int cpu_memory_rw_debug(CPUState
*cpu
, target_ulong addr
,
2295 uint8_t *buf
, int len
, int is_write
)
2302 page
= addr
& TARGET_PAGE_MASK
;
2303 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2306 flags
= page_get_flags(page
);
2307 if (!(flags
& PAGE_VALID
))
2310 if (!(flags
& PAGE_WRITE
))
2312 /* XXX: this code should not depend on lock_user */
2313 if (!(p
= lock_user(VERIFY_WRITE
, addr
, l
, 0)))
2316 unlock_user(p
, addr
, l
);
2318 if (!(flags
& PAGE_READ
))
2320 /* XXX: this code should not depend on lock_user */
2321 if (!(p
= lock_user(VERIFY_READ
, addr
, l
, 1)))
2324 unlock_user(p
, addr
, 0);
2335 static void invalidate_and_set_dirty(MemoryRegion
*mr
, hwaddr addr
,
2338 uint8_t dirty_log_mask
= memory_region_get_dirty_log_mask(mr
);
2339 /* No early return if dirty_log_mask is or becomes 0, because
2340 * cpu_physical_memory_set_dirty_range will still call
2341 * xen_modified_memory.
2343 if (dirty_log_mask
) {
2345 cpu_physical_memory_range_includes_clean(addr
, length
, dirty_log_mask
);
2347 if (dirty_log_mask
& (1 << DIRTY_MEMORY_CODE
)) {
2348 tb_invalidate_phys_range(addr
, addr
+ length
);
2349 dirty_log_mask
&= ~(1 << DIRTY_MEMORY_CODE
);
2351 cpu_physical_memory_set_dirty_range(addr
, length
, dirty_log_mask
);

static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified.  */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
    }
    l = 1 << (qemu_fls(l) - 1);

    return l;
}
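
/* Illustrative worked example (not part of the original file): for an 8-byte
 * access at address 0x1002 to a region with valid.max_access_size = 4 and no
 * unaligned support, the alignment bound (0x1002 & -0x1002 = 2) shrinks
 * access_size_max to 2, so l is clamped from 8 to 2 and the caller splits the
 * transfer into several smaller accesses.
 */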
2383 static bool prepare_mmio_access(MemoryRegion
*mr
)
2385 bool unlocked
= !qemu_mutex_iothread_locked();
2386 bool release_lock
= false;
2388 if (unlocked
&& mr
->global_locking
) {
2389 qemu_mutex_lock_iothread();
2391 release_lock
= true;
2393 if (mr
->flush_coalesced_mmio
) {
2395 qemu_mutex_lock_iothread();
2397 qemu_flush_coalesced_mmio_buffer();
2399 qemu_mutex_unlock_iothread();
2403 return release_lock
;
2406 MemTxResult
address_space_rw(AddressSpace
*as
, hwaddr addr
, MemTxAttrs attrs
,
2407 uint8_t *buf
, int len
, bool is_write
)
2414 MemTxResult result
= MEMTX_OK
;
2415 bool release_lock
= false;
2420 mr
= address_space_translate(as
, addr
, &addr1
, &l
, is_write
);
2423 if (!memory_access_is_direct(mr
, is_write
)) {
2424 release_lock
|= prepare_mmio_access(mr
);
2425 l
= memory_access_size(mr
, l
, addr1
);
2426 /* XXX: could force current_cpu to NULL to avoid
2430 /* 64 bit write access */
2432 result
|= memory_region_dispatch_write(mr
, addr1
, val
, 8,
2436 /* 32 bit write access */
2438 result
|= memory_region_dispatch_write(mr
, addr1
, val
, 4,
2442 /* 16 bit write access */
2444 result
|= memory_region_dispatch_write(mr
, addr1
, val
, 2,
2448 /* 8 bit write access */
2450 result
|= memory_region_dispatch_write(mr
, addr1
, val
, 1,
2457 addr1
+= memory_region_get_ram_addr(mr
);
2459 ptr
= qemu_get_ram_ptr(addr1
);
2460 memcpy(ptr
, buf
, l
);
2461 invalidate_and_set_dirty(mr
, addr1
, l
);
2464 if (!memory_access_is_direct(mr
, is_write
)) {
2466 release_lock
|= prepare_mmio_access(mr
);
2467 l
= memory_access_size(mr
, l
, addr1
);
2470 /* 64 bit read access */
2471 result
|= memory_region_dispatch_read(mr
, addr1
, &val
, 8,
2476 /* 32 bit read access */
2477 result
|= memory_region_dispatch_read(mr
, addr1
, &val
, 4,
2482 /* 16 bit read access */
2483 result
|= memory_region_dispatch_read(mr
, addr1
, &val
, 2,
2488 /* 8 bit read access */
2489 result
|= memory_region_dispatch_read(mr
, addr1
, &val
, 1,
2498 ptr
= qemu_get_ram_ptr(mr
->ram_addr
+ addr1
);
2499 memcpy(buf
, ptr
, l
);
2504 qemu_mutex_unlock_iothread();
2505 release_lock
= false;
MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                                const uint8_t *buf, int len)
{
    return address_space_rw(as, addr, attrs, (uint8_t *)buf, len, true);
}

MemTxResult address_space_read(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                               uint8_t *buf, int len)
{
    return address_space_rw(as, addr, attrs, buf, len, false);
}

void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
                     buf, len, is_write);
}
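/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): a device model holding its own AddressSpace could issue a DMA-style
 * transfer through the dispatch loop above and check the transaction result.
 * "ExampleState", "s->dma_as" and "s->desc" are hypothetical names.
 */
#if 0
static void example_dma_fetch(ExampleState *s, hwaddr desc_pa)
{
    MemTxResult res;

    /* Read a descriptor from guest memory into a host buffer. */
    res = address_space_read(&s->dma_as, desc_pa, MEMTXATTRS_UNSPECIFIED,
                             (uint8_t *)&s->desc, sizeof(s->desc));
    if (res != MEMTX_OK) {
        /* The transaction failed (e.g. unassigned or read-only memory). */
    }
}
#endif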
enum write_rom_type {
    WRITE_DATA,
    FLUSH_CACHE,
};

static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
    hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            l = memory_access_size(mr, l, addr1);
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            switch (type) {
            case WRITE_DATA:
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(mr, addr1, l);
                break;
            case FLUSH_CACHE:
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
                break;
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
    rcu_read_unlock();
}
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
                                   const uint8_t *buf, int len)
{
    cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
}

void cpu_flush_icache_range(hwaddr start, int len)
{
    /*
     * This function should do the same thing as an icache flush that was
     * triggered from within the guest. For TCG we are always cache coherent,
     * so there is no need to flush anything. For KVM / Xen we need to flush
     * the host's instruction cache at least.
     */
    if (tcg_enabled()) {
        return;
    }
    cpu_physical_memory_write_rom_internal(&address_space_memory,
                                           start, NULL, len, FLUSH_CACHE);
}
typedef struct {
    MemoryRegion *mr;
    void *buffer;
    hwaddr addr;
    hwaddr len;
    bool in_use;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    QEMUBH *bh;
    QLIST_ENTRY(MapClient) link;
} MapClient;

QemuMutex map_client_list_lock;
static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

static void cpu_unregister_map_client_do(MapClient *client)
{
    QLIST_REMOVE(client, link);
    g_free(client);
}
static void cpu_notify_map_clients_locked(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        qemu_bh_schedule(client->bh);
        cpu_unregister_map_client_do(client);
    }
}

void cpu_register_map_client(QEMUBH *bh)
{
    MapClient *client = g_malloc(sizeof(*client));

    qemu_mutex_lock(&map_client_list_lock);
    client->bh = bh;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    if (!atomic_read(&bounce.in_use)) {
        cpu_notify_map_clients_locked();
    }
    qemu_mutex_unlock(&map_client_list_lock);
}
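/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): when address_space_map() returns NULL because the bounce buffer is
 * busy, a caller can register a bottom half to be scheduled once a mapping
 * can be retried.  "example_retry" and "example_wait_for_map" are
 * hypothetical names.
 */
#if 0
static void example_retry(void *opaque)
{
    /* Called from a bottom half: try address_space_map() again here. */
}

static void example_wait_for_map(void)
{
    QEMUBH *retry_bh = qemu_bh_new(example_retry, NULL);

    cpu_register_map_client(retry_bh);
}
#endif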
void cpu_exec_init_all(void)
{
    qemu_mutex_init(&ram_list.mutex);
    qemu_mutex_init(&map_client_list_lock);
}

void cpu_unregister_map_client(QEMUBH *bh)
{
    MapClient *client;

    qemu_mutex_lock(&map_client_list_lock);
    QLIST_FOREACH(client, &map_client_list, link) {
        if (client->bh == bh) {
            cpu_unregister_map_client_do(client);
            break;
        }
    }
    qemu_mutex_unlock(&map_client_list_lock);
}

static void cpu_notify_map_clients(void)
{
    qemu_mutex_lock(&map_client_list_lock);
    cpu_notify_map_clients_locked();
    qemu_mutex_unlock(&map_client_list_lock);
}
bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    rcu_read_unlock();
    return true;
}
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    ram_addr_t raddr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    rcu_read_lock();
    mr = address_space_translate(as, addr, &xlat, &l, is_write);

    if (!memory_access_is_direct(mr, is_write)) {
        if (atomic_xchg(&bounce.in_use, true)) {
            rcu_read_unlock();
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
                               bounce.buffer, l);
        }

        rcu_read_unlock();
        *plen = l;
        return bounce.buffer;
    }

    base = xlat;
    raddr = memory_region_get_ram_addr(mr);

    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            break;
        }
    }

    memory_region_ref(mr);
    rcu_read_unlock();
    *plen = done;
    return qemu_ram_ptr_length(raddr + base, plen);
}
/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = qemu_ram_addr_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            invalidate_and_set_dirty(mr, addr1, access_len);
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
                            bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    atomic_mb_set(&bounce.in_use, false);
    cpu_notify_map_clients();
}
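/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): the usual pattern is map, access at most *plen bytes, then unmap
 * with the length actually touched so dirty tracking stays accurate.
 * "as" and "gpa" are assumed to come from the caller.
 */
#if 0
static void example_fill(AddressSpace *as, hwaddr gpa, hwaddr len)
{
    hwaddr plen = len;
    void *host = address_space_map(as, gpa, &plen, true);

    if (!host) {
        /* Resources exhausted: retry later via cpu_register_map_client(). */
        return;
    }
    memset(host, 0, plen);      /* plen may be smaller than len */
    address_space_unmap(as, host, plen, true, plen);
}
#endif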
void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len,
                               is_write, access_len);
}
/* warning: addr must be aligned */
static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}
uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
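/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): device or board code that needs a 32-bit value in a fixed byte
 * order reads it through the explicit-endian helpers rather than the
 * native-endian ldl_phys().
 */
#if 0
static uint32_t example_read_le32(AddressSpace *as, hwaddr gpa)
{
    /* Always interpreted as little endian, whatever the target is. */
    return ldl_le_phys(as, gpa);
}
#endif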
/* warning: addr must be aligned */
static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}
uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &val, 1, 0);
    if (result) {
        *result = r;
    }
    return val;
}

uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned */
static inline uint32_t address_space_lduw_internal(AddressSpace *as,
                                                   hwaddr addr,
                                                   MemTxAttrs attrs,
                                                   MemTxResult *result,
                                                   enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}
uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_BIG_ENDIAN);
}

uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
                                MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    uint8_t dirty_log_mask;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        dirty_log_mask = memory_region_get_dirty_log_mask(mr);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
        cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
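/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): target code that updates bookkeeping bits in a guest page table
 * entry typically uses the notdirty variant, since invalidating translated
 * code for such writes would be wasted work.  "pte_addr" and "PTE_ACCESSED"
 * are hypothetical names.
 */
#if 0
static void example_set_accessed(AddressSpace *as, hwaddr pte_addr,
                                 uint32_t pte)
{
    stl_phys_notdirty(as, pte_addr, pte | PTE_ACCESSED);
}
#endif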
/* warning: addr must be aligned */
static inline void address_space_stl_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 4);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}
void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t v = val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &v, 1, 1);
    if (result) {
        *result = r;
    }
}

void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned */
static inline void address_space_stw_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}
void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;

    val = tswap64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;

    val = cpu_to_le64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;

    val = cpu_to_be64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(cpu, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
        } else {
            address_space_rw(cpu->as, phys_addr, MEMTXATTRS_UNSPECIFIED,
                             buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
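/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): this is the kind of call a debugger stub makes to read guest
 * virtual memory of the CPU being debugged.
 */
#if 0
static void example_debug_peek(CPUState *cpu, target_ulong vaddr)
{
    uint8_t word[4];

    if (cpu_memory_rw_debug(cpu, vaddr, word, sizeof(word), 0) < 0) {
        /* No physical page mapped at this virtual address. */
    }
}
#endif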
/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}
#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;
    bool res;

    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    rcu_read_unlock();
    return res;
}
int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ret = func(block->idstr, block->host, block->offset,
                   block->used_length, opaque);