/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/types.h>

#include "qemu-common.h"

#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#endif

#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

#include "qemu/range.h"
//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;
/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif
struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;
#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];
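
/* Illustrative sizing note (editorial sketch, not from the original source;
 * values assume a target with TARGET_PAGE_BITS == 12): each level resolves
 * P_L2_BITS == 9 page-index bits, so
 *     P_L2_LEVELS = ((64 - 12 - 1) / 9) + 1 = 6
 * and a full walk decodes up to 6 * 9 = 54 bits of page index.  A nonzero
 * 'skip' in an entry lets lookups jump over levels whose interior nodes
 * would have had a single child.
 */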
typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3
static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;
#endif
#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}
static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}
static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
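
/* Illustrative example (not from the original source; assumes 4 KiB target
 * pages): registering one 2 MiB section at guest physical address 'gpa'
 * amounts to
 *     phys_page_set(d, gpa >> TARGET_PAGE_BITS, 512, section_index);
 * phys_page_set_level() then writes 'section_index' into whole aligned
 * subtrees wherever the remaining range covers one, and only recurses at
 * the unaligned edges of the range.
 */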
/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}
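
/* Compaction example (editorial note, not from the original source): if a
 * leaf is reachable only through a chain of single-child nodes, each with
 * skip == 1, repeated compaction folds the chain so that the topmost entry
 * ends up with skip == 3 and its ptr aimed directly at the destination,
 * turning three dependent node loads in phys_page_find() into one.
 */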
static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}
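
/* Worked lookup sketch (editorial note; assumes TARGET_PAGE_BITS == 12 and
 * P_L2_BITS == 9): for addr == 0x12345000, index == 0x12345.  Each loop
 * iteration subtracts lp.skip from i and picks the child numbered
 * (index >> (i * 9)) & 511, so an entry with skip == 3 descends past two
 * compacted levels in a single step.
 */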
bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}
/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}
/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}
static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}
/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}
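
/* Illustrative IOMMU iteration (editorial note with made-up values): with
 * iotlb.translated_addr == 0x80000000, iotlb.addr_mask == 0xfff and
 * addr == 0x1234, the loop above rewrites addr to 0x80000234 and clamps
 * *plen so the access cannot cross the 4 KiB translation granule.  It then
 * restarts in iotlb.target_as, since that address space may itself sit
 * behind another IOMMU.
 */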
/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(cpu->memory_dispatch,
                                               addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif
#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        NULL
    }
};

#endif
CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}
#if !defined(CONFIG_USER_ONLY)
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
{
    /* We only support one address space per cpu at the moment.  */
    assert(cpu->as == as);

    if (cpu->tcg_as_listener) {
        memory_listener_unregister(cpu->tcg_as_listener);
    } else {
        cpu->tcg_as_listener = g_new0(MemoryListener, 1);
    }
    cpu->tcg_as_listener->commit = tcg_commit;
    memory_listener_register(cpu->tcg_as_listener, as);
}
#endif
#ifndef CONFIG_USER_ONLY
static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);

static int cpu_get_free_index(Error **errp)
{
    int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);

    if (cpu >= MAX_CPUMASK_BITS) {
        error_setg(errp, "Trying to use more CPUs than max of %d",
                   MAX_CPUMASK_BITS);
        return -1;
    }

    bitmap_set(cpu_index_map, cpu, 1);
    return cpu;
}

void cpu_exec_exit(CPUState *cpu)
{
    if (cpu->cpu_index == -1) {
        /* cpu_index was never allocated by this @cpu or was already freed. */
        return;
    }

    bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
    cpu->cpu_index = -1;
}
#else

static int cpu_get_free_index(Error **errp)
{
    CPUState *some_cpu;
    int cpu_index = 0;

    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    return cpu_index;
}

void cpu_exec_exit(CPUState *cpu)
{
}
#endif
void cpu_exec_init(CPUArchState *env, Error **errp)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int cpu_index;
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    cpu->as = &address_space_memory;
    cpu->thread_id = qemu_get_thread_id();
    cpu_reload_memory_map(cpu);
#endif

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = cpu->cpu_index = cpu_get_free_index(&local_err);
    if (local_err) {
        error_propagate(errp, local_err);
#if defined(CONFIG_USER_ONLY)
        cpu_list_unlock();
#endif
        return;
    }
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif
#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint) {
        *watchpoint = wp;
    }
    return 0;
}
/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}
/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}
/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}
/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}

#endif
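
/* Worked example for the overlap test above (editorial note; assumes a
 * 32-bit vaddr): a 4-byte watchpoint at 0xfffffffc has wpend == 0xffffffff,
 * whereas the naive exclusive end wp->vaddr + wp->len would wrap to 0 and
 * make every access appear to start past the watchpoint.  Comparing
 * inclusive end addresses keeps the check correct at the very top of the
 * address space.
 */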
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}
/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}
/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}
/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            CPUArchState *env = cpu->env_ptr;
            tb_flush(env);
        }
    }
}
void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        goto found;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *                  rcu_read_lock()
     *                  read mru_block
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *                  rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    cpu_tlb_reset_dirty_all(start1, length);
    rcu_read_unlock();
}
/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    unsigned long end, page;
    bool dirty;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    dirty = bitmap_test_and_clear_atomic(ram_list.dirty_memory[client],
                                         page, end - page);

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}
/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        iotlb = section - section->address_space->dispatch->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
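
/* Encoding note (editorial, not from the original source): for RAM the
 * returned iotlb holds a page-aligned ram_addr_t with a special section
 * index OR-ed into the low bits; for MMIO it holds the section number plus
 * the in-page offset.  This is why phys_section_add() asserts that section
 * numbers stay below TARGET_PAGE_SIZE — the two encodings must never
 * collide in the low bits, and iotlb_to_region() recovers the section with
 * index & ~TARGET_PAGE_MASK.
 */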
#endif /* defined(CONFIG_USER_ONLY) */
#if !defined(CONFIG_USER_ONLY)

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}
static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}
static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}
static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->nodes);
    g_free(map->sections);
}
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}
static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}
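
/* Splitting example for mem_add() (editorial note; assumes 4 KiB target
 * pages): a section covering [0x0800, 0x2800) is carved into a head
 * subpage for [0x0800, 0x1000), one full page registered through
 * register_multipage() for [0x1000, 0x2000), and a tail subpage for
 * [0x2000, 0x2800).  Only the page-unaligned edges pay the subpage
 * indirection cost.
 */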
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}
#ifdef __linux__

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path, Error **errp)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        error_setg_errno(errp, errno, "failed to get page size of file %s",
                         path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC) {
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
    }

    return fs.f_bsize;
}
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path,
                            Error **errp)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area = NULL;
    int fd;
    uint64_t hpagesize;
    Error *local_err = NULL;

    hpagesize = gethugepagesize(path, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto error;
    }
    block->mr->align = hpagesize;

    if (memory < hpagesize) {
        error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
                   "or larger than huge page size 0x%" PRIx64,
                   memory, hpagesize);
        goto error;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        error_setg(errp,
                   "host lacks kvm mmu notifiers, -mem-path unsupported");
        goto error;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(memory_region_name(block->mr));
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/') {
            *c = '_';
        }
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        error_setg_errno(errp, errno,
                         "unable to create backing store for hugepages");
        g_free(filename);
        goto error;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory)) {
        perror("ftruncate");
    }

    area = mmap(0, memory, PROT_READ | PROT_WRITE,
                (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE),
                fd, 0);
    if (area == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "unable to map backing store for hugepages");
        close(fd);
        goto error;
    }

    if (mem_prealloc) {
        os_mem_prealloc(fd, area, memory);
    }

    block->fd = fd;
    return area;

error:
    if (mem_prealloc) {
        error_report("%s", error_get_pretty(*errp));
        exit(1);
    }
    return NULL;
}
#endif
/* Called with the ramlist lock held.  */
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
        return 0;
    }

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->max_length;

        QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}
ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        last = MAX(last, block->offset + block->max_length);
    }
    rcu_read_unlock();
    return last;
}
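
/* Allocation example for find_ram_offset() above (editorial note with
 * made-up values): with existing blocks at [0x00000000, 0x08000000) and
 * [0x10000000, 0x18000000), a request for 0x4000000 bytes picks offset
 * 0x08000000, since the 128 MiB gap there is the smallest one that still
 * fits; best-fit keeps the ram_addr_t space compact.
 */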
static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    if (!machine_dump_guest_core(current_machine)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                            "but dump_guest_core=off specified\n");
        }
    }
}
/* Called within an RCU critical section, or while the ramlist lock
 * is held.
 */
static RAMBlock *find_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            return block;
        }
    }

    return NULL;
}
/* Called with iothread lock held.  */
void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    rcu_read_lock();
    new_block = find_ram_block(addr);
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    rcu_read_unlock();
}
/* Called with iothread lock held.  */
void qemu_ram_unset_idstr(ram_addr_t addr)
{
    RAMBlock *block;

    /* FIXME: arch_init.c assumes that this is not called throughout
     * migration.  Ignore the problem since hot-unplug during migration
     * does not work anyway.
     */

    rcu_read_lock();
    block = find_ram_block(addr);
    if (block) {
        memset(block->idstr, 0, sizeof(block->idstr));
    }
    rcu_read_unlock();
}
static int memory_try_enable_merging(void *addr, size_t len)
{
    if (!machine_mem_merge(current_machine)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}
/* Only legal before guest might have detected the memory size: e.g. on
 * incoming migration, or right after reset.
 *
 * As the memory core doesn't know how memory is accessed, it is up to the
 * resize callback to update device state and/or add assertions to detect
 * misuse, if necessary.
 */
int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
{
    RAMBlock *block = find_ram_block(base);

    assert(block);

    newsize = TARGET_PAGE_ALIGN(newsize);

    if (block->used_length == newsize) {
        return 0;
    }

    if (!(block->flags & RAM_RESIZEABLE)) {
        error_setg_errno(errp, EINVAL,
                         "Length mismatch: %s: 0x" RAM_ADDR_FMT
                         " in != 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->used_length);
        return -EINVAL;
    }

    if (block->max_length < newsize) {
        error_setg_errno(errp, EINVAL,
                         "Length too large: %s: 0x" RAM_ADDR_FMT
                         " > 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->max_length);
        return -EINVAL;
    }

    cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
    block->used_length = newsize;
    cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
                                        DIRTY_CLIENTS_ALL);
    memory_region_set_size(block->mr, newsize);
    if (block->resized) {
        block->resized(block->idstr, newsize, block->host);
    }
    return 0;
}
static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
{
    RAMBlock *block;
    RAMBlock *last_block = NULL;
    ram_addr_t old_ram_size, new_ram_size;

    old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    qemu_mutex_lock_ramlist();
    new_block->offset = find_ram_offset(new_block->max_length);

    if (!new_block->host) {
        if (xen_enabled()) {
            xen_ram_alloc(new_block->offset, new_block->max_length,
                          new_block->mr);
        } else {
            new_block->host = phys_mem_alloc(new_block->max_length,
                                             &new_block->mr->align);
            if (!new_block->host) {
                error_setg_errno(errp, errno,
                                 "cannot set up guest memory '%s'",
                                 memory_region_name(new_block->mr));
                qemu_mutex_unlock_ramlist();
                return -1;
            }
            memory_try_enable_merging(new_block->host, new_block->max_length);
        }
    }

    new_ram_size = MAX(old_ram_size,
              (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
    if (new_ram_size > old_ram_size) {
        migration_bitmap_extend(old_ram_size, new_ram_size);
    }
    /* Keep the list sorted from biggest to smallest block.  Unlike QTAILQ,
     * QLIST (which has an RCU-friendly variant) does not have insertion at
     * tail, so save the last element in last_block.
     */
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        last_block = block;
        if (block->max_length < new_block->max_length) {
            break;
        }
    }
    if (block) {
        QLIST_INSERT_BEFORE_RCU(block, new_block, next);
    } else if (last_block) {
        QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
    } else { /* list is empty */
        QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    /* Write list before version */
    smp_wmb();
    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    if (new_ram_size > old_ram_size) {
        int i;

        /* ram_list.dirty_memory[] is protected by the iothread lock.  */
        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
            ram_list.dirty_memory[i] =
                bitmap_zero_extend(ram_list.dirty_memory[i],
                                   old_ram_size, new_ram_size);
        }
    }
    cpu_physical_memory_set_dirty_range(new_block->offset,
                                        new_block->used_length,
                                        DIRTY_CLIENTS_ALL);

    if (new_block->host) {
        qemu_ram_setup_dump(new_block->host, new_block->max_length);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
        if (kvm_enabled()) {
            kvm_setup_guest_memory(new_block->host, new_block->max_length);
        }
    }

    return new_block->offset;
}
#ifdef __linux__
ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                    bool share, const char *mem_path,
                                    Error **errp)
{
    RAMBlock *new_block;
    ram_addr_t addr;
    Error *local_err = NULL;

    if (xen_enabled()) {
        error_setg(errp, "-mem-path not supported with Xen");
        return -1;
    }

    if (phys_mem_alloc != qemu_anon_ram_alloc) {
        /*
         * file_ram_alloc() needs to allocate just like
         * phys_mem_alloc, but we haven't bothered to provide
         * a hook there.
         */
        error_setg(errp,
                   "-mem-path not supported with this accelerator");
        return -1;
    }

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->used_length = size;
    new_block->max_length = size;
    new_block->flags = share ? RAM_SHARED : 0;
    new_block->host = file_ram_alloc(new_block, size,
                                     mem_path, errp);
    if (!new_block->host) {
        g_free(new_block);
        return -1;
    }

    addr = ram_block_add(new_block, &local_err);
    if (local_err) {
        g_free(new_block);
        error_propagate(errp, local_err);
        return -1;
    }
    return addr;
}
#endif
static ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
                                          void (*resized)(const char*,
                                                          uint64_t length,
                                                          void *host),
                                          void *host, bool resizeable,
                                          MemoryRegion *mr, Error **errp)
{
    RAMBlock *new_block;
    ram_addr_t addr;
    Error *local_err = NULL;

    size = TARGET_PAGE_ALIGN(size);
    max_size = TARGET_PAGE_ALIGN(max_size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->resized = resized;
    new_block->used_length = size;
    new_block->max_length = max_size;
    assert(max_size >= size);
    new_block->fd = -1;
    new_block->host = host;
    if (host) {
        new_block->flags |= RAM_PREALLOC;
    }
    if (resizeable) {
        new_block->flags |= RAM_RESIZEABLE;
    }
    addr = ram_block_add(new_block, &local_err);
    if (local_err) {
        g_free(new_block);
        error_propagate(errp, local_err);
        return -1;
    }
    return addr;
}
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
}

ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
                                     void (*resized)(const char*,
                                                     uint64_t length,
                                                     void *host),
                                     MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
}
void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    qemu_mutex_lock_ramlist();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE_RCU(block, next);
            ram_list.mru_block = NULL;
            /* Write list before version */
            smp_wmb();
            ram_list.version++;
            g_free_rcu(block, rcu);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}
static void reclaim_ramblock(RAMBlock *block)
{
    if (block->flags & RAM_PREALLOC) {
        ;
    } else if (xen_enabled()) {
        xen_invalidate_map_cache_entry(block->host);
#ifndef _WIN32
    } else if (block->fd >= 0) {
        munmap(block->host, block->max_length);
        close(block->fd);
#endif
    } else {
        qemu_anon_ram_free(block->host, block->max_length);
    }
    g_free(block);
}
void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    qemu_mutex_lock_ramlist();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE_RCU(block, next);
            ram_list.mru_block = NULL;
            /* Write list before version */
            smp_wmb();
            ram_list.version++;
            call_rcu(block, reclaim_ramblock, rcu);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}
#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->max_length) {
            vaddr = ramblock_ptr(block, offset);
            if (block->flags & RAM_PREALLOC) {
                ;
            } else if (xen_enabled()) {
                abort();
            } else {
                flags = MAP_FIXED;
                if (block->fd >= 0) {
                    flags |= (block->flags & RAM_SHARED ?
                              MAP_SHARED : MAP_PRIVATE);
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, block->fd, offset);
                } else {
                    /*
                     * Remap needs to match alloc.  Accelerators that
                     * set phys_mem_alloc never remap.  If they did,
                     * we'd need a remap hook here.
                     */
                    assert(phys_mem_alloc == qemu_anon_ram_alloc);

                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
        }
    }
}
#endif /* !_WIN32 */
int qemu_get_ram_fd(ram_addr_t addr)
{
    RAMBlock *block;
    int fd;

    rcu_read_lock();
    block = qemu_get_ram_block(addr);
    fd = block->fd;
    rcu_read_unlock();
    return fd;
}

void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
{
    RAMBlock *block;
    void *ptr;

    rcu_read_lock();
    block = qemu_get_ram_block(addr);
    ptr = ramblock_ptr(block, 0);
    rcu_read_unlock();
    return ptr;
}
/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * This should not be used for general purpose DMA.  Use address_space_map
 * or address_space_rw instead. For local memory (e.g. video ram) that the
 * device owns, use memory_region_get_ram_ptr.
 *
 * By the time this function returns, the returned pointer is not protected
 * by RCU anymore.  If the caller is not within an RCU critical section and
 * does not hold the iothread lock, it must have other means of protecting the
 * pointer, such as a reference to the region that includes the incoming
 * ram_addr_t.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;
    void *ptr;

    rcu_read_lock();
    block = qemu_get_ram_block(addr);

    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            ptr = xen_map_cache(addr, 0, 0);
            goto unlock;
        }

        block->host = xen_map_cache(block->offset, block->max_length, 1);
    }
    ptr = ramblock_ptr(block, addr - block->offset);

unlock:
    rcu_read_unlock();
    return ptr;
}
/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument.
 *
 * By the time this function returns, the returned pointer is not protected
 * by RCU anymore.  If the caller is not within an RCU critical section and
 * does not hold the iothread lock, it must have other means of protecting the
 * pointer, such as a reference to the region that includes the incoming
 * ram_addr_t.
 */
static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
{
    void *ptr;

    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        rcu_read_lock();
        QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->max_length) {
                if (addr - block->offset + *size > block->max_length) {
                    *size = block->max_length - addr + block->offset;
                }
                ptr = ramblock_ptr(block, addr - block->offset);
                rcu_read_unlock();
                return ptr;
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}
/* Some of the softmmu routines need to translate from a host pointer
 * (typically a TLB entry) back to a ram offset.
 *
 * By the time this function returns, the returned pointer is not protected
 * by RCU anymore.  If the caller is not within an RCU critical section and
 * does not hold the iothread lock, it must have other means of protecting the
 * pointer, such as a reference to the region that includes the incoming
 * ram_addr_t.
 */
MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;
    MemoryRegion *mr;

    if (xen_enabled()) {
        rcu_read_lock();
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        mr = qemu_get_ram_block(*ram_addr)->mr;
        rcu_read_unlock();
        return mr;
    }

    rcu_read_lock();
    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && block->host && host - block->host < block->max_length) {
        goto found;
    }

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->max_length) {
            goto found;
        }
    }

    rcu_read_unlock();
    return NULL;

found:
    *ram_addr = block->offset + (host - block->host);
    mr = block->mr;
    rcu_read_unlock();
    return mr;
}
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_page_fast(ram_addr, size);
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    /* Set both VGA and migration bits for simplicity and to remove
     * the notdirty callback faster.
     */
    cpu_physical_memory_set_dirty_range(ram_addr, size,
                                        DIRTY_CLIENTS_NOCODE);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (!cpu_physical_memory_is_clean(ram_addr)) {
        CPUArchState *env = current_cpu->env_ptr;
        tlb_set_dirty(env, current_cpu->mem_io_vaddr);
    }
}
static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)
{
    return is_write;
}

static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
{
    CPUState *cpu = current_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (cpu->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, len)
            && (wp->flags & flags)) {
            if (flags == BP_MEM_READ) {
                wp->flags |= BP_WATCHPOINT_HIT_READ;
            } else {
                wp->flags |= BP_WATCHPOINT_HIT_WRITE;
            }
            wp->hitaddr = vaddr;
            wp->hitattrs = attrs;
            if (!cpu->watchpoint_hit) {
                cpu->watchpoint_hit = wp;
                tb_check_watchpoint(cpu);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    cpu->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(cpu);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(cpu, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   memory routines.  */
static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
                                  unsigned size, MemTxAttrs attrs)
{
    MemTxResult res;
    uint64_t data;

    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
    switch (size) {
    case 1:
        data = address_space_ldub(&address_space_memory, addr, attrs, &res);
        break;
    case 2:
        data = address_space_lduw(&address_space_memory, addr, attrs, &res);
        break;
    case 4:
        data = address_space_ldl(&address_space_memory, addr, attrs, &res);
        break;
    default:
        abort();
    }
    *pdata = data;
    return res;
}
static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
                                   uint64_t val, unsigned size,
                                   MemTxAttrs attrs)
{
    MemTxResult res;

    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
    switch (size) {
    case 1:
        address_space_stb(&address_space_memory, addr, val, attrs, &res);
        break;
    case 2:
        address_space_stw(&address_space_memory, addr, val, attrs, &res);
        break;
    case 4:
        address_space_stl(&address_space_memory, addr, val, attrs, &res);
        break;
    default:
        abort();
    }
    return res;
}
static const MemoryRegionOps watch_mem_ops = {
    .read_with_attrs = watch_mem_read,
    .write_with_attrs = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
                                unsigned len, MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];
    MemTxResult res;

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
#endif
    res = address_space_read(subpage->as, addr + subpage->base,
                             attrs, buf, len);
    if (res) {
        return res;
    }
    switch (len) {
    case 1:
        *data = ldub_p(buf);
        return MEMTX_OK;
    case 2:
        *data = lduw_p(buf);
        return MEMTX_OK;
    case 4:
        *data = ldl_p(buf);
        return MEMTX_OK;
    case 8:
        *data = ldq_p(buf);
        return MEMTX_OK;
    default:
        abort();
    }
}
static MemTxResult subpage_write(void *opaque, hwaddr addr,
                                 uint64_t value, unsigned len, MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
#endif
    switch (len) {
    case 1:
        stb_p(buf, value);
        break;
    case 2:
        stw_p(buf, value);
        break;
    case 4:
        stl_p(buf, value);
        break;
    case 8:
        stq_p(buf, value);
        break;
    default:
        abort();
    }
    return address_space_write(subpage->as, addr + subpage->base,
                               attrs, buf, len);
}
static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned len, bool is_write)
{
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', len, addr);
#endif

    return address_space_access_valid(subpage->as, addr + subpage->base,
                                      len, is_write);
}
static const MemoryRegionOps subpage_ops = {
    .read_with_attrs = subpage_read,
    .write_with_attrs = subpage_write,
    .impl.min_access_size = 1,
    .impl.max_access_size = 8,
    .valid.min_access_size = 1,
    .valid.max_access_size = 8,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE) {
        return -1;
    }
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}
static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->as = as;
    mmio->base = base;
    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          NULL, TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);

    return mmio;
}
static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
                              MemoryRegion *mr)
{
    assert(as);
    MemoryRegionSection section = {
        .address_space = as,
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),
    };

    return phys_section_add(map, &section);
}
MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index)
{
    AddressSpaceDispatch *d = atomic_rcu_read(&cpu->memory_dispatch);
    MemoryRegionSection *sections = d->map.sections;

    return sections[index & ~TARGET_PAGE_MASK].mr;
}
static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
                          NULL, UINT64_MAX);
}
static void mem_begin(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
    uint16_t n;

    n = dummy_section(&d->map, as, &io_mem_unassigned);
    assert(n == PHYS_SECTION_UNASSIGNED);
    n = dummy_section(&d->map, as, &io_mem_notdirty);
    assert(n == PHYS_SECTION_NOTDIRTY);
    n = dummy_section(&d->map, as, &io_mem_rom);
    assert(n == PHYS_SECTION_ROM);
    n = dummy_section(&d->map, as, &io_mem_watch);
    assert(n == PHYS_SECTION_WATCH);

    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
    d->as = as;
    as->next_dispatch = d;
}
static void address_space_dispatch_free(AddressSpaceDispatch *d)
{
    phys_sections_free(&d->map);
    g_free(d);
}
static void mem_commit(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *cur = as->dispatch;
    AddressSpaceDispatch *next = as->next_dispatch;

    phys_page_compact_all(next, next->map.nodes_nb);

    atomic_rcu_set(&as->dispatch, next);
    if (cur) {
        call_rcu(cur, address_space_dispatch_free, rcu);
    }
}
static void tcg_commit(MemoryListener *listener)
{
    CPUState *cpu;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    CPU_FOREACH(cpu) {
        /* FIXME: Disentangle the cpu.h circular files deps so we can
           directly get the right CPU from listener.  */
        if (cpu->tcg_as_listener != listener) {
            continue;
        }
        cpu_reload_memory_map(cpu);
    }
}
void address_space_init_dispatch(AddressSpace *as)
{
    as->dispatch = NULL;
    as->dispatch_listener = (MemoryListener) {
        .begin = mem_begin,
        .commit = mem_commit,
        .region_add = mem_add,
        .region_nop = mem_add,
        .priority = 0,
    };
    memory_listener_register(&as->dispatch_listener, as);
}
void address_space_unregister(AddressSpace *as)
{
    memory_listener_unregister(&as->dispatch_listener);
}

void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    atomic_rcu_set(&as->dispatch, NULL);
    if (d) {
        call_rcu(d, address_space_dispatch_free, rcu);
    }
}
static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));

    memory_region_init(system_memory, NULL, "system", UINT64_MAX);
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
                          65536);
    address_space_init(&address_space_io, system_io, "I/O");
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID)) {
            return -1;
        }
        if (is_write) {
            if (!(flags & PAGE_WRITE)) {
                return -1;
            }
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0))) {
                return -1;
            }
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ)) {
                return -1;
            }
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1))) {
                return -1;
            }
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else
static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
                                     hwaddr length)
{
    uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    /* No early return if dirty_log_mask is or becomes 0, because
     * cpu_physical_memory_set_dirty_range will still call
     * xen_modified_memory.
     */
    if (dirty_log_mask) {
        dirty_log_mask =
            cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
    }
    if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_range(addr, addr + length);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
    }
    cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
}
static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified.  */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
    }
    if (l & (l - 1)) {
        l = 1 << (qemu_fls(l) - 1);
    }

    return l;
}
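
/* Worked example (editorial note): an 8-byte request at address 0x1002
 * against a region that declares max_access_size == 4 and no unaligned
 * support is first capped to 4, then bounded by the address alignment
 * (0x1002 & -0x1002 == 2), yielding a 2-byte access; the caller's loop in
 * address_space_rw() issues the remaining bytes in further iterations.
 */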
static bool prepare_mmio_access(MemoryRegion *mr)
{
    bool unlocked = !qemu_mutex_iothread_locked();
    bool release_lock = false;

    if (unlocked && mr->global_locking) {
        qemu_mutex_lock_iothread();
        unlocked = false;
        release_lock = true;
    }
    if (mr->flush_coalesced_mmio) {
        if (unlocked) {
            qemu_mutex_lock_iothread();
        }
        qemu_flush_coalesced_mmio_buffer();
        if (unlocked) {
            qemu_mutex_unlock_iothread();
        }
    }

    return release_lock;
}
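
/* Locking note (editorial): the iothread lock is taken above either because
 * the region was not marked lock-free (mr->global_locking) or, transiently,
 * to drain KVM's coalesced-MMIO buffer before touching a region that
 * depends on ordering with those buffered writes.  When this returns true,
 * the caller is responsible for dropping the lock again, as
 * address_space_rw() does at the end of each iteration.
 */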
MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                             uint8_t *buf, int len, bool is_write)
{
    hwaddr l;
    uint8_t *ptr;
    uint64_t val;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, is_write);

        if (is_write) {
            if (!memory_access_is_direct(mr, is_write)) {
                release_lock |= prepare_mmio_access(mr);
                l = memory_access_size(mr, l, addr1);
                /* XXX: could force current_cpu to NULL to avoid
                   potential bugs */
                switch (l) {
                case 8:
                    /* 64 bit write access */
                    val = ldq_p(buf);
                    result |= memory_region_dispatch_write(mr, addr1, val, 8,
                                                           attrs);
                    break;
                case 4:
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    result |= memory_region_dispatch_write(mr, addr1, val, 4,
                                                           attrs);
                    break;
                case 2:
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    result |= memory_region_dispatch_write(mr, addr1, val, 2,
                                                           attrs);
                    break;
                case 1:
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    result |= memory_region_dispatch_write(mr, addr1, val, 1,
                                                           attrs);
                    break;
                default:
                    abort();
                }
            } else {
                addr1 += memory_region_get_ram_addr(mr);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(mr, addr1, l);
            }
        } else {
            if (!memory_access_is_direct(mr, is_write)) {
                /* I/O case */
                release_lock |= prepare_mmio_access(mr);
                l = memory_access_size(mr, l, addr1);
                switch (l) {
                case 8:
                    /* 64 bit read access */
                    result |= memory_region_dispatch_read(mr, addr1, &val, 8,
                                                          attrs);
                    stq_p(buf, val);
                    break;
                case 4:
                    /* 32 bit read access */
                    result |= memory_region_dispatch_read(mr, addr1, &val, 4,
                                                          attrs);
                    stl_p(buf, val);
                    break;
                case 2:
                    /* 16 bit read access */
                    result |= memory_region_dispatch_read(mr, addr1, &val, 2,
                                                          attrs);
                    stw_p(buf, val);
                    break;
                case 1:
                    /* 8 bit read access */
                    result |= memory_region_dispatch_read(mr, addr1, &val, 1,
                                                          attrs);
                    stb_p(buf, val);
                    break;
                default:
                    abort();
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
                memcpy(buf, ptr, l);
            }
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;
    }
    rcu_read_unlock();

    return result;
}
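
/* Usage sketch (editorial, not from the original file): a DMA-style 4-byte
 * guest-memory read through the softmmu path would look like
 *
 *     uint8_t buf[4];
 *     MemTxResult r = address_space_rw(&address_space_memory, gpa,
 *                                      MEMTXATTRS_UNSPECIFIED,
 *                                      buf, sizeof(buf), false);
 *
 * where 'gpa' is a guest physical address supplied by the caller.
 */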
MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                                const uint8_t *buf, int len)
{
    return address_space_rw(as, addr, attrs, (uint8_t *)buf, len, true);
}

MemTxResult address_space_read(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                               uint8_t *buf, int len)
{
    return address_space_rw(as, addr, attrs, buf, len, false);
}
void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
                     buf, len, is_write);
}
enum write_rom_type {
    WRITE_DATA,
    FLUSH_CACHE,
};
static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
    hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            l = memory_access_size(mr, l, addr1);
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            switch (type) {
            case WRITE_DATA:
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(mr, addr1, l);
                break;
            case FLUSH_CACHE:
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
                break;
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
    rcu_read_unlock();
}
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
                                   const uint8_t *buf, int len)
{
    cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
}
void cpu_flush_icache_range(hwaddr start, int len)
{
    /*
     * This function should do the same thing as an icache flush that was
     * triggered from within the guest. For TCG we are always cache coherent,
     * so there is no need to flush anything. For KVM / Xen we need to flush
     * the host's instruction cache at least.
     */
    if (tcg_enabled()) {
        return;
    }
    cpu_physical_memory_write_rom_internal(&address_space_memory,
                                           start, NULL, len, FLUSH_CACHE);
}
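/*
 * Usage sketch (editor's illustration, not part of the original file):
 * (re)loading a firmware blob at reset.  Unlike address_space_write(),
 * the ROM variant also patches memory the guest sees as read-only; the
 * icache flush only matters for KVM/Xen, as explained above.  The
 * address "bios_gpa" is an assumption made for the example.
 */
static void __attribute__((unused))
example_load_firmware(const uint8_t *image, int size, hwaddr bios_gpa)
{
    cpu_physical_memory_write_rom(&address_space_memory, bios_gpa,
                                  image, size);
    cpu_flush_icache_range(bios_gpa, size);
}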
typedef struct {
    MemoryRegion *mr;
    void *buffer;
    hwaddr addr;
    hwaddr len;
    bool in_use;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    QEMUBH *bh;
    QLIST_ENTRY(MapClient) link;
} MapClient;

QemuMutex map_client_list_lock;
static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);
static void cpu_unregister_map_client_do(MapClient *client)
{
    QLIST_REMOVE(client, link);
    g_free(client);
}
static void cpu_notify_map_clients_locked(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        qemu_bh_schedule(client->bh);
        cpu_unregister_map_client_do(client);
    }
}
void cpu_register_map_client(QEMUBH *bh)
{
    MapClient *client = g_malloc(sizeof(*client));

    qemu_mutex_lock(&map_client_list_lock);
    client->bh = bh;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    if (!atomic_read(&bounce.in_use)) {
        cpu_notify_map_clients_locked();
    }
    qemu_mutex_unlock(&map_client_list_lock);
}
void cpu_exec_init_all(void)
{
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
    qemu_mutex_init(&map_client_list_lock);
}
void cpu_unregister_map_client(QEMUBH *bh)
{
    MapClient *client;

    qemu_mutex_lock(&map_client_list_lock);
    QLIST_FOREACH(client, &map_client_list, link) {
        if (client->bh == bh) {
            cpu_unregister_map_client_do(client);
            break;
        }
    }
    qemu_mutex_unlock(&map_client_list_lock);
}
static void cpu_notify_map_clients(void)
{
    qemu_mutex_lock(&map_client_list_lock);
    cpu_notify_map_clients_locked();
    qemu_mutex_unlock(&map_client_list_lock);
}
bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                rcu_read_unlock();  /* don't leak the RCU read lock */
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    rcu_read_unlock();
    return true;
}
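/*
 * Usage sketch (editor's illustration, not part of the original file):
 * probing a DMA window before committing to a transfer, in the spirit
 * of the dma helpers.
 */
static bool __attribute__((unused))
example_dma_window_ok(AddressSpace *as, hwaddr base, int size)
{
    return address_space_access_valid(as, base, size, true /* is_write */);
}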
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    ram_addr_t raddr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    rcu_read_lock();
    mr = address_space_translate(as, addr, &xlat, &l, is_write);

    if (!memory_access_is_direct(mr, is_write)) {
        if (atomic_xchg(&bounce.in_use, true)) {
            rcu_read_unlock();
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
                               bounce.buffer, l);
        }

        rcu_read_unlock();
        *plen = l;
        return bounce.buffer;
    }

    base = xlat;
    raddr = memory_region_get_ram_addr(mr);

    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            break;
        }
    }

    memory_region_ref(mr);
    rcu_read_unlock();
    *plen = done;
    return qemu_ram_ptr_length(raddr + base, plen);
}
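/*
 * Usage sketch (editor's illustration, not part of the original file):
 * a zero-copy fill of guest memory.  Per the contract above, the
 * mapping is used for writes only, and only the subset actually mapped
 * (*plen on return) may be touched.  "dst_gpa" and "len" are
 * assumptions made for the example.
 */
static void __attribute__((unused))
example_map_and_fill(AddressSpace *as, hwaddr dst_gpa, hwaddr len)
{
    hwaddr plen = len;
    void *host = address_space_map(as, dst_gpa, &plen, true /* is_write */);

    if (!host) {
        /* Out of resources: cpu_register_map_client() can schedule a
         * retry once the bounce buffer is released. */
        return;
    }
    memset(host, 0, plen);
    address_space_unmap(as, host, plen, true /* is_write */, plen);
}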
/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = qemu_ram_addr_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            invalidate_and_set_dirty(mr, addr1, access_len);
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
                            bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    atomic_mb_set(&bounce.in_use, false);
    cpu_notify_map_clients();
}
void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}
void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    address_space_unmap(&address_space_memory, buffer, len,
                        is_write, access_len);
}
/* warning: addr must be aligned */
static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}
uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}
uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}
uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}
uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned */
static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}
uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}
uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
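/*
 * Usage sketch (editor's illustration, not part of the original file):
 * device models normally pick the fixed-endian helper matching the
 * guest-visible layout, so the result is independent of
 * TARGET_WORDS_BIGENDIAN.  "ring_gpa" is an assumed guest address of a
 * little-endian 64-bit ring index.
 */
static uint64_t __attribute__((unused))
example_read_ring_index(AddressSpace *as, hwaddr ring_gpa)
{
    /* The _phys wrappers pass NULL for MemTxResult, i.e. transaction
     * failures are silently ignored. */
    return ldq_le_phys(as, ring_gpa);
}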
uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &val, 1, 0);
    if (result) {
        *result = r;
    }
    return val;
}
uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned */
static inline uint32_t address_space_lduw_internal(AddressSpace *as,
                                                   hwaddr addr,
                                                   MemTxAttrs attrs,
                                                   MemTxResult *result,
                                                   enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}
uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_BIG_ENDIAN);
}
uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
                                MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    uint8_t dirty_log_mask;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        dirty_log_mask = memory_region_get_dirty_log_mask(mr);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
        cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}
void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
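/*
 * Usage sketch (editor's illustration, not part of the original file):
 * target MMU code setting the accessed bit in a guest PTE.  The
 * _notdirty store leaves DIRTY_MEMORY_CODE alone, so translated code
 * that happens to sit on the page-table page is not invalidated.  The
 * PTE address and the 0x20 bit are assumptions made for the example.
 */
static void __attribute__((unused))
example_mark_pte_accessed(AddressSpace *as, hwaddr pte_addr)
{
    uint32_t pte = ldl_phys(as, pte_addr);

    stl_phys_notdirty(as, pte_addr, pte | 0x20 /* assumed ACCESSED bit */);
}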
/* warning: addr must be aligned */
static inline void address_space_stl_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 4);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}
void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}
void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t v = val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &v, 1, 1);
    if (result) {
        *result = r;
    }
}
void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* warning: addr must be aligned */
static inline void address_space_stw_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}
void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}
void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;

    val = tswap64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;

    val = cpu_to_le64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;

    val = cpu_to_be64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}
void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(cpu, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
        } else {
            address_space_rw(cpu->as, phys_addr, MEMTXATTRS_UNSPECIFIED,
                             buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
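/*
 * Usage sketch (editor's illustration, not part of the original file):
 * this is essentially how the gdbstub peeks at guest virtual memory;
 * -1 means the page is not currently mapped by the guest MMU.
 */
static int __attribute__((unused))
example_peek_guest_u32(CPUState *cpu, target_ulong vaddr, uint32_t *out)
{
    return cpu_memory_rw_debug(cpu, vaddr, (uint8_t *)out,
                               sizeof(*out), 0 /* read */);
}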
/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}
#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;
    bool res;

    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    rcu_read_unlock();
    return res;
}
int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ret = func(block->idstr, block->host, block->offset,
                   block->used_length, opaque);
        if (ret) {
            break;
        }
    }
    rcu_read_unlock();
    return ret;
}
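/*
 * Usage sketch (editor's illustration, not part of the original file):
 * a RAMBlockIterFunc that adds up the used length of every block;
 * returning non-zero from the callback would stop the walk early.
 * Called as: qemu_ram_foreach_block(example_sum_ram, &total);
 */
static int __attribute__((unused))
example_sum_ram(const char *idstr, void *host_addr, ram_addr_t offset,
                ram_addr_t length, void *opaque)
{
    *(uint64_t *)opaque += length;
    return 0;
}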