/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "cpu.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "qapi/visitor.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/qemu-print.h"
#include "qom/object.h"
#include "trace-root.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "sysemu/tcg.h"
#include "sysemu/accel.h"
#include "hw/qdev-properties.h"
#include "hw/boards.h"
#include "migration/vmstate.h"
//#define DEBUG_UNASSIGNED

static unsigned memory_region_transaction_depth;
static bool memory_region_update_pending;
static bool ioeventfd_update_pending;
bool global_dirty_log;

static QTAILQ_HEAD(, MemoryListener) memory_listeners
    = QTAILQ_HEAD_INITIALIZER(memory_listeners);

static QTAILQ_HEAD(, AddressSpace) address_spaces
    = QTAILQ_HEAD_INITIALIZER(address_spaces);

static GHashTable *flat_views;

typedef struct AddrRange AddrRange;
/*
 * Note that signed integers are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;
    Int128 size;
};

static AddrRange addrrange_make(Int128 start, Int128 size)
{
    return (AddrRange) { start, size };
}
static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
}

static Int128 addrrange_end(AddrRange r)
{
    return int128_add(r.start, r.size);
}

static AddrRange addrrange_shift(AddrRange range, Int128 delta)
{
    int128_addto(&range.start, delta);
    return range;
}

static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 start = int128_max(r1.start, r2.start);
    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
    return addrrange_make(start, int128_sub(end, start));
}
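
/*
 * Worked example (editor's note, not in the original source): intersecting
 * [0x1000, +0x2000) with [0x2000, +0x2000) yields start = max(0x1000, 0x2000)
 * = 0x2000 and end = min(0x3000, 0x4000) = 0x3000, i.e. the range
 * [0x2000, +0x1000).  Callers are expected to check addrrange_intersects()
 * first; otherwise the computed size would be negative.
 */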
enum ListenerDirection { Forward, Reverse };

#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...)    \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners, link) { \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

#define MEMORY_LISTENER_CALL(_as, _callback, _direction, _section, _args...) \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &(_as)->listeners, link_as) {     \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &(_as)->listeners, link_as) { \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

/* No need to ref/unref .mr, the FlatRange keeps it alive.  */
#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...)  \
    do {                                                                \
        MemoryRegionSection mrs = section_from_flat_range(fr,           \
                address_space_to_flatview(as));                         \
        MEMORY_LISTENER_CALL(as, callback, dir, &mrs, ##_args);         \
    } while (0)
struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    EventNotifier *e;
};
static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd *a,
                                           MemoryRegionIoeventfd *b)
{
    if (int128_lt(a->addr.start, b->addr.start)) {
        return true;
    } else if (int128_gt(a->addr.start, b->addr.start)) {
        return false;
    } else if (int128_lt(a->addr.size, b->addr.size)) {
        return true;
    } else if (int128_gt(a->addr.size, b->addr.size)) {
        return false;
    } else if (a->match_data < b->match_data) {
        return true;
    } else if (a->match_data > b->match_data) {
        return false;
    } else if (a->match_data) {
        if (a->data < b->data) {
            return true;
        } else if (a->data > b->data) {
            return false;
        }
    }
    if (a->e < b->e) {
        return true;
    } else if (a->e > b->e) {
        return false;
    }
    return false;
}
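
/*
 * Editor's note: the comparison above is a lexicographic order on
 * (addr.start, addr.size, match_data, data, e).  Keeping the per-region and
 * per-address-space ioeventfd arrays sorted by this order is what allows
 * address_space_add_del_ioeventfds() below to compute the symmetric
 * difference of the old and new sets in a single linear merge pass.
 */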
static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd *a,
                                          MemoryRegionIoeventfd *b)
{
    return !memory_region_ioeventfd_before(a, b)
        && !memory_region_ioeventfd_before(b, a);
}

/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    hwaddr offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool romd_mode;
    bool readonly;
    bool nonvolatile;
    int has_coalesced_range;
};

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)
static inline MemoryRegionSection
section_from_flat_range(FlatRange *fr, FlatView *fv)
{
    return (MemoryRegionSection) {
        .mr = fr->mr,
        .fv = fv,
        .offset_within_region = fr->offset_in_region,
        .size = fr->addr.size,
        .offset_within_address_space = int128_get64(fr->addr.start),
        .readonly = fr->readonly,
        .nonvolatile = fr->nonvolatile,
    };
}

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->romd_mode == b->romd_mode
        && a->readonly == b->readonly
        && a->nonvolatile == b->nonvolatile;
}
static FlatView *flatview_new(MemoryRegion *mr_root)
{
    FlatView *view;

    view = g_new0(FlatView, 1);
    view->ref = 1;
    view->root = mr_root;
    memory_region_ref(mr_root);
    trace_flatview_new(view, mr_root);

    return view;
}

/* Insert a range into a given position.  Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                 view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    memory_region_ref(range->mr);
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    int i;

    trace_flatview_destroy(view, view->root);
    if (view->dispatch) {
        address_space_dispatch_free(view->dispatch);
    }
    for (i = 0; i < view->nr; i++) {
        memory_region_unref(view->ranges[i].mr);
    }
    g_free(view->ranges);
    memory_region_unref(view->root);
    g_free(view);
}

static bool flatview_ref(FlatView *view)
{
    return atomic_fetch_inc_nonzero(&view->ref) > 0;
}

void flatview_unref(FlatView *view)
{
    if (atomic_fetch_dec(&view->ref) == 1) {
        trace_flatview_destroy_rcu(view, view->root);
        assert(view->root);
        call_rcu(view, flatview_destroy, rcu);
    }
}
static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
        && r1->mr == r2->mr
        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
                                r1->addr.size),
                     int128_make64(r2->offset_in_region))
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->romd_mode == r2->romd_mode
        && r1->readonly == r2->readonly
        && r1->nonvolatile == r2->nonvolatile;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j, k;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        for (k = i; k < j; k++) {
            memory_region_unref(view->ranges[k].mr);
        }
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}
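
/*
 * Editor's note: two FlatRanges merge only when they are adjacent both in
 * guest physical address space and inside the backing MemoryRegion, with
 * identical attributes.  For example, two neighbouring 2 KiB slices of the
 * same RAM region collapse into a single 4 KiB FlatRange, shrinking the
 * view that listeners (e.g. KVM memslots) have to track.
 */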
static bool memory_region_big_endian(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static bool memory_region_wrong_endianness(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness == DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size)
{
    if (memory_region_wrong_endianness(mr)) {
        switch (size) {
        case 1:
            break;
        case 2:
            *data = bswap16(*data);
            break;
        case 4:
            *data = bswap32(*data);
            break;
        case 8:
            *data = bswap64(*data);
            break;
        default:
            abort();
        }
    }
}
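
/*
 * Editor's note: a 4-byte value 0x12345678 read from a device whose declared
 * endianness disagrees with the target becomes 0x78563412 after the
 * bswap32() above; 1-byte accesses need no swapping and fall through.
 */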
static inline void memory_region_shift_read_access(uint64_t *value,
                                                   signed shift,
                                                   uint64_t mask,
                                                   uint64_t tmp)
{
    if (shift >= 0) {
        *value |= (tmp & mask) << shift;
    } else {
        *value |= (tmp & mask) >> -shift;
    }
}

static inline uint64_t memory_region_shift_write_access(uint64_t *value,
                                                        signed shift,
                                                        uint64_t mask)
{
    uint64_t tmp;

    if (shift >= 0) {
        tmp = (*value >> shift) & mask;
    } else {
        tmp = (*value << -shift) & mask;
    }

    return tmp;
}

static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset)
{
    MemoryRegion *root;
    hwaddr abs_addr = offset;

    abs_addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        abs_addr += root->addr;
    }

    return abs_addr;
}
static int get_cpu_index(void)
{
    if (current_cpu) {
        return current_cpu->cpu_index;
    }
    return -1;
}

static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
                                               hwaddr addr,
                                               uint64_t *value,
                                               unsigned size,
                                               signed shift,
                                               uint64_t mask,
                                               MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->read(mr->opaque, addr, size);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    memory_region_shift_read_access(value, shift, mask, tmp);
    return MEMTX_OK;
}
static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          signed shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs)
{
    uint64_t tmp = 0;
    MemTxResult r;

    r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    memory_region_shift_read_access(value, shift, mask, tmp);
    return r;
}

static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *value,
                                                unsigned size,
                                                signed shift,
                                                uint64_t mask,
                                                MemTxAttrs attrs)
{
    uint64_t tmp = memory_region_shift_write_access(value, shift, mask);

    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->write(mr->opaque, addr, tmp, size);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
                                                           hwaddr addr,
                                                           uint64_t *value,
                                                           unsigned size,
                                                           signed shift,
                                                           uint64_t mask,
                                                           MemTxAttrs attrs)
{
    uint64_t tmp = memory_region_shift_write_access(value, shift, mask);

    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs);
}
static MemTxResult access_with_adjusted_size(hwaddr addr,
                                      uint64_t *value,
                                      unsigned size,
                                      unsigned access_size_min,
                                      unsigned access_size_max,
                                      MemTxResult (*access_fn)
                                                  (MemoryRegion *mr,
                                                   hwaddr addr,
                                                   uint64_t *value,
                                                   unsigned size,
                                                   signed shift,
                                                   uint64_t mask,
                                                   MemTxAttrs attrs),
                                      MemoryRegion *mr,
                                      MemTxAttrs attrs)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;
    MemTxResult r = MEMTX_OK;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }

    /* FIXME: support unaligned access? */
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = MAKE_64BIT_MASK(0, access_size * 8);
    if (memory_region_big_endian(mr)) {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size,
                        (size - access_size - i) * 8, access_mask, attrs);
        }
    } else {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size, i * 8,
                        access_mask, attrs);
        }
    }
    return r;
}
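
/*
 * Editor's note, illustrating the loops above: a 4-byte read from a device
 * with a maximum access size of 2 is split into two 2-byte calls of
 * access_fn.  With access_mask = 0xffff, a little-endian device gets shifts
 * 0 and 16, so the low half lands in bits 0-15 of *value and the high half
 * in bits 16-31; a big-endian device gets the same shifts in the opposite
 * order, via (size - access_size - i) * 8.
 */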
static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
{
    AddressSpace *as;

    while (mr->container) {
        mr = mr->container;
    }
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (mr == as->root) {
            return as;
        }
    }
    return NULL;
}
/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 Int128 base,
                                 AddrRange clip,
                                 bool readonly,
                                 bool nonvolatile)
{
    MemoryRegion *subregion;
    unsigned i;
    hwaddr offset_in_region;
    Int128 remain;
    Int128 now;
    FlatRange fr;
    AddrRange tmp;

    if (!mr->enabled) {
        return;
    }

    int128_addto(&base, int128_make64(mr->addr));
    readonly |= mr->readonly;
    nonvolatile |= mr->nonvolatile;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        int128_subfrom(&base, int128_make64(mr->alias->addr));
        int128_subfrom(&base, int128_make64(mr->alias_offset));
        render_memory_region(view, mr->alias, base, clip,
                             readonly, nonvolatile);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip,
                             readonly, nonvolatile);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = int128_get64(int128_sub(clip.start, base));
    base = clip.start;
    remain = clip.size;

    fr.mr = mr;
    fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    fr.romd_mode = mr->romd_mode;
    fr.readonly = readonly;
    fr.nonvolatile = nonvolatile;
    fr.has_coalesced_range = 0;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
            continue;
        }
        if (int128_lt(base, view->ranges[i].addr.start)) {
            now = int128_min(remain,
                             int128_sub(view->ranges[i].addr.start, base));
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            flatview_insert(view, i, &fr);
            ++i;
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
        now = int128_sub(int128_min(int128_add(base, remain),
                                    addrrange_end(view->ranges[i].addr)),
                         base);
        int128_addto(&base, now);
        offset_in_region += int128_get64(now);
        int128_subfrom(&remain, now);
    }
    if (int128_nz(remain)) {
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        flatview_insert(view, i, &fr);
    }
}
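
/*
 * Editor's note: because subregions were rendered first (in priority order),
 * the loop above only fills the gaps they left.  E.g. with a 4 KiB MMIO
 * subregion at offset 0x1000 of a 16 KiB RAM container, the container's own
 * contents end up as two FlatRanges, [0, 0x1000) and [0x2000, 0x4000),
 * around the already-inserted subregion.
 */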
static MemoryRegion *memory_region_get_flatview_root(MemoryRegion *mr)
{
    while (mr->enabled) {
        if (mr->alias) {
            if (!mr->alias_offset && int128_ge(mr->size, mr->alias->size)) {
                /* The alias is included in its entirety.  Use it as
                 * the "real" root, so that we can share more FlatViews.
                 */
                mr = mr->alias;
                continue;
            }
        } else if (!mr->terminates) {
            unsigned int found = 0;
            MemoryRegion *child, *next = NULL;
            QTAILQ_FOREACH(child, &mr->subregions, subregions_link) {
                if (child->enabled) {
                    if (++found > 1) {
                        next = NULL;
                        break;
                    }
                    if (!child->addr && int128_ge(mr->size, child->size)) {
                        /* A child is included in its entirety.  If it's the only
                         * enabled one, use it in the hope of finding an alias down the
                         * way. This will also let us share FlatViews.
                         */
                        next = child;
                    }
                }
            }
            if (found == 0) {
                return NULL;
            }
            if (next) {
                mr = next;
                continue;
            }
        }

        return mr;
    }

    return NULL;
}
/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView *generate_memory_topology(MemoryRegion *mr)
{
    int i;
    FlatView *view;

    view = flatview_new(mr);

    if (mr) {
        render_memory_region(view, mr, int128_zero(),
                             addrrange_make(int128_zero(), int128_2_64()),
                             false, false);
    }
    flatview_simplify(view);

    view->dispatch = address_space_dispatch_new(view);
    for (i = 0; i < view->nr; i++) {
        MemoryRegionSection mrs =
            section_from_flat_range(&view->ranges[i], view);
        flatview_add_to_dispatch(view, &mrs);
    }
    address_space_dispatch_compact(view->dispatch);
    g_hash_table_replace(flat_views, mr, view);

    return view;
}
static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;
    MemoryRegionIoeventfd *fd;
    MemoryRegionSection section;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(&fds_old[iold],
                                                  &fds_new[inew]))) {
            fd = &fds_old[iold];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_del, Forward, &section,
                                 fd->match_data, fd->data, fd->e);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(&fds_new[inew],
                                                         &fds_old[iold]))) {
            fd = &fds_new[inew];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_add, Reverse, &section,
                                 fd->match_data, fd->data, fd->e);
            ++inew;
        } else {
            /* Identical - skip */
            ++iold;
            ++inew;
        }
    }
}
FlatView *address_space_get_flatview(AddressSpace *as)
{
    FlatView *view;

    rcu_read_lock();
    do {
        view = address_space_to_flatview(as);
        /* If somebody has replaced as->current_map concurrently,
         * flatview_ref returns false.
         */
    } while (!flatview_ref(view));
    rcu_read_unlock();
    return view;
}
static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    MemoryRegionIoeventfd *ioeventfds = NULL;
    AddrRange tmp;
    unsigned i;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  int128_sub(fr->addr.start,
                                             int128_make64(fr->offset_in_region)));
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                ioeventfds = g_realloc(ioeventfds,
                                       ioeventfd_nb * sizeof(*ioeventfds));
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
    flatview_unref(view);
}
static void flat_range_coalesced_io_del(FlatRange *fr, AddressSpace *as)
{
    if (!fr->has_coalesced_range) {
        return;
    }

    if (--fr->has_coalesced_range > 0) {
        return;
    }

    MEMORY_LISTENER_UPDATE_REGION(fr, as, Reverse, coalesced_io_del,
                                  int128_get64(fr->addr.start),
                                  int128_get64(fr->addr.size));
}

static void flat_range_coalesced_io_add(FlatRange *fr, AddressSpace *as)
{
    MemoryRegion *mr = fr->mr;
    CoalescedMemoryRange *cmr;
    AddrRange tmp;

    if (QTAILQ_EMPTY(&mr->coalesced)) {
        return;
    }

    if (fr->has_coalesced_range++) {
        return;
    }

    QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
        tmp = addrrange_shift(cmr->addr,
                              int128_sub(fr->addr.start,
                                         int128_make64(fr->offset_in_region)));
        if (!addrrange_intersects(tmp, fr->addr)) {
            continue;
        }
        tmp = addrrange_intersection(tmp, fr->addr);
        MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, coalesced_io_add,
                                      int128_get64(tmp.start),
                                      int128_get64(tmp.size));
    }
}
static void address_space_update_topology_pass(AddressSpace *as,
                                               const FlatView *old_view,
                                               const FlatView *new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view->nr || inew < new_view->nr) {
        if (iold < old_view->nr) {
            frold = &old_view->ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view->nr) {
            frnew = &new_view->ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || int128_lt(frold->addr.start, frnew->addr.start)
                || (int128_eq(frold->addr.start, frnew->addr.start)
                    && !flatrange_equal(frold, frnew)))) {
            /* In old but not in new, or in both but attributes changed. */

            if (!adding) {
                flat_range_coalesced_io_del(frold, as);
                MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both and unchanged (except logging may have changed) */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
                if (frnew->dirty_log_mask & ~frold->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
                if (frold->dirty_log_mask & ~frnew->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
                flat_range_coalesced_io_add(frnew, as);
            }

            ++inew;
        }
    }
}
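
/*
 * Editor's note: the function above is deliberately run twice by
 * address_space_set_flatview(), first with adding = false and then with
 * adding = true.  The first pass only emits region_del/log_stop for ranges
 * that disappear or change; the second only emits region_add/region_nop/
 * log_start for ranges that appear or survive.  All deletions therefore
 * precede all additions, so listeners never see overlapping ranges.
 */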
static void flatviews_init(void)
{
    static FlatView *empty_view;

    if (flat_views) {
        return;
    }

    flat_views = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL,
                                       (GDestroyNotify) flatview_unref);
    if (!empty_view) {
        empty_view = generate_memory_topology(NULL);
        /* We keep it alive forever in the global variable.  */
        flatview_ref(empty_view);
    } else {
        g_hash_table_replace(flat_views, NULL, empty_view);
        flatview_ref(empty_view);
    }
}

static void flatviews_reset(void)
{
    AddressSpace *as;

    if (flat_views) {
        g_hash_table_unref(flat_views);
        flat_views = NULL;
    }
    flatviews_init();

    /* Render unique FVs */
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        MemoryRegion *physmr = memory_region_get_flatview_root(as->root);

        if (g_hash_table_lookup(flat_views, physmr)) {
            continue;
        }

        generate_memory_topology(physmr);
    }
}
static void address_space_set_flatview(AddressSpace *as)
{
    FlatView *old_view = address_space_to_flatview(as);
    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);
    FlatView *new_view = g_hash_table_lookup(flat_views, physmr);

    assert(new_view);

    if (old_view == new_view) {
        return;
    }

    if (old_view) {
        flatview_ref(old_view);
    }

    flatview_ref(new_view);

    if (!QTAILQ_EMPTY(&as->listeners)) {
        FlatView tmpview = { .nr = 0 }, *old_view2 = old_view;

        if (!old_view2) {
            old_view2 = &tmpview;
        }
        address_space_update_topology_pass(as, old_view2, new_view, false);
        address_space_update_topology_pass(as, old_view2, new_view, true);
    }

    /* Writes are protected by the BQL.  */
    atomic_rcu_set(&as->current_map, new_view);
    if (old_view) {
        flatview_unref(old_view);
    }

    /* Note that all the old MemoryRegions are still alive up to this
     * point.  This relieves most MemoryListeners from the need to
     * ref/unref the MemoryRegions they get---unless they use them
     * outside the iothread mutex, in which case precise reference
     * counting is necessary.
     */
    if (old_view) {
        flatview_unref(old_view);
    }
}

static void address_space_update_topology(AddressSpace *as)
{
    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);

    flatviews_init();
    if (!g_hash_table_lookup(flat_views, physmr)) {
        generate_memory_topology(physmr);
    }
    address_space_set_flatview(as);
}
void memory_region_transaction_begin(void)
{
    qemu_flush_coalesced_mmio_buffer();
    ++memory_region_transaction_depth;
}

void memory_region_transaction_commit(void)
{
    AddressSpace *as;

    assert(memory_region_transaction_depth);
    assert(qemu_mutex_iothread_locked());

    --memory_region_transaction_depth;
    if (!memory_region_transaction_depth) {
        if (memory_region_update_pending) {
            flatviews_reset();

            MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);

            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_set_flatview(as);
                address_space_update_ioeventfds(as);
            }
            memory_region_update_pending = false;
            ioeventfd_update_pending = false;
            MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
        } else if (ioeventfd_update_pending) {
            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_ioeventfds(as);
            }
            ioeventfd_update_pending = false;
        }
    }
}
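
/*
 * Editor's note: a typical caller batches topology changes so the flat
 * views and ioeventfds are rebuilt only once, e.g. (sketch):
 *
 *     memory_region_transaction_begin();
 *     memory_region_set_enabled(bank0, false);
 *     memory_region_set_address(bank1, new_base);
 *     memory_region_transaction_commit();    <- single rebuild here
 *
 * Nested begin/commit pairs are allowed; only the outermost commit
 * triggers the update.
 */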
static void memory_region_destructor_none(MemoryRegion *mr)
{
}

static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_block);
}

static bool memory_region_need_escape(char c)
{
    return c == '/' || c == '[' || c == '\\' || c == ']';
}

static char *memory_region_escape_name(const char *name)
{
    const char *p;
    char *escaped, *q;
    uint8_t c;
    size_t bytes = 0;

    for (p = name; *p; p++) {
        bytes += memory_region_need_escape(*p) ? 4 : 1;
    }
    if (bytes == p - name) {
        return g_memdup(name, bytes + 1);
    }

    escaped = g_malloc(bytes + 1);
    for (p = name, q = escaped; *p; p++) {
        c = *p;
        if (unlikely(memory_region_need_escape(c))) {
            *q++ = '\\';
            *q++ = 'x';
            *q++ = "0123456789abcdef"[c >> 4];
            c = "0123456789abcdef"[c & 15];
        }
        *q++ = c;
    }
    *q = 0;
    return escaped;
}
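
/*
 * Editor's note: only '/', '[', '\' and ']' are escaped, as "\xNN" with the
 * two hex digits built above.  For example the name "pci[0]" becomes
 * "pci\x5b0\x5d", which is then safe to embed in the QOM path component
 * "name[*]" used by memory_region_do_init() below.
 */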
static void memory_region_do_init(MemoryRegion *mr,
                                  Object *owner,
                                  const char *name,
                                  uint64_t size)
{
    mr->size = int128_make64(size);
    if (size == UINT64_MAX) {
        mr->size = int128_2_64();
    }
    mr->name = g_strdup(name);
    mr->owner = owner;
    mr->ram_block = NULL;

    if (name) {
        char *escaped_name = memory_region_escape_name(name);
        char *name_array = g_strdup_printf("%s[*]", escaped_name);

        if (!owner) {
            owner = container_get(qdev_get_machine(), "/unattached");
        }

        object_property_add_child(owner, name_array, OBJECT(mr), &error_abort);
        object_unref(OBJECT(mr));
        g_free(name_array);
        g_free(escaped_name);
    }
}

void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size)
{
    object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION);
    memory_region_do_init(mr, owner, name, size);
}
static void memory_region_get_addr(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = mr->addr;

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_get_container(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    gchar *path = (gchar *)"";

    if (mr->container) {
        path = object_get_canonical_path(OBJECT(mr->container));
    }
    visit_type_str(v, name, &path, errp);
    if (mr->container) {
        g_free(path);
    }
}

static Object *memory_region_resolve_container(Object *obj, void *opaque,
                                               const char *part)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    return OBJECT(mr->container);
}

static void memory_region_get_priority(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    int32_t value = mr->priority;

    visit_type_int32(v, name, &value, errp);
}

static void memory_region_get_size(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = memory_region_size(mr);

    visit_type_uint64(v, name, &value, errp);
}
static void memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    ObjectProperty *op;

    mr->ops = &unassigned_mem_ops;
    mr->enabled = true;
    mr->romd_mode = true;
    mr->global_locking = true;
    mr->destructor = memory_region_destructor_none;
    QTAILQ_INIT(&mr->subregions);
    QTAILQ_INIT(&mr->coalesced);

    op = object_property_add(OBJECT(mr), "container",
                             "link<" TYPE_MEMORY_REGION ">",
                             memory_region_get_container,
                             NULL, /* memory_region_set_container */
                             NULL, NULL, &error_abort);
    op->resolve = memory_region_resolve_container;

    object_property_add(OBJECT(mr), "addr", "uint64",
                        memory_region_get_addr,
                        NULL, /* memory_region_set_addr */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "priority", "uint32",
                        memory_region_get_priority,
                        NULL, /* memory_region_set_priority */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "size", "uint64",
                        memory_region_get_size,
                        NULL, /* memory_region_set_size, */
                        NULL, NULL, &error_abort);
}

static void iommu_memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    mr->is_iommu = true;
}
static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
    if (current_cpu != NULL) {
        bool is_exec = current_cpu->mem_io_access_type == MMU_INST_FETCH;
        cpu_unassigned_access(current_cpu, addr, false, is_exec, 0, size);
    }
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, true, false, 0, size);
    }
}

static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
                                   unsigned size, bool is_write,
                                   MemTxAttrs attrs)
{
    return false;
}

const MemoryRegionOps unassigned_mem_ops = {
    .valid.accepts = unassigned_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static uint64_t memory_region_ram_device_read(void *opaque,
                                              hwaddr addr, unsigned size)
{
    MemoryRegion *mr = opaque;
    uint64_t data = (uint64_t)~0;

    switch (size) {
    case 1:
        data = *(uint8_t *)(mr->ram_block->host + addr);
        break;
    case 2:
        data = *(uint16_t *)(mr->ram_block->host + addr);
        break;
    case 4:
        data = *(uint32_t *)(mr->ram_block->host + addr);
        break;
    case 8:
        data = *(uint64_t *)(mr->ram_block->host + addr);
        break;
    }

    trace_memory_region_ram_device_read(get_cpu_index(), mr, addr, data, size);

    return data;
}

static void memory_region_ram_device_write(void *opaque, hwaddr addr,
                                           uint64_t data, unsigned size)
{
    MemoryRegion *mr = opaque;

    trace_memory_region_ram_device_write(get_cpu_index(), mr, addr, data, size);

    switch (size) {
    case 1:
        *(uint8_t *)(mr->ram_block->host + addr) = (uint8_t)data;
        break;
    case 2:
        *(uint16_t *)(mr->ram_block->host + addr) = (uint16_t)data;
        break;
    case 4:
        *(uint32_t *)(mr->ram_block->host + addr) = (uint32_t)data;
        break;
    case 8:
        *(uint64_t *)(mr->ram_block->host + addr) = data;
        break;
    }
}

static const MemoryRegionOps ram_device_mem_ops = {
    .read = memory_region_ram_device_read,
    .write = memory_region_ram_device_write,
    .endianness = DEVICE_HOST_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
};
bool memory_region_access_valid(MemoryRegion *mr,
                                hwaddr addr,
                                unsigned size,
                                bool is_write,
                                MemTxAttrs attrs)
{
    int access_size_min, access_size_max;
    int access_size, i;

    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        return false;
    }

    if (!mr->ops->valid.accepts) {
        return true;
    }

    access_size_min = mr->ops->valid.min_access_size;
    if (!mr->ops->valid.min_access_size) {
        access_size_min = 1;
    }

    access_size_max = mr->ops->valid.max_access_size;
    if (!mr->ops->valid.max_access_size) {
        access_size_max = 4;
    }

    access_size = MAX(MIN(size, access_size_max), access_size_min);
    for (i = 0; i < size; i += access_size) {
        if (!mr->ops->valid.accepts(mr->opaque, addr + i, access_size,
                                    is_write, attrs)) {
            return false;
        }
    }

    return true;
}
static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *pval,
                                                unsigned size,
                                                MemTxAttrs attrs)
{
    *pval = 0;

    if (mr->ops->read) {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_accessor,
                                         mr, attrs);
    } else {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_with_attrs_accessor,
                                         mr, attrs);
    }
}

MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
                                        hwaddr addr,
                                        uint64_t *pval,
                                        unsigned size,
                                        MemTxAttrs attrs)
{
    MemTxResult r;

    if (!memory_region_access_valid(mr, addr, size, false, attrs)) {
        *pval = unassigned_mem_read(mr, addr, size);
        return MEMTX_DECODE_ERROR;
    }

    r = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
    adjust_endianness(mr, pval, size);
    return r;
}
/* Return true if an eventfd was signalled */
static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr,
                                                  hwaddr addr,
                                                  uint64_t data,
                                                  unsigned size,
                                                  MemTxAttrs attrs)
{
    MemoryRegionIoeventfd ioeventfd = {
        .addr = addrrange_make(int128_make64(addr), int128_make64(size)),
        .data = data,
    };
    unsigned i;

    for (i = 0; i < mr->ioeventfd_nb; i++) {
        ioeventfd.match_data = mr->ioeventfds[i].match_data;
        ioeventfd.e = mr->ioeventfds[i].e;

        if (memory_region_ioeventfd_equal(&ioeventfd, &mr->ioeventfds[i])) {
            event_notifier_set(ioeventfd.e);
            return true;
        }
    }

    return false;
}

MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
                                         hwaddr addr,
                                         uint64_t data,
                                         unsigned size,
                                         MemTxAttrs attrs)
{
    if (!memory_region_access_valid(mr, addr, size, true, attrs)) {
        unassigned_mem_write(mr, addr, data, size);
        return MEMTX_DECODE_ERROR;
    }

    adjust_endianness(mr, &data, size);

    if ((!kvm_eventfds_enabled()) &&
        memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) {
        return MEMTX_OK;
    }

    if (mr->ops->write) {
        return access_with_adjusted_size(addr, &data, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_write_accessor, mr,
                                         attrs);
    } else {
        return
            access_with_adjusted_size(addr, &data, size,
                                      mr->ops->impl.min_access_size,
                                      mr->ops->impl.max_access_size,
                                      memory_region_write_with_attrs_accessor,
                                      mr, attrs);
    }
}
void memory_region_init_io(MemoryRegion *mr,
                           Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->ops = ops ? ops : &unassigned_mem_ops;
    mr->opaque = opaque;
    mr->terminates = true;
}
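
/*
 * Editor's note: a minimal usage sketch (hypothetical device, not part of
 * this file), showing how a device pairs a MemoryRegionOps table with
 * memory_region_init_io():
 *
 *     static uint64_t mydev_read(void *opaque, hwaddr addr, unsigned size)
 *     {
 *         MyDevState *s = opaque;
 *         return s->regs[addr >> 2];
 *     }
 *
 *     static const MemoryRegionOps mydev_ops = {
 *         .read = mydev_read,
 *         .write = mydev_write,
 *         .endianness = DEVICE_LITTLE_ENDIAN,
 *         .impl.min_access_size = 4,
 *         .impl.max_access_size = 4,
 *     };
 *
 *     memory_region_init_io(&s->iomem, OBJECT(s), &mydev_ops, s,
 *                           "mydev-mmio", 0x1000);
 *
 * Accesses of other sizes are split or merged by access_with_adjusted_size()
 * according to .impl before they reach the callbacks.
 */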
void memory_region_init_ram_nomigrate(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp)
{
    memory_region_init_ram_shared_nomigrate(mr, owner, name, size, false, errp);
}

void memory_region_init_ram_shared_nomigrate(MemoryRegion *mr,
                                             Object *owner,
                                             const char *name,
                                             uint64_t size,
                                             bool share,
                                             Error **errp)
{
    Error *err = NULL;
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, share, mr, &err);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
}

void memory_region_init_resizeable_ram(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       uint64_t max_size,
                                       void (*resized)(const char*,
                                                       uint64_t length,
                                                       void *host),
                                       Error **errp)
{
    Error *err = NULL;
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
                                              mr, &err);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
}
*mr
,
1588 struct Object
*owner
,
1597 memory_region_init(mr
, owner
, name
, size
);
1599 mr
->terminates
= true;
1600 mr
->destructor
= memory_region_destructor_ram
;
1602 mr
->ram_block
= qemu_ram_alloc_from_file(size
, mr
, ram_flags
, path
, &err
);
1603 mr
->dirty_log_mask
= tcg_enabled() ? (1 << DIRTY_MEMORY_CODE
) : 0;
1605 mr
->size
= int128_zero();
1606 object_unparent(OBJECT(mr
));
1607 error_propagate(errp
, err
);
1611 void memory_region_init_ram_from_fd(MemoryRegion
*mr
,
1612 struct Object
*owner
,
1620 memory_region_init(mr
, owner
, name
, size
);
1622 mr
->terminates
= true;
1623 mr
->destructor
= memory_region_destructor_ram
;
1624 mr
->ram_block
= qemu_ram_alloc_from_fd(size
, mr
,
1625 share
? RAM_SHARED
: 0,
1627 mr
->dirty_log_mask
= tcg_enabled() ? (1 << DIRTY_MEMORY_CODE
) : 0;
1629 mr
->size
= int128_zero();
1630 object_unparent(OBJECT(mr
));
1631 error_propagate(errp
, err
);
void memory_region_init_ram_ptr(MemoryRegion *mr,
                                Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;

    /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL.  */
    assert(ptr != NULL);
    mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
}

void memory_region_init_ram_device_ptr(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       void *ptr)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->ram_device = true;
    mr->ops = &ram_device_mem_ops;
    mr->opaque = mr;
    mr->destructor = memory_region_destructor_ram;
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
    /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL.  */
    assert(ptr != NULL);
    mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
}
void memory_region_init_alias(MemoryRegion *mr,
                              Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->alias = orig;
    mr->alias_offset = offset;
}
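
/*
 * Editor's note (illustrative sketch, not from this file): aliases map a
 * slice of one region at a second guest-physical location, e.g. mirroring a
 * hypothetical device ROM at the reset vector:
 *
 *     memory_region_init_alias(&boot_alias, NULL, "boot-rom-alias",
 *                              &s->rom, 0, 0x1000);
 *     memory_region_add_subregion(get_system_memory(), 0xfffff000,
 *                                 &boot_alias);
 *
 * The alias owns no memory itself; render_memory_region() resolves accesses
 * into the original region via alias_offset.
 */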
void memory_region_init_rom_nomigrate(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp)
{
    Error *err = NULL;
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->readonly = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, false, mr, &err);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
}

void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
                                             struct Object *owner,
                                             const MemoryRegionOps *ops,
                                             void *opaque,
                                             const char *name,
                                             uint64_t size,
                                             Error **errp)
{
    Error *err = NULL;
    assert(ops);
    memory_region_init(mr, owner, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->rom_device = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, false, mr, &err);
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
}
void memory_region_init_iommu(void *_iommu_mr,
                              size_t instance_size,
                              const char *mrtypename,
                              Object *owner,
                              const char *name,
                              uint64_t size)
{
    struct IOMMUMemoryRegion *iommu_mr;
    struct MemoryRegion *mr;

    object_initialize(_iommu_mr, instance_size, mrtypename);
    mr = MEMORY_REGION(_iommu_mr);
    memory_region_do_init(mr, owner, name, size);
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    mr->terminates = true;  /* then re-forwards */
    QLIST_INIT(&iommu_mr->iommu_notify);
    iommu_mr->iommu_notify_flags = IOMMU_NOTIFIER_NONE;
}
static void memory_region_finalize(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    assert(!mr->container);

    /* We know the region is not visible in any address space (it
     * does not have a container and cannot be a root either because
     * it has no references, so we can blindly clear mr->enabled.
     * memory_region_set_enabled instead could trigger a transaction
     * and cause an infinite loop.
     */
    mr->enabled = false;
    memory_region_transaction_begin();
    while (!QTAILQ_EMPTY(&mr->subregions)) {
        MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions);
        memory_region_del_subregion(mr, subregion);
    }
    memory_region_transaction_commit();

    mr->destructor(mr);
    memory_region_clear_coalescing(mr);
    g_free((char *)mr->name);
    g_free(mr->ioeventfds);
}

Object *memory_region_owner(MemoryRegion *mr)
{
    Object *obj = OBJECT(mr);
    return obj->parent;
}
void memory_region_ref(MemoryRegion *mr)
{
    /* MMIO callbacks most likely will access data that belongs
     * to the owner, hence the need to ref/unref the owner whenever
     * the memory region is in use.
     *
     * The memory region is a child of its owner.  As long as the
     * owner doesn't call unparent itself on the memory region,
     * ref-ing the owner will also keep the memory region alive.
     * Memory regions without an owner are supposed to never go away;
     * we do not ref/unref them because it slows down DMA sensibly.
     */
    if (mr && mr->owner) {
        object_ref(mr->owner);
    }
}

void memory_region_unref(MemoryRegion *mr)
{
    if (mr && mr->owner) {
        object_unref(mr->owner);
    }
}

uint64_t memory_region_size(MemoryRegion *mr)
{
    if (int128_eq(mr->size, int128_2_64())) {
        return UINT64_MAX;
    }
    return int128_get64(mr->size);
}

const char *memory_region_name(const MemoryRegion *mr)
{
    if (!mr->name) {
        ((MemoryRegion *)mr)->name =
            object_get_canonical_path_component(OBJECT(mr));
    }
    return mr->name;
}

bool memory_region_is_ram_device(MemoryRegion *mr)
{
    return mr->ram_device;
}
uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
{
    uint8_t mask = mr->dirty_log_mask;
    if (global_dirty_log && mr->ram_block) {
        mask |= (1 << DIRTY_MEMORY_MIGRATION);
    }
    return mask;
}

bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
{
    return memory_region_get_dirty_log_mask(mr) & (1 << client);
}

static void memory_region_update_iommu_notify_flags(IOMMUMemoryRegion *iommu_mr)
{
    IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE;
    IOMMUNotifier *iommu_notifier;
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
        flags |= iommu_notifier->notifier_flags;
    }

    if (flags != iommu_mr->iommu_notify_flags && imrc->notify_flag_changed) {
        imrc->notify_flag_changed(iommu_mr,
                                  iommu_mr->iommu_notify_flags,
                                  flags);
    }

    iommu_mr->iommu_notify_flags = flags;
}
void memory_region_register_iommu_notifier(MemoryRegion *mr,
                                           IOMMUNotifier *n)
{
    IOMMUMemoryRegion *iommu_mr;

    if (mr->alias) {
        memory_region_register_iommu_notifier(mr->alias, n);
        return;
    }

    /* We need to register for at least one bitfield */
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
    assert(n->start <= n->end);
    assert(n->iommu_idx >= 0 &&
           n->iommu_idx < memory_region_iommu_num_indexes(iommu_mr));

    QLIST_INSERT_HEAD(&iommu_mr->iommu_notify, n, node);
    memory_region_update_iommu_notify_flags(iommu_mr);
}

uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    if (imrc->get_min_page_size) {
        return imrc->get_min_page_size(iommu_mr);
    }
    return TARGET_PAGE_SIZE;
}

void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
{
    MemoryRegion *mr = MEMORY_REGION(iommu_mr);
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
    hwaddr addr, granularity;
    IOMMUTLBEntry iotlb;

    /* If the IOMMU has its own replay callback, override */
    if (imrc->replay) {
        imrc->replay(iommu_mr, n);
        return;
    }

    granularity = memory_region_iommu_get_min_page_size(iommu_mr);

    for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
        iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, n->iommu_idx);
        if (iotlb.perm != IOMMU_NONE) {
            n->notify(n, &iotlb);
        }

        /* if (2^64 - MR size) < granularity, it's possible to get an
         * infinite loop here.  This should catch such a wraparound */
        if ((addr + granularity) < addr) {
            break;
        }
    }
}
void memory_region_iommu_replay_all(IOMMUMemoryRegion *iommu_mr)
{
    IOMMUNotifier *notifier;

    IOMMU_NOTIFIER_FOREACH(notifier, iommu_mr) {
        memory_region_iommu_replay(iommu_mr, notifier);
    }
}

void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
                                             IOMMUNotifier *n)
{
    IOMMUMemoryRegion *iommu_mr;

    if (mr->alias) {
        memory_region_unregister_iommu_notifier(mr->alias, n);
        return;
    }
    QLIST_REMOVE(n, node);
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    memory_region_update_iommu_notify_flags(iommu_mr);
}

void memory_region_notify_one(IOMMUNotifier *notifier,
                              IOMMUTLBEntry *entry)
{
    IOMMUNotifierFlag request_flags;

    /*
     * Skip the notification if the notification does not overlap
     * with registered range.
     */
    if (notifier->start > entry->iova + entry->addr_mask ||
        notifier->end < entry->iova) {
        return;
    }

    if (entry->perm & IOMMU_RW) {
        request_flags = IOMMU_NOTIFIER_MAP;
    } else {
        request_flags = IOMMU_NOTIFIER_UNMAP;
    }

    if (notifier->notifier_flags & request_flags) {
        notifier->notify(notifier, entry);
    }
}

void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
                                int iommu_idx,
                                IOMMUTLBEntry entry)
{
    IOMMUNotifier *iommu_notifier;

    assert(memory_region_is_iommu(MEMORY_REGION(iommu_mr)));

    IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
        if (iommu_notifier->iommu_idx == iommu_idx) {
            memory_region_notify_one(iommu_notifier, &entry);
        }
    }
}
int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
                                 enum IOMMUMemoryRegionAttr attr,
                                 void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    if (!imrc->get_attr) {
        return -EINVAL;
    }

    return imrc->get_attr(iommu_mr, attr, data);
}

int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
                                       MemTxAttrs attrs)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    if (!imrc->attrs_to_index) {
        return 0;
    }

    return imrc->attrs_to_index(iommu_mr, attrs);
}

int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    if (!imrc->num_indexes) {
        return 1;
    }

    return imrc->num_indexes(iommu_mr);
}
void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
{
    uint8_t mask = 1 << client;
    uint8_t old_logging;

    assert(client == DIRTY_MEMORY_VGA);
    old_logging = mr->vga_logging_count;
    mr->vga_logging_count += log ? 1 : -1;
    if (!!old_logging == !!mr->vga_logging_count) {
        return;
    }

    memory_region_transaction_begin();
    mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size)
{
    assert(mr->ram_block);
    cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
                                        size,
                                        memory_region_get_dirty_log_mask(mr));
}
static void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
{
    MemoryListener *listener;
    AddressSpace *as;
    FlatView *view;
    FlatRange *fr;

    /* If the same address space has multiple log_sync listeners, we
     * visit that address space's FlatView multiple times.  But because
     * log_sync listeners are rare, it's still cheaper than walking each
     * address space once.
     */
    QTAILQ_FOREACH(listener, &memory_listeners, link) {
        if (!listener->log_sync) {
            continue;
        }
        as = listener->address_space;
        view = address_space_get_flatview(as);
        FOR_EACH_FLAT_RANGE(fr, view) {
            if (fr->dirty_log_mask && (!mr || fr->mr == mr)) {
                MemoryRegionSection mrs = section_from_flat_range(fr, view);
                listener->log_sync(listener, &mrs);
            }
        }
        flatview_unref(view);
    }
}

void memory_region_clear_dirty_bitmap(MemoryRegion *mr, hwaddr start,
                                      hwaddr len)
{
    MemoryRegionSection mrs;
    MemoryListener *listener;
    AddressSpace *as;
    FlatView *view;
    FlatRange *fr;
    hwaddr sec_start, sec_end, sec_size;

    QTAILQ_FOREACH(listener, &memory_listeners, link) {
        if (!listener->log_clear) {
            continue;
        }
        as = listener->address_space;
        view = address_space_get_flatview(as);
        FOR_EACH_FLAT_RANGE(fr, view) {
            if (!fr->dirty_log_mask || fr->mr != mr) {
                /*
                 * Clear dirty bitmap operation only applies to those
                 * regions whose dirty logging is at least enabled
                 */
                continue;
            }

            mrs = section_from_flat_range(fr, view);

            sec_start = MAX(mrs.offset_within_region, start);
            sec_end = mrs.offset_within_region + int128_get64(mrs.size);
            sec_end = MIN(sec_end, start + len);

            if (sec_start >= sec_end) {
                /*
                 * If this memory region section has no intersection
                 * with the requested range, skip.
                 */
                continue;
            }

            /* Valid case; shrink the section if needed */
            mrs.offset_within_address_space +=
                sec_start - mrs.offset_within_region;
            mrs.offset_within_region = sec_start;
            sec_size = sec_end - sec_start;
            mrs.size = int128_make64(sec_size);
            listener->log_clear(listener, &mrs);
        }
        flatview_unref(view);
    }
}
DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
                                                            hwaddr addr,
                                                            hwaddr size,
                                                            unsigned client)
{
    assert(mr->ram_block);
    memory_region_sync_dirty_bitmap(mr);
    return cpu_physical_memory_snapshot_and_clear_dirty(mr, addr, size, client);
}

bool memory_region_snapshot_get_dirty(MemoryRegion *mr, DirtyBitmapSnapshot *snap,
                                      hwaddr addr, hwaddr size)
{
    assert(mr->ram_block);
    return cpu_physical_memory_snapshot_get_dirty(snap,
                memory_region_get_ram_addr(mr) + addr, size);
}

void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
{
    if (mr->readonly != readonly) {
        memory_region_transaction_begin();
        mr->readonly = readonly;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}
void memory_region_set_nonvolatile(MemoryRegion *mr, bool nonvolatile)
{
    if (mr->nonvolatile != nonvolatile) {
        memory_region_transaction_begin();
        mr->nonvolatile = nonvolatile;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}

void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
{
    if (mr->romd_mode != romd_mode) {
        memory_region_transaction_begin();
        mr->romd_mode = romd_mode;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}

void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
                               hwaddr size, unsigned client)
{
    assert(mr->ram_block);
    cpu_physical_memory_test_and_clear_dirty(
        memory_region_get_ram_addr(mr) + addr, size, client);
}
int memory_region_get_fd(MemoryRegion *mr)
{
    int fd;

    rcu_read_lock();
    while (mr->alias) {
        mr = mr->alias;
    }
    fd = mr->ram_block->fd;
    rcu_read_unlock();

    return fd;
}

void *memory_region_get_ram_ptr(MemoryRegion *mr)
{
    void *ptr;
    uint64_t offset = 0;

    rcu_read_lock();
    while (mr->alias) {
        offset += mr->alias_offset;
        mr = mr->alias;
    }
    assert(mr->ram_block);
    ptr = qemu_map_ram_ptr(mr->ram_block, offset);
    rcu_read_unlock();

    return ptr;
}

MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset)
{
    RAMBlock *block;

    block = qemu_ram_block_from_host(ptr, false, offset);
    if (!block) {
        return NULL;
    }

    return block->mr;
}

ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
{
    return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID;
}

void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp)
{
    assert(mr->ram_block);

    qemu_ram_resize(mr->ram_block, newsize, errp);
}
static void memory_region_update_coalesced_range_as(MemoryRegion *mr, AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        if (fr->mr == mr) {
            flat_range_coalesced_io_del(fr, as);
            flat_range_coalesced_io_add(fr, as);
        }
    }
    flatview_unref(view);
}

static void memory_region_update_coalesced_range(MemoryRegion *mr)
{
    AddressSpace *as;

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        memory_region_update_coalesced_range_as(mr, as);
    }
}

void memory_region_set_coalescing(MemoryRegion *mr)
{
    memory_region_clear_coalescing(mr);
    memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
}

void memory_region_add_coalescing(MemoryRegion *mr,
                                  hwaddr offset,
                                  uint64_t size)
{
    CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));

    cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
    QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
    memory_region_update_coalesced_range(mr);
    memory_region_set_flush_coalesced(mr);
}

void memory_region_clear_coalescing(MemoryRegion *mr)
{
    CoalescedMemoryRange *cmr;
    bool updated = false;

    qemu_flush_coalesced_mmio_buffer();
    mr->flush_coalesced_mmio = false;

    while (!QTAILQ_EMPTY(&mr->coalesced)) {
        cmr = QTAILQ_FIRST(&mr->coalesced);
        QTAILQ_REMOVE(&mr->coalesced, cmr, link);
        g_free(cmr);
        updated = true;
    }

    if (updated) {
        memory_region_update_coalesced_range(mr);
    }
}

void memory_region_set_flush_coalesced(MemoryRegion *mr)
{
    mr->flush_coalesced_mmio = true;
}

void memory_region_clear_flush_coalesced(MemoryRegion *mr)
{
    qemu_flush_coalesced_mmio_buffer();
    if (QTAILQ_EMPTY(&mr->coalesced)) {
        mr->flush_coalesced_mmio = false;
    }
}

void memory_region_clear_global_locking(MemoryRegion *mr)
{
    mr->global_locking = false;
}
static bool userspace_eventfd_warning;

void memory_region_add_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .e = e,
    };
    unsigned i;

    if (kvm_enabled() && (!(kvm_eventfds_enabled() ||
                            userspace_eventfd_warning))) {
        userspace_eventfd_warning = true;
        error_report("Using eventfd without MMIO binding in KVM. "
                     "Suboptimal performance expected");
    }

    if (size) {
        adjust_endianness(mr, &mrfd.data, size);
    }
    memory_region_transaction_begin();
    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_before(&mrfd, &mr->ioeventfds[i])) {
            break;
        }
    }
    ++mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
    mr->ioeventfds[i] = mrfd;
    ioeventfd_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

void memory_region_del_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .e = e,
    };
    unsigned i;

    if (size) {
        adjust_endianness(mr, &mrfd.data, size);
    }
    memory_region_transaction_begin();
    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_equal(&mrfd, &mr->ioeventfds[i])) {
            break;
        }
    }
    assert(i != mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
    --mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
    ioeventfd_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}
static void memory_region_update_container_subregions(MemoryRegion *subregion)
{
    MemoryRegion *mr = subregion->container;
    MemoryRegion *other;

    memory_region_transaction_begin();

    memory_region_ref(subregion);
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->priority >= other->priority) {
            QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
            goto done;
        }
    }
    QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
done:
    memory_region_update_pending |= mr->enabled && subregion->enabled;
    memory_region_transaction_commit();
}

static void memory_region_add_subregion_common(MemoryRegion *mr,
                                               hwaddr offset,
                                               MemoryRegion *subregion)
{
    assert(!subregion->container);
    subregion->container = mr;
    subregion->addr = offset;
    memory_region_update_container_subregions(subregion);
}

void memory_region_add_subregion(MemoryRegion *mr,
                                 hwaddr offset,
                                 MemoryRegion *subregion)
{
    subregion->priority = 0;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         hwaddr offset,
                                         MemoryRegion *subregion,
                                         int priority)
{
    subregion->priority = priority;
    memory_region_add_subregion_common(mr, offset, subregion);
}
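
/*
 * Editor's note: priorities order overlapping siblings; higher values are
 * rendered first by render_memory_region() and therefore win.  Sketch
 * (hypothetical regions):
 *
 *     memory_region_add_subregion_overlap(sysmem, 0x0, ram, 0);
 *     memory_region_add_subregion_overlap(sysmem, 0xa0000, vga_mem, 1);
 *
 * Within the VGA window the higher-priority region shadows the RAM, which
 * shows through everywhere else.  memory_region_add_subregion() is the
 * priority-0 special case for non-overlapping layouts.
 */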
void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion)
{
    memory_region_transaction_begin();
    assert(subregion->container == mr);
    subregion->container = NULL;
    QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
    memory_region_unref(subregion);
    memory_region_update_pending |= mr->enabled && subregion->enabled;
    memory_region_transaction_commit();
}
void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
{
    if (enabled == mr->enabled) {
        return;
    }
    memory_region_transaction_begin();
    mr->enabled = enabled;
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}

void memory_region_set_size(MemoryRegion *mr, uint64_t size)
{
    Int128 s = int128_make64(size);

    if (size == UINT64_MAX) {
        s = int128_2_64();
    }
    if (int128_eq(s, mr->size)) {
        return;
    }
    memory_region_transaction_begin();
    mr->size = s;
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}

static void memory_region_readd_subregion(MemoryRegion *mr)
{
    MemoryRegion *container = mr->container;

    if (container) {
        memory_region_transaction_begin();
        memory_region_ref(mr);
        memory_region_del_subregion(container, mr);
        mr->container = container;
        memory_region_update_container_subregions(mr);
        memory_region_unref(mr);
        memory_region_transaction_commit();
    }
}

void memory_region_set_address(MemoryRegion *mr, hwaddr addr)
{
    if (addr != mr->addr) {
        mr->addr = addr;
        memory_region_readd_subregion(mr);
    }
}

void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset)
{
    assert(mr->alias);

    if (offset == mr->alias_offset) {
        return;
    }

    memory_region_transaction_begin();
    mr->alias_offset = offset;
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

uint64_t memory_region_get_alignment(const MemoryRegion *mr)
{
    return mr->align;
}
static int cmp_flatrange_addr(const void *addr_, const void *fr_)
{
    const AddrRange *addr = addr_;
    const FlatRange *fr = fr_;

    if (int128_le(addrrange_end(*addr), fr->addr.start)) {
        return -1;
    } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
        return 1;
    }
    return 0;
}

static FlatRange *flatview_lookup(FlatView *view, AddrRange addr)
{
    return bsearch(&addr, view->ranges, view->nr,
                   sizeof(FlatRange), cmp_flatrange_addr);
}

bool memory_region_is_mapped(MemoryRegion *mr)
{
    return mr->container ? true : false;
}

/* Same as memory_region_find, but it does not add a reference to the
 * returned region.  It must be called from an RCU critical section.
 */
static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr,
                                                  hwaddr addr, uint64_t size)
{
    MemoryRegionSection ret = { .mr = NULL };
    MemoryRegion *root;
    AddressSpace *as;
    AddrRange range;
    FlatView *view;
    FlatRange *fr;

    /* Walk up to the root region, accumulating the absolute address. */
    addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        addr += root->addr;
    }

    as = memory_region_to_address_space(root);
    if (!as) {
        return ret;
    }
    range = addrrange_make(int128_make64(addr), int128_make64(size));

    view = address_space_to_flatview(as);
    fr = flatview_lookup(view, range);
    if (!fr) {
        return ret;
    }

    /* Back up to the first FlatRange that intersects the request. */
    while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) {
        --fr;
    }

    ret.mr = fr->mr;
    range = addrrange_intersection(range, fr->addr);
    ret.offset_within_region = fr->offset_in_region;
    ret.offset_within_region += int128_get64(int128_sub(range.start,
                                                        fr->addr.start));
    ret.size = range.size;
    ret.offset_within_address_space = int128_get64(range.start);
    ret.readonly = fr->readonly;
    ret.nonvolatile = fr->nonvolatile;
    return ret;
}

MemoryRegionSection memory_region_find(MemoryRegion *mr,
                                       hwaddr addr, uint64_t size)
{
    MemoryRegionSection ret;

    rcu_read_lock();
    ret = memory_region_find_rcu(mr, addr, size);
    if (ret.mr) {
        memory_region_ref(ret.mr);
    }
    rcu_read_unlock();
    return ret;
}

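/*
 * Illustrative sketch (not part of the original file): resolving the region
 * backing a guest-physical range.  "probe_backing" and the address are
 * hypothetical.
 *
 *     static void probe_backing(MemoryRegion *root)
 *     {
 *         MemoryRegionSection sec = memory_region_find(root, 0x1000, 4);
 *
 *         if (sec.mr) {
 *             // inspect sec.offset_within_region, sec.size, ...
 *             memory_region_unref(sec.mr);  // drop the reference taken above
 *         }
 *     }
 *
 * Unlike memory_region_find_rcu(), the returned section's .mr carries an
 * extra reference, so the caller must memory_region_unref() it when done.
 */
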
bool memory_region_present(MemoryRegion *container, hwaddr addr)
{
    MemoryRegion *mr;

    rcu_read_lock();
    mr = memory_region_find_rcu(container, addr, 1).mr;
    rcu_read_unlock();
    return mr && mr != container;
}

void memory_global_dirty_log_sync(void)
{
    memory_region_sync_dirty_bitmap(NULL);
}

static VMChangeStateEntry *vmstate_change;

void memory_global_dirty_log_start(void)
{
    if (vmstate_change) {
        qemu_del_vm_change_state_handler(vmstate_change);
        vmstate_change = NULL;
    }

    global_dirty_log = true;

    MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward);

    /* Refresh DIRTY_MEMORY_MIGRATION bit.  */
    memory_region_transaction_begin();
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}

static void memory_global_dirty_log_do_stop(void)
{
    global_dirty_log = false;

    /* Refresh DIRTY_MEMORY_MIGRATION bit.  */
    memory_region_transaction_begin();
    memory_region_update_pending = true;
    memory_region_transaction_commit();

    MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
}

static void memory_vm_change_state_handler(void *opaque, int running,
                                           RunState state)
{
    if (running) {
        memory_global_dirty_log_do_stop();

        if (vmstate_change) {
            qemu_del_vm_change_state_handler(vmstate_change);
            vmstate_change = NULL;
        }
    }
}

void memory_global_dirty_log_stop(void)
{
    if (!runstate_is_running()) {
        if (vmstate_change) {
            return;
        }
        /* Defer the stop until the VM runs again. */
        vmstate_change = qemu_add_vm_change_state_handler(
                                memory_vm_change_state_handler, NULL);
        return;
    }

    memory_global_dirty_log_do_stop();
}

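/*
 * Illustrative sketch (not part of the original file): the start/sync/stop
 * pattern a dirty-tracking client such as live migration follows.
 * "another_pass_needed" is a hypothetical predicate.
 *
 *     memory_global_dirty_log_start();      // listeners see log_global_start
 *     while (another_pass_needed()) {
 *         memory_global_dirty_log_sync();   // listeners sync dirty bitmaps
 *     }
 *     memory_global_dirty_log_stop();       // deferred if the VM is stopped
 *
 * As the code above shows, the stop is not performed while the VM is
 * stopped; a VM change state handler re-issues it once the guest runs again.
 */
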
static void listener_add_address_space(MemoryListener *listener,
                                       AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;

    if (listener->begin) {
        listener->begin(listener);
    }
    if (global_dirty_log) {
        if (listener->log_global_start) {
            listener->log_global_start(listener);
        }
    }

    /* Replay the current flatview as a series of region_add calls. */
    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        MemoryRegionSection section = section_from_flat_range(fr, view);

        if (listener->region_add) {
            listener->region_add(listener, &section);
        }
        if (fr->dirty_log_mask && listener->log_start) {
            listener->log_start(listener, &section, 0, fr->dirty_log_mask);
        }
    }
    if (listener->commit) {
        listener->commit(listener);
    }
    flatview_unref(view);
}

static void listener_del_address_space(MemoryListener *listener,
                                       AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;

    if (listener->begin) {
        listener->begin(listener);
    }
    /* Replay the current flatview as a series of region_del calls. */
    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        MemoryRegionSection section = section_from_flat_range(fr, view);

        if (fr->dirty_log_mask && listener->log_stop) {
            listener->log_stop(listener, &section, fr->dirty_log_mask, 0);
        }
        if (listener->region_del) {
            listener->region_del(listener, &section);
        }
    }
    if (listener->commit) {
        listener->commit(listener);
    }
    flatview_unref(view);
}

void memory_listener_register(MemoryListener *listener, AddressSpace *as)
{
    MemoryListener *other = NULL;

    listener->address_space = as;
    /* Keep both lists sorted by ascending priority. */
    if (QTAILQ_EMPTY(&memory_listeners)
        || listener->priority >= QTAILQ_LAST(&memory_listeners)->priority) {
        QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
    } else {
        QTAILQ_FOREACH(other, &memory_listeners, link) {
            if (listener->priority < other->priority) {
                break;
            }
        }
        QTAILQ_INSERT_BEFORE(other, listener, link);
    }

    if (QTAILQ_EMPTY(&as->listeners)
        || listener->priority >= QTAILQ_LAST(&as->listeners)->priority) {
        QTAILQ_INSERT_TAIL(&as->listeners, listener, link_as);
    } else {
        QTAILQ_FOREACH(other, &as->listeners, link_as) {
            if (listener->priority < other->priority) {
                break;
            }
        }
        QTAILQ_INSERT_BEFORE(other, listener, link_as);
    }

    listener_add_address_space(listener, as);
}

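/*
 * Illustrative sketch (not part of the original file): a minimal listener
 * observing address_space_memory.  The names "demo_region_add",
 * "demo_listener" and the priority value are hypothetical.
 *
 *     static void demo_region_add(MemoryListener *l,
 *                                 MemoryRegionSection *section)
 *     {
 *         // called for every FlatRange when the topology (re)commits
 *     }
 *
 *     static MemoryListener demo_listener = {
 *         .region_add = demo_region_add,
 *         .priority = 10,
 *     };
 *
 *     memory_listener_register(&demo_listener, &address_space_memory);
 *
 * Registration replays the current flatview through
 * listener_add_address_space(), so a late-registering listener still sees
 * every mapping that already exists.
 */
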
void memory_listener_unregister(MemoryListener *listener)
{
    if (!listener->address_space) {
        return;
    }

    listener_del_address_space(listener, listener->address_space);
    QTAILQ_REMOVE(&memory_listeners, listener, link);
    QTAILQ_REMOVE(&listener->address_space->listeners, listener, link_as);
    listener->address_space = NULL;
}

void address_space_remove_listeners(AddressSpace *as)
{
    while (!QTAILQ_EMPTY(&as->listeners)) {
        memory_listener_unregister(QTAILQ_FIRST(&as->listeners));
    }
}

void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
{
    memory_region_ref(root);
    as->root = root;
    as->current_map = NULL;
    as->ioeventfd_nb = 0;
    as->ioeventfds = NULL;
    QTAILQ_INIT(&as->listeners);
    QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
    as->name = g_strdup(name ? name : "anonymous");
    address_space_update_topology(as);
    address_space_update_ioeventfds(as);
}

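/*
 * Illustrative sketch (not part of the original file): creating a private
 * address space over a freshly initialized root region.  The owner object
 * "OBJECT(dev)" and the names are hypothetical.
 *
 *     MemoryRegion *root = g_new0(MemoryRegion, 1);
 *     AddressSpace *as = g_new0(AddressSpace, 1);
 *
 *     memory_region_init(root, OBJECT(dev), "demo-root", UINT64_MAX);
 *     address_space_init(as, root, "demo-as");
 *
 * address_space_init() takes its own reference on the root region and
 * immediately renders a first FlatView via address_space_update_topology().
 */
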
static void do_address_space_destroy(AddressSpace *as)
{
    assert(QTAILQ_EMPTY(&as->listeners));

    flatview_unref(as->current_map);
    g_free(as->name);
    g_free(as->ioeventfds);
    memory_region_unref(as->root);
}

void address_space_destroy(AddressSpace *as)
{
    MemoryRegion *root = as->root;

    /* Flush out anything from MemoryListeners listening in on this */
    memory_region_transaction_begin();
    as->root = NULL;
    memory_region_transaction_commit();
    QTAILQ_REMOVE(&address_spaces, as, address_spaces_link);

    /* At this point, as->dispatch and as->current_map are dummy
     * entries that the guest should never use.  Wait for the old
     * values to expire before freeing the data.
     */
    as->root = root;
    call_rcu(as, do_address_space_destroy, rcu);
}

static const char *memory_region_type(MemoryRegion *mr)
{
    if (memory_region_is_ram_device(mr)) {
        return "ramd";
    } else if (memory_region_is_romd(mr)) {
        return "romd";
    } else if (memory_region_is_rom(mr)) {
        return "rom";
    } else if (memory_region_is_ram(mr)) {
        return "ram";
    } else {
        return "i/o";
    }
}

typedef struct MemoryRegionList MemoryRegionList;

struct MemoryRegionList {
    const MemoryRegion *mr;
    QTAILQ_ENTRY(MemoryRegionList) mrqueue;
};

typedef QTAILQ_HEAD(, MemoryRegionList) MemoryRegionListHead;

#define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
                           int128_sub((size), int128_one())) : 0)

#define MTREE_INDENT "  "

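/*
 * Note (added): MR_SIZE() yields the inclusive end offset used by the tree
 * printout below; e.g. a 0x1000-byte region at 0 prints as
 * 0000000000000000-0000000000000fff, and a zero-sized region prints with
 * its end equal to its start.
 */
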
static void mtree_expand_owner(const char *label, Object *obj)
{
    DeviceState *dev = (DeviceState *) object_dynamic_cast(obj, TYPE_DEVICE);

    qemu_printf(" %s:{%s", label, dev ? "dev" : "obj");
    if (dev && dev->id) {
        qemu_printf(" id=%s", dev->id);
    } else {
        gchar *canonical_path = object_get_canonical_path(obj);
        if (canonical_path) {
            qemu_printf(" path=%s", canonical_path);
            g_free(canonical_path);
        } else {
            qemu_printf(" type=%s", object_get_typename(obj));
        }
    }
    qemu_printf("}");
}

static void mtree_print_mr_owner(const MemoryRegion *mr)
{
    Object *owner = mr->owner;
    Object *parent = memory_region_owner((MemoryRegion *)mr);

    if (!owner && !parent) {
        qemu_printf(" orphan");
        return;
    }
    if (owner) {
        mtree_expand_owner("owner", owner);
    }
    if (parent && parent != owner) {
        mtree_expand_owner("parent", parent);
    }
}

static void mtree_print_mr(const MemoryRegion *mr, unsigned int level,
                           hwaddr base,
                           MemoryRegionListHead *alias_print_queue,
                           bool owner)
{
    MemoryRegionList *new_ml, *ml, *next_ml;
    MemoryRegionListHead submr_print_queue;
    const MemoryRegion *submr;
    unsigned int i;
    hwaddr cur_start, cur_end;

    if (!mr) {
        return;
    }

    for (i = 0; i < level; i++) {
        qemu_printf(MTREE_INDENT);
    }

    cur_start = base + mr->addr;
    cur_end = cur_start + MR_SIZE(mr->size);

    /*
     * Try to detect overflow of memory region. This should never
     * happen normally. When it happens, we dump something to warn the
     * user who is observing this.
     */
    if (cur_start < base || cur_end < cur_start) {
        qemu_printf("[DETECTED OVERFLOW!] ");
    }

    if (mr->alias) {
        MemoryRegionList *ml;
        bool found = false;

        /* check if the alias is already in the queue */
        QTAILQ_FOREACH(ml, alias_print_queue, mrqueue) {
            if (ml->mr == mr->alias) {
                found = true;
            }
        }

        if (!found) {
            ml = g_new(MemoryRegionList, 1);
            ml->mr = mr->alias;
            QTAILQ_INSERT_TAIL(alias_print_queue, ml, mrqueue);
        }
        qemu_printf(TARGET_FMT_plx "-" TARGET_FMT_plx
                    " (prio %d, %s%s): alias %s @%s " TARGET_FMT_plx
                    "-" TARGET_FMT_plx "%s",
                    cur_start, cur_end,
                    mr->priority,
                    mr->nonvolatile ? "nv-" : "",
                    memory_region_type((MemoryRegion *)mr),
                    memory_region_name(mr),
                    memory_region_name(mr->alias),
                    mr->alias_offset,
                    mr->alias_offset + MR_SIZE(mr->size),
                    mr->enabled ? "" : " [disabled]");
        if (owner) {
            mtree_print_mr_owner(mr);
        }
    } else {
        qemu_printf(TARGET_FMT_plx "-" TARGET_FMT_plx
                    " (prio %d, %s%s): %s%s",
                    cur_start, cur_end,
                    mr->priority,
                    mr->nonvolatile ? "nv-" : "",
                    memory_region_type((MemoryRegion *)mr),
                    memory_region_name(mr),
                    mr->enabled ? "" : " [disabled]");
        if (owner) {
            mtree_print_mr_owner(mr);
        }
    }
    qemu_printf("\n");

    QTAILQ_INIT(&submr_print_queue);

    /* Insertion sort: subregions ordered by address, then by priority. */
    QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
        new_ml = g_new(MemoryRegionList, 1);
        new_ml->mr = submr;
        QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
            if (new_ml->mr->addr < ml->mr->addr ||
                (new_ml->mr->addr == ml->mr->addr &&
                 new_ml->mr->priority > ml->mr->priority)) {
                QTAILQ_INSERT_BEFORE(ml, new_ml, mrqueue);
                new_ml = NULL;
                break;
            }
        }
        if (new_ml) {
            QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, mrqueue);
        }
    }

    QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
        mtree_print_mr(ml->mr, level + 1, cur_start,
                       alias_print_queue, owner);
    }

    QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, mrqueue, next_ml) {
        g_free(ml);
    }
}

struct FlatViewInfo {
    int counter;
    bool dispatch_tree;
    bool owner;
    AccelClass *ac;
    const char *ac_name;
};

static void mtree_print_flatview(gpointer key, gpointer value,
                                 gpointer user_data)
{
    FlatView *view = key;
    GArray *fv_address_spaces = value;
    struct FlatViewInfo *fvi = user_data;
    FlatRange *range = &view->ranges[0];
    MemoryRegion *mr;
    int n = view->nr;
    int i;
    AddressSpace *as;

    qemu_printf("FlatView #%d\n", fvi->counter);
    ++fvi->counter;

    for (i = 0; i < fv_address_spaces->len; ++i) {
        as = g_array_index(fv_address_spaces, AddressSpace *, i);
        qemu_printf(" AS \"%s\", root: %s",
                    as->name, memory_region_name(as->root));
        if (as->root->alias) {
            qemu_printf(", alias %s", memory_region_name(as->root->alias));
        }
        qemu_printf("\n");
    }

    qemu_printf(" Root memory region: %s\n",
                view->root ? memory_region_name(view->root) : "(none)");

    if (n <= 0) {
        qemu_printf(MTREE_INDENT "No rendered FlatView\n\n");
        return;
    }

    while (n--) {
        mr = range->mr;
        if (range->offset_in_region) {
            qemu_printf(MTREE_INDENT TARGET_FMT_plx "-" TARGET_FMT_plx
                        " (prio %d, %s%s): %s @" TARGET_FMT_plx,
                        int128_get64(range->addr.start),
                        int128_get64(range->addr.start)
                        + MR_SIZE(range->addr.size),
                        mr->priority,
                        range->nonvolatile ? "nv-" : "",
                        range->readonly ? "rom" : memory_region_type(mr),
                        memory_region_name(mr),
                        range->offset_in_region);
        } else {
            qemu_printf(MTREE_INDENT TARGET_FMT_plx "-" TARGET_FMT_plx
                        " (prio %d, %s%s): %s",
                        int128_get64(range->addr.start),
                        int128_get64(range->addr.start)
                        + MR_SIZE(range->addr.size),
                        mr->priority,
                        range->nonvolatile ? "nv-" : "",
                        range->readonly ? "rom" : memory_region_type(mr),
                        memory_region_name(mr));
        }
        if (fvi->owner) {
            mtree_print_mr_owner(mr);
        }

        if (fvi->ac) {
            for (i = 0; i < fv_address_spaces->len; ++i) {
                as = g_array_index(fv_address_spaces, AddressSpace *, i);
                if (fvi->ac->has_memory(current_machine, as,
                                        int128_get64(range->addr.start),
                                        MR_SIZE(range->addr.size) + 1)) {
                    qemu_printf(" %s", fvi->ac_name);
                }
            }
        }
        qemu_printf("\n");
        range++;
    }

#if !defined(CONFIG_USER_ONLY)
    if (fvi->dispatch_tree && view->root) {
        mtree_print_dispatch(view->dispatch, view->root);
    }
#endif

    qemu_printf("\n");
}

static gboolean mtree_info_flatview_free(gpointer key, gpointer value,
                                         gpointer user_data)
{
    FlatView *view = key;
    GArray *fv_address_spaces = value;

    g_array_unref(fv_address_spaces);
    flatview_unref(view);

    return true;
}

void mtree_info(bool flatview, bool dispatch_tree, bool owner)
{
    MemoryRegionListHead ml_head;
    MemoryRegionList *ml, *ml2;
    AddressSpace *as;

    if (flatview) {
        FlatView *view;
        struct FlatViewInfo fvi = {
            .counter = 0,
            .dispatch_tree = dispatch_tree,
            .owner = owner,
        };
        GArray *fv_address_spaces;
        GHashTable *views = g_hash_table_new(g_direct_hash, g_direct_equal);
        AccelClass *ac = ACCEL_GET_CLASS(current_machine->accelerator);

        if (ac->has_memory) {
            fvi.ac = ac;
            fvi.ac_name = current_machine->accel ? current_machine->accel :
                object_class_get_name(OBJECT_CLASS(ac));
        }

        /* Gather all FVs in one table */
        QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
            view = address_space_get_flatview(as);

            fv_address_spaces = g_hash_table_lookup(views, view);
            if (!fv_address_spaces) {
                fv_address_spaces = g_array_new(false, false, sizeof(as));
                g_hash_table_insert(views, view, fv_address_spaces);
            }

            g_array_append_val(fv_address_spaces, as);
        }

        /* Print */
        g_hash_table_foreach(views, mtree_print_flatview, &fvi);

        /* Free */
        g_hash_table_foreach_remove(views, mtree_info_flatview_free, 0);
        g_hash_table_unref(views);

        return;
    }

    QTAILQ_INIT(&ml_head);

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        qemu_printf("address-space: %s\n", as->name);
        mtree_print_mr(as->root, 1, 0, &ml_head, owner);
        qemu_printf("\n");
    }

    /* print aliased regions */
    QTAILQ_FOREACH(ml, &ml_head, mrqueue) {
        qemu_printf("memory-region: %s\n", memory_region_name(ml->mr));
        mtree_print_mr(ml->mr, 1, 0, &ml_head, owner);
        qemu_printf("\n");
    }

    QTAILQ_FOREACH_SAFE(ml, &ml_head, mrqueue, ml2) {
        g_free(ml);
    }
}

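/*
 * Note (added, assumption): mtree_info() is the worker behind the monitor's
 * "info mtree" command; the -f, -d and -o flags are believed to map onto the
 * flatview, dispatch_tree and owner arguments respectively.
 */
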
void memory_region_init_ram(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    DeviceState *owner_dev;
    Error *err = NULL;

    memory_region_init_ram_nomigrate(mr, owner, name, size, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration. TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);
}

void memory_region_init_rom(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    DeviceState *owner_dev;
    Error *err = NULL;

    memory_region_init_rom_nomigrate(mr, owner, name, size, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration. TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);
}

void memory_region_init_rom_device(MemoryRegion *mr,
                                   struct Object *owner,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size,
                                   Error **errp)
{
    DeviceState *owner_dev;
    Error *err = NULL;

    memory_region_init_rom_device_nomigrate(mr, owner, ops, opaque,
                                            name, size, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration. TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);
}

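/*
 * Illustrative sketch (not part of the original file): typical use of the
 * migration-aware wrapper from a device's realize method.  "DemoState",
 * "DEMO" and the size are hypothetical.
 *
 *     static void demo_realize(DeviceState *dev, Error **errp)
 *     {
 *         DemoState *s = DEMO(dev);
 *
 *         memory_region_init_ram(&s->ram, OBJECT(dev), "demo.ram",
 *                                0x10000, errp);
 *         // on success the RAM block is already registered for migration
 *     }
 *
 * Passing a DeviceState owner is what makes the migration name unique; a
 * NULL owner also works, but then the name itself must be globally unique.
 */
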
static const TypeInfo memory_region_info = {
    .parent             = TYPE_OBJECT,
    .name               = TYPE_MEMORY_REGION,
    .class_size         = sizeof(MemoryRegionClass),
    .instance_size      = sizeof(MemoryRegion),
    .instance_init      = memory_region_initfn,
    .instance_finalize  = memory_region_finalize,
};

static const TypeInfo iommu_memory_region_info = {
    .parent             = TYPE_MEMORY_REGION,
    .name               = TYPE_IOMMU_MEMORY_REGION,
    .class_size         = sizeof(IOMMUMemoryRegionClass),
    .instance_size      = sizeof(IOMMUMemoryRegion),
    .instance_init      = iommu_memory_region_initfn,
    .abstract           = true,
};

static void memory_register_types(void)
{
    type_register_static(&memory_region_info);
    type_register_static(&iommu_memory_region_info);
}

type_init(memory_register_types)