/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "qapi/visitor.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qom/object.h"
#include "trace-root.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"

//#define DEBUG_UNASSIGNED
static unsigned memory_region_transaction_depth;
static bool memory_region_update_pending;
static bool ioeventfd_update_pending;
static bool global_dirty_log = false;

static QTAILQ_HEAD(memory_listeners, MemoryListener) memory_listeners
    = QTAILQ_HEAD_INITIALIZER(memory_listeners);

static QTAILQ_HEAD(, AddressSpace) address_spaces
    = QTAILQ_HEAD_INITIALIZER(address_spaces);

static GHashTable *flat_views;
typedef struct AddrRange AddrRange;

/*
 * Note that signed integers are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;
    Int128 size;
};
static AddrRange addrrange_make(Int128 start, Int128 size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
}

static Int128 addrrange_end(AddrRange r)
{
    return int128_add(r.start, r.size);
}

static AddrRange addrrange_shift(AddrRange range, Int128 delta)
{
    int128_addto(&range.start, delta);
    return range;
}

static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 start = int128_max(r1.start, r2.start);
    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
    return addrrange_make(start, int128_sub(end, start));
}
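
/*
 * Worked example (added comment, not from the original source): for
 * r1 = [0x1000, +0x1000) and r2 = [0x1800, +0x1000),
 * addrrange_intersection() returns [0x1800, +0x800).  Int128
 * arithmetic keeps these computations exact even for ranges that
 * reach the very end of the 64-bit address space.
 */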
enum ListenerDirection { Forward, Reverse };

#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...)    \
    do {                                                                \
        MemoryListener *_listener;                                      \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners,        \
                                   memory_listeners, link) {            \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, ##_args);           \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

#define MEMORY_LISTENER_CALL(_as, _callback, _direction, _section, _args...) \
    do {                                                                \
        MemoryListener *_listener;                                      \
        struct memory_listeners_as *list = &(_as)->listeners;           \
                                                                        \
        switch (_direction) {                                           \
        case Forward:                                                   \
            QTAILQ_FOREACH(_listener, list, link_as) {                  \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        case Reverse:                                                   \
            QTAILQ_FOREACH_REVERSE(_listener, list, memory_listeners_as, \
                                   link_as) {                           \
                if (_listener->_callback) {                             \
                    _listener->_callback(_listener, _section, ##_args); \
                }                                                       \
            }                                                           \
            break;                                                      \
        default:                                                        \
            abort();                                                    \
        }                                                               \
    } while (0)

/* No need to ref/unref .mr, the FlatRange keeps it alive.  */
#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...)  \
    do {                                                                \
        MemoryRegionSection mrs = section_from_flat_range(fr,           \
                address_space_to_flatview(as));                         \
        MEMORY_LISTENER_CALL(as, callback, dir, &mrs, ##_args);         \
    } while(0)
struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    EventNotifier *e;
};
static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd *a,
                                           MemoryRegionIoeventfd *b)
{
    if (int128_lt(a->addr.start, b->addr.start)) {
        return true;
    } else if (int128_gt(a->addr.start, b->addr.start)) {
        return false;
    } else if (int128_lt(a->addr.size, b->addr.size)) {
        return true;
    } else if (int128_gt(a->addr.size, b->addr.size)) {
        return false;
    } else if (a->match_data < b->match_data) {
        return true;
    } else if (a->match_data > b->match_data) {
        return false;
    } else if (a->match_data) {
        if (a->data < b->data) {
            return true;
        } else if (a->data > b->data) {
            return false;
        }
    }
    if (a->e < b->e) {
        return true;
    } else if (a->e > b->e) {
        return false;
    }
    return false;
}

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd *a,
                                          MemoryRegionIoeventfd *b)
{
    return !memory_region_ioeventfd_before(a, b)
        && !memory_region_ioeventfd_before(b, a);
}
/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    hwaddr offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool romd_mode;
    bool readonly;
};
#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static inline MemoryRegionSection
section_from_flat_range(FlatRange *fr, FlatView *fv)
{
    return (MemoryRegionSection) {
        .mr = fr->mr,
        .fv = fv,
        .offset_within_region = fr->offset_in_region,
        .size = fr->addr.size,
        .offset_within_address_space = int128_get64(fr->addr.start),
        .readonly = fr->readonly,
    };
}
static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->romd_mode == b->romd_mode
        && a->readonly == b->readonly;
}
static FlatView *flatview_new(MemoryRegion *mr_root)
{
    FlatView *view;

    view = g_new0(FlatView, 1);
    view->ref = 1;
    view->root = mr_root;
    memory_region_ref(mr_root);
    trace_flatview_new(view, mr_root);

    return view;
}
/* Insert a range into a given position.  Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                 view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    memory_region_ref(range->mr);
    ++view->nr;
}
static void flatview_destroy(FlatView *view)
{
    int i;

    trace_flatview_destroy(view, view->root);
    if (view->dispatch) {
        address_space_dispatch_free(view->dispatch);
    }
    for (i = 0; i < view->nr; i++) {
        memory_region_unref(view->ranges[i].mr);
    }
    g_free(view->ranges);
    memory_region_unref(view->root);
    g_free(view);
}
static bool flatview_ref(FlatView *view)
{
    return atomic_fetch_inc_nonzero(&view->ref) > 0;
}

void flatview_unref(FlatView *view)
{
    if (atomic_fetch_dec(&view->ref) == 1) {
        trace_flatview_destroy_rcu(view, view->root);
        call_rcu(view, flatview_destroy, rcu);
    }
}
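
/*
 * Note (added comment): readers walk the current FlatView under RCU,
 * so the final unref above does not free the view synchronously;
 * call_rcu() defers flatview_destroy() until all in-flight readers are
 * done, and only then are the referenced MemoryRegions released.
 */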
static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
        && r1->mr == r2->mr
        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
                                r1->addr.size),
                     int128_make64(r2->offset_in_region))
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->romd_mode == r2->romd_mode
        && r1->readonly == r2->readonly;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}
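
/*
 * Illustration (added comment): two FlatRanges covering [0x0, +0x1000)
 * and [0x1000, +0x1000) of the same region, contiguous both in the
 * address space and in the region offset, collapse into a single
 * [0x0, +0x2000) range here.
 */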
static bool memory_region_big_endian(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static bool memory_region_wrong_endianness(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness == DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size)
{
    if (memory_region_wrong_endianness(mr)) {
        switch (size) {
        case 1:
            break;
        case 2:
            *data = bswap16(*data);
            break;
        case 4:
            *data = bswap32(*data);
            break;
        case 8:
            *data = bswap64(*data);
            break;
        default:
            abort();
        }
    }
}
static inline void memory_region_shift_read_access(uint64_t *value,
                                                   signed shift,
                                                   uint64_t mask,
                                                   uint64_t tmp)
{
    if (shift >= 0) {
        *value |= (tmp & mask) << shift;
    } else {
        *value |= (tmp & mask) >> -shift;
    }
}

static inline uint64_t memory_region_shift_write_access(uint64_t *value,
                                                        signed shift,
                                                        uint64_t mask)
{
    uint64_t tmp;

    if (shift >= 0) {
        tmp = (*value >> shift) & mask;
    } else {
        tmp = (*value << -shift) & mask;
    }

    return tmp;
}
static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset)
{
    MemoryRegion *root;
    hwaddr abs_addr = offset;

    abs_addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        abs_addr += root->addr;
    }

    return abs_addr;
}

static int get_cpu_index(void)
{
    if (current_cpu) {
        return current_cpu->cpu_index;
    }
    return -1;
}
static MemTxResult memory_region_oldmmio_read_accessor(MemoryRegion *mr,
                                                       hwaddr addr,
                                                       uint64_t *value,
                                                       unsigned size,
                                                       signed shift,
                                                       uint64_t mask,
                                                       MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->old_mmio.read[ctz32(size)](mr->opaque, addr);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    memory_region_shift_read_access(value, shift, mask, tmp);
    return MEMTX_OK;
}

static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
                                               hwaddr addr,
                                               uint64_t *value,
                                               unsigned size,
                                               signed shift,
                                               uint64_t mask,
                                               MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->read(mr->opaque, addr, size);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    memory_region_shift_read_access(value, shift, mask, tmp);
    return MEMTX_OK;
}

static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          signed shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs)
{
    uint64_t tmp = 0;
    MemTxResult r;

    r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    memory_region_shift_read_access(value, shift, mask, tmp);
    return r;
}

static MemTxResult memory_region_oldmmio_write_accessor(MemoryRegion *mr,
                                                        hwaddr addr,
                                                        uint64_t *value,
                                                        unsigned size,
                                                        signed shift,
                                                        uint64_t mask,
                                                        MemTxAttrs attrs)
{
    uint64_t tmp = memory_region_shift_write_access(value, shift, mask);

    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->old_mmio.write[ctz32(size)](mr->opaque, addr, tmp);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *value,
                                                unsigned size,
                                                signed shift,
                                                uint64_t mask,
                                                MemTxAttrs attrs)
{
    uint64_t tmp = memory_region_shift_write_access(value, shift, mask);

    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->write(mr->opaque, addr, tmp, size);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
                                                           hwaddr addr,
                                                           uint64_t *value,
                                                           unsigned size,
                                                           signed shift,
                                                           uint64_t mask,
                                                           MemTxAttrs attrs)
{
    uint64_t tmp = memory_region_shift_write_access(value, shift, mask);

    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (mr == &io_mem_notdirty) {
        /* Accesses to code which has previously been translated into a TB show
         * up in the MMIO path, as accesses to the io_mem_notdirty
         * MemoryRegion. */
        trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
    } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs);
}
static MemTxResult access_with_adjusted_size(hwaddr addr,
                                             uint64_t *value,
                                             unsigned size,
                                             unsigned access_size_min,
                                             unsigned access_size_max,
                                             MemTxResult (*access_fn)
                                                         (MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          signed shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs),
                                             MemoryRegion *mr,
                                             MemTxAttrs attrs)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;
    MemTxResult r = MEMTX_OK;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }

    /* FIXME: support unaligned access? */
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = MAKE_64BIT_MASK(0, access_size * 8);
    if (memory_region_big_endian(mr)) {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size,
                        (size - access_size - i) * 8, access_mask, attrs);
        }
    } else {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size, i * 8,
                        access_mask, attrs);
        }
    }
    return r;
}
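
/*
 * Example of the splitting above (added comment): an 8-byte access to
 * a region whose implementation caps accesses at 4 bytes becomes two
 * 4-byte calls.  The per-call shift, in bits, positions each partial
 * result within *value; big-endian regions fill from the most
 * significant bytes downwards, little-endian ones from bit 0 upwards.
 */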
static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
{
    AddressSpace *as;

    while (mr->container) {
        mr = mr->container;
    }
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (mr == as->root) {
            return as;
        }
    }
    return NULL;
}
/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 Int128 base,
                                 AddrRange clip,
                                 bool readonly)
{
    MemoryRegion *subregion;
    unsigned i;
    hwaddr offset_in_region;
    Int128 remain;
    Int128 now;
    FlatRange fr;
    AddrRange tmp;

    if (!mr->enabled) {
        return;
    }

    int128_addto(&base, int128_make64(mr->addr));
    readonly |= mr->readonly;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        int128_subfrom(&base, int128_make64(mr->alias->addr));
        int128_subfrom(&base, int128_make64(mr->alias_offset));
        render_memory_region(view, mr->alias, base, clip, readonly);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip, readonly);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = int128_get64(int128_sub(clip.start, base));
    base = clip.start;
    remain = clip.size;

    fr.mr = mr;
    fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    fr.romd_mode = mr->romd_mode;
    fr.readonly = readonly;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
            continue;
        }
        if (int128_lt(base, view->ranges[i].addr.start)) {
            now = int128_min(remain,
                             int128_sub(view->ranges[i].addr.start, base));
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            flatview_insert(view, i, &fr);
            ++i;
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
        now = int128_sub(int128_min(int128_add(base, remain),
                                    addrrange_end(view->ranges[i].addr)),
                         base);
        int128_addto(&base, now);
        offset_in_region += int128_get64(now);
        int128_subfrom(&remain, now);
    }
    if (int128_nz(remain)) {
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        flatview_insert(view, i, &fr);
    }
}
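
/*
 * Note (added comment): the subregions list is kept sorted by
 * descending priority, so the recursion above lets higher-priority
 * siblings claim their addresses first; lower-priority regions and the
 * terminating container itself only fill whatever gaps remain in @view.
 */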
static MemoryRegion *memory_region_get_flatview_root(MemoryRegion *mr)
{
    while (mr->enabled) {
        if (mr->alias) {
            if (!mr->alias_offset && int128_ge(mr->size, mr->alias->size)) {
                /* The alias is included in its entirety.  Use it as
                 * the "real" root, so that we can share more FlatViews.
                 */
                mr = mr->alias;
                continue;
            }
        } else if (!mr->terminates) {
            unsigned int found = 0;
            MemoryRegion *child, *next = NULL;
            QTAILQ_FOREACH(child, &mr->subregions, subregions_link) {
                if (child->enabled) {
                    if (++found > 1) {
                        next = NULL;
                        break;
                    }
                    if (!child->addr && int128_ge(mr->size, child->size)) {
                        /* A child is included in its entirety.  If it's the only
                         * enabled one, use it in the hope of finding an alias down the
                         * way. This will also let us share FlatViews.
                         */
                        next = child;
                    }
                }
            }
            if (found == 0) {
                return NULL;
            }
            if (next) {
                mr = next;
                continue;
            }
        }

        return mr;
    }

    return NULL;
}
/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView *generate_memory_topology(MemoryRegion *mr)
{
    int i;
    FlatView *view;

    view = flatview_new(mr);

    if (mr) {
        render_memory_region(view, mr, int128_zero(),
                             addrrange_make(int128_zero(), int128_2_64()),
                             false);
    }
    flatview_simplify(view);

    view->dispatch = address_space_dispatch_new(view);
    for (i = 0; i < view->nr; i++) {
        MemoryRegionSection mrs =
            section_from_flat_range(&view->ranges[i], view);
        flatview_add_to_dispatch(view, &mrs);
    }
    address_space_dispatch_compact(view->dispatch);
    g_hash_table_replace(flat_views, mr, view);

    return view;
}
static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;
    MemoryRegionIoeventfd *fd;
    MemoryRegionSection section;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(&fds_old[iold],
                                                  &fds_new[inew]))) {
            fd = &fds_old[iold];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_del, Forward, &section,
                                 fd->match_data, fd->data, fd->e);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(&fds_new[inew],
                                                         &fds_old[iold]))) {
            fd = &fds_new[inew];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_add, Reverse, &section,
                                 fd->match_data, fd->data, fd->e);
            ++inew;
        } else {
            ++iold;
            ++inew;
        }
    }
}
*address_space_get_flatview(AddressSpace
*as
)
851 view
= address_space_to_flatview(as
);
852 /* If somebody has replaced as->current_map concurrently,
853 * flatview_ref returns false.
855 } while (!flatview_ref(view
));
static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    MemoryRegionIoeventfd *ioeventfds = NULL;
    AddrRange tmp;
    unsigned i;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  int128_sub(fr->addr.start,
                                             int128_make64(fr->offset_in_region)));
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                ioeventfds = g_realloc(ioeventfds,
                                       ioeventfd_nb * sizeof(*ioeventfds));
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
    flatview_unref(view);
}
static void address_space_update_topology_pass(AddressSpace *as,
                                               const FlatView *old_view,
                                               const FlatView *new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view->nr || inew < new_view->nr) {
        if (iold < old_view->nr) {
            frold = &old_view->ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view->nr) {
            frnew = &new_view->ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || int128_lt(frold->addr.start, frnew->addr.start)
                || (int128_eq(frold->addr.start, frnew->addr.start)
                    && !flatrange_equal(frold, frnew)))) {
            /* In old but not in new, or in both but attributes changed. */

            if (!adding) {
                MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both and unchanged (except logging may have changed) */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
                if (frnew->dirty_log_mask & ~frold->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
                if (frold->dirty_log_mask & ~frnew->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
            }

            ++inew;
        }
    }
}
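
/*
 * Note (added comment): this runs twice per topology update, first with
 * adding == false and then with adding == true, so listeners see all
 * region_del callbacks for vanished ranges before any region_add or
 * log_start/log_stop for new or changed ones.
 */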
static void flatviews_init(void)
{
    static FlatView *empty_view;

    if (flat_views) {
        return;
    }

    flat_views = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL,
                                       (GDestroyNotify) flatview_unref);
    if (!empty_view) {
        empty_view = generate_memory_topology(NULL);
        /* We keep it alive forever in the global variable.  */
        flatview_ref(empty_view);
    } else {
        g_hash_table_replace(flat_views, NULL, empty_view);
        flatview_ref(empty_view);
    }
}

static void flatviews_reset(void)
{
    AddressSpace *as;

    if (flat_views) {
        g_hash_table_unref(flat_views);
        flat_views = NULL;
    }
    flatviews_init();

    /* Render unique FVs */
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        MemoryRegion *physmr = memory_region_get_flatview_root(as->root);

        if (g_hash_table_lookup(flat_views, physmr)) {
            continue;
        }

        generate_memory_topology(physmr);
    }
}
static void address_space_set_flatview(AddressSpace *as)
{
    FlatView *old_view = address_space_to_flatview(as);
    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);
    FlatView *new_view = g_hash_table_lookup(flat_views, physmr);

    assert(new_view);

    if (old_view == new_view) {
        return;
    }

    if (old_view) {
        flatview_ref(old_view);
    }

    flatview_ref(new_view);

    if (!QTAILQ_EMPTY(&as->listeners)) {
        FlatView tmpview = { .nr = 0 }, *old_view2 = old_view;

        if (!old_view2) {
            old_view2 = &tmpview;
        }
        address_space_update_topology_pass(as, old_view2, new_view, false);
        address_space_update_topology_pass(as, old_view2, new_view, true);
    }

    /* Writes are protected by the BQL.  */
    atomic_rcu_set(&as->current_map, new_view);
    if (old_view) {
        flatview_unref(old_view);
    }

    /* Note that all the old MemoryRegions are still alive up to this
     * point.  This relieves most MemoryListeners from the need to
     * ref/unref the MemoryRegions they get---unless they use them
     * outside the iothread mutex, in which case precise reference
     * counting is necessary.
     */
    if (old_view) {
        flatview_unref(old_view);
    }
}
static void address_space_update_topology(AddressSpace *as)
{
    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);

    flatviews_init();
    if (!g_hash_table_lookup(flat_views, physmr)) {
        generate_memory_topology(physmr);
    }
    address_space_set_flatview(as);
}

void memory_region_transaction_begin(void)
{
    qemu_flush_coalesced_mmio_buffer();
    ++memory_region_transaction_depth;
}

void memory_region_transaction_commit(void)
{
    AddressSpace *as;

    assert(memory_region_transaction_depth);
    assert(qemu_mutex_iothread_locked());

    --memory_region_transaction_depth;
    if (!memory_region_transaction_depth) {
        if (memory_region_update_pending) {
            flatviews_reset();

            MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);

            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_set_flatview(as);
                address_space_update_ioeventfds(as);
            }
            memory_region_update_pending = false;
            ioeventfd_update_pending = false;
            MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
        } else if (ioeventfd_update_pending) {
            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_ioeventfds(as);
            }
            ioeventfd_update_pending = false;
        }
    }
}
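
/*
 * Typical usage (illustrative sketch, not from this file): batch
 * several layout changes so that FlatViews are rebuilt and listeners
 * are notified only once:
 *
 *     memory_region_transaction_begin();
 *     memory_region_set_enabled(mr_a, false);
 *     memory_region_set_address(mr_b, 0xfee00000);
 *     memory_region_transaction_commit();
 *
 * mr_a, mr_b and the address are placeholders.
 */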
static void memory_region_destructor_none(MemoryRegion *mr)
{
}

static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_block);
}

static bool memory_region_need_escape(char c)
{
    return c == '/' || c == '[' || c == '\\' || c == ']';
}

static char *memory_region_escape_name(const char *name)
{
    const char *p;
    char *escaped, *q;
    uint8_t c;
    size_t bytes = 0;

    for (p = name; *p; p++) {
        bytes += memory_region_need_escape(*p) ? 4 : 1;
    }
    if (bytes == p - name) {
       return g_memdup(name, bytes + 1);
    }

    escaped = g_malloc(bytes + 1);
    for (p = name, q = escaped; *p; p++) {
        c = *p;
        if (unlikely(memory_region_need_escape(c))) {
            *q++ = '\\';
            *q++ = 'x';
            *q++ = "0123456789abcdef"[c >> 4];
            c = "0123456789abcdef"[c & 15];
        }
        *q++ = c;
    }
    *q = 0;
    return escaped;
}
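
/*
 * Example (added comment): a region named "pci/bar[0]" escapes to
 * "pci\x2fbar\x5b0\x5d", keeping the QOM path unambiguous.
 */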
static void memory_region_do_init(MemoryRegion *mr,
                                  Object *owner,
                                  const char *name,
                                  uint64_t size)
{
    mr->size = int128_make64(size);
    if (size == UINT64_MAX) {
        mr->size = int128_2_64();
    }
    mr->name = g_strdup(name);
    mr->owner = owner;
    mr->ram_block = NULL;

    if (name) {
        char *escaped_name = memory_region_escape_name(name);
        char *name_array = g_strdup_printf("%s[*]", escaped_name);

        if (!owner) {
            owner = container_get(qdev_get_machine(), "/unattached");
        }

        object_property_add_child(owner, name_array, OBJECT(mr), &error_abort);
        object_unref(OBJECT(mr));
        g_free(name_array);
        g_free(escaped_name);
    }
}

void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size)
{
    object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION);
    memory_region_do_init(mr, owner, name, size);
}
static void memory_region_get_addr(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = mr->addr;

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_get_container(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    gchar *path = (gchar *)"";

    if (mr->container) {
        path = object_get_canonical_path(OBJECT(mr->container));
    }
    visit_type_str(v, name, &path, errp);
    if (mr->container) {
        g_free(path);
    }
}

static Object *memory_region_resolve_container(Object *obj, void *opaque,
                                               const gchar *part)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    return OBJECT(mr->container);
}

static void memory_region_get_priority(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    int32_t value = mr->priority;

    visit_type_int32(v, name, &value, errp);
}

static void memory_region_get_size(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = memory_region_size(mr);

    visit_type_uint64(v, name, &value, errp);
}
static void memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    ObjectProperty *op;

    mr->ops = &unassigned_mem_ops;
    mr->enabled = true;
    mr->romd_mode = true;
    mr->global_locking = true;
    mr->destructor = memory_region_destructor_none;
    QTAILQ_INIT(&mr->subregions);
    QTAILQ_INIT(&mr->coalesced);

    op = object_property_add(OBJECT(mr), "container",
                             "link<" TYPE_MEMORY_REGION ">",
                             memory_region_get_container,
                             NULL, /* memory_region_set_container */
                             NULL, NULL, &error_abort);
    op->resolve = memory_region_resolve_container;

    object_property_add(OBJECT(mr), "addr", "uint64",
                        memory_region_get_addr,
                        NULL, /* memory_region_set_addr */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "priority", "uint32",
                        memory_region_get_priority,
                        NULL, /* memory_region_set_priority */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "size", "uint64",
                        memory_region_get_size,
                        NULL, /* memory_region_set_size, */
                        NULL, NULL, &error_abort);
}

static void iommu_memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    mr->is_iommu = true;
}
static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
    if (current_cpu != NULL) {
        bool is_exec = current_cpu->mem_io_access_type == MMU_INST_FETCH;

        cpu_unassigned_access(current_cpu, addr, false, is_exec, 0, size);
    }
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
    if (current_cpu != NULL) {
        cpu_unassigned_access(current_cpu, addr, true, false, 0, size);
    }
}

static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
                                   unsigned size, bool is_write,
                                   MemTxAttrs attrs)
{
    return false;
}

const MemoryRegionOps unassigned_mem_ops = {
    .valid.accepts = unassigned_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static uint64_t memory_region_ram_device_read(void *opaque,
                                              hwaddr addr, unsigned size)
{
    MemoryRegion *mr = opaque;
    uint64_t data = (uint64_t)~0;

    switch (size) {
    case 1:
        data = *(uint8_t *)(mr->ram_block->host + addr);
        break;
    case 2:
        data = *(uint16_t *)(mr->ram_block->host + addr);
        break;
    case 4:
        data = *(uint32_t *)(mr->ram_block->host + addr);
        break;
    case 8:
        data = *(uint64_t *)(mr->ram_block->host + addr);
        break;
    }

    trace_memory_region_ram_device_read(get_cpu_index(), mr, addr, data, size);

    return data;
}

static void memory_region_ram_device_write(void *opaque, hwaddr addr,
                                           uint64_t data, unsigned size)
{
    MemoryRegion *mr = opaque;

    trace_memory_region_ram_device_write(get_cpu_index(), mr, addr, data, size);

    switch (size) {
    case 1:
        *(uint8_t *)(mr->ram_block->host + addr) = (uint8_t)data;
        break;
    case 2:
        *(uint16_t *)(mr->ram_block->host + addr) = (uint16_t)data;
        break;
    case 4:
        *(uint32_t *)(mr->ram_block->host + addr) = (uint32_t)data;
        break;
    case 8:
        *(uint64_t *)(mr->ram_block->host + addr) = data;
        break;
    }
}

static const MemoryRegionOps ram_device_mem_ops = {
    .read = memory_region_ram_device_read,
    .write = memory_region_ram_device_write,
    .endianness = DEVICE_HOST_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
};
bool memory_region_access_valid(MemoryRegion *mr,
                                hwaddr addr,
                                unsigned size,
                                bool is_write,
                                MemTxAttrs attrs)
{
    int access_size_min, access_size_max;
    int access_size, i;

    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        return false;
    }

    if (!mr->ops->valid.accepts) {
        return true;
    }

    access_size_min = mr->ops->valid.min_access_size;
    if (!mr->ops->valid.min_access_size) {
        access_size_min = 1;
    }

    access_size_max = mr->ops->valid.max_access_size;
    if (!mr->ops->valid.max_access_size) {
        access_size_max = 4;
    }

    access_size = MAX(MIN(size, access_size_max), access_size_min);
    for (i = 0; i < size; i += access_size) {
        if (!mr->ops->valid.accepts(mr->opaque, addr + i, access_size,
                                    is_write, attrs)) {
            return false;
        }
    }

    return true;
}
memory_region_dispatch_read1(MemoryRegion
*mr
,
1415 if (mr
->ops
->read
) {
1416 return access_with_adjusted_size(addr
, pval
, size
,
1417 mr
->ops
->impl
.min_access_size
,
1418 mr
->ops
->impl
.max_access_size
,
1419 memory_region_read_accessor
,
1421 } else if (mr
->ops
->read_with_attrs
) {
1422 return access_with_adjusted_size(addr
, pval
, size
,
1423 mr
->ops
->impl
.min_access_size
,
1424 mr
->ops
->impl
.max_access_size
,
1425 memory_region_read_with_attrs_accessor
,
1428 return access_with_adjusted_size(addr
, pval
, size
, 1, 4,
1429 memory_region_oldmmio_read_accessor
,
1434 MemTxResult
memory_region_dispatch_read(MemoryRegion
*mr
,
1442 if (!memory_region_access_valid(mr
, addr
, size
, false, attrs
)) {
1443 *pval
= unassigned_mem_read(mr
, addr
, size
);
1444 return MEMTX_DECODE_ERROR
;
1447 r
= memory_region_dispatch_read1(mr
, addr
, pval
, size
, attrs
);
1448 adjust_endianness(mr
, pval
, size
);
/* Return true if an eventfd was signalled */
static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr,
                                                  hwaddr addr,
                                                  uint64_t data,
                                                  unsigned size,
                                                  MemTxAttrs attrs)
{
    MemoryRegionIoeventfd ioeventfd = {
        .addr = addrrange_make(int128_make64(addr), int128_make64(size)),
        .data = data,
    };
    unsigned i;

    for (i = 0; i < mr->ioeventfd_nb; i++) {
        ioeventfd.match_data = mr->ioeventfds[i].match_data;
        ioeventfd.e = mr->ioeventfds[i].e;

        if (memory_region_ioeventfd_equal(&ioeventfd, &mr->ioeventfds[i])) {
            event_notifier_set(ioeventfd.e);
            return true;
        }
    }

    return false;
}
memory_region_dispatch_write(MemoryRegion
*mr
,
1484 if (!memory_region_access_valid(mr
, addr
, size
, true, attrs
)) {
1485 unassigned_mem_write(mr
, addr
, data
, size
);
1486 return MEMTX_DECODE_ERROR
;
1489 adjust_endianness(mr
, &data
, size
);
1491 if ((!kvm_eventfds_enabled()) &&
1492 memory_region_dispatch_write_eventfds(mr
, addr
, data
, size
, attrs
)) {
1496 if (mr
->ops
->write
) {
1497 return access_with_adjusted_size(addr
, &data
, size
,
1498 mr
->ops
->impl
.min_access_size
,
1499 mr
->ops
->impl
.max_access_size
,
1500 memory_region_write_accessor
, mr
,
1502 } else if (mr
->ops
->write_with_attrs
) {
1504 access_with_adjusted_size(addr
, &data
, size
,
1505 mr
->ops
->impl
.min_access_size
,
1506 mr
->ops
->impl
.max_access_size
,
1507 memory_region_write_with_attrs_accessor
,
1510 return access_with_adjusted_size(addr
, &data
, size
, 1, 4,
1511 memory_region_oldmmio_write_accessor
,
1516 void memory_region_init_io(MemoryRegion
*mr
,
1518 const MemoryRegionOps
*ops
,
1523 memory_region_init(mr
, owner
, name
, size
);
1524 mr
->ops
= ops
? ops
: &unassigned_mem_ops
;
1525 mr
->opaque
= opaque
;
1526 mr
->terminates
= true;
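
/*
 * Usage sketch (illustrative, not from this file): a device typically
 * supplies MemoryRegionOps and maps the region into an address space.
 * "mydev_*", "s" and the addresses below are placeholders.
 *
 *     static const MemoryRegionOps mydev_ops = {
 *         .read = mydev_read,
 *         .write = mydev_write,
 *         .endianness = DEVICE_LITTLE_ENDIAN,
 *         .impl = { .min_access_size = 4, .max_access_size = 4 },
 *     };
 *
 *     memory_region_init_io(&s->iomem, OBJECT(s), &mydev_ops, s,
 *                           "mydev-mmio", 0x1000);
 *     memory_region_add_subregion(get_system_memory(), 0x10000000,
 *                                 &s->iomem);
 */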
void memory_region_init_ram_nomigrate(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp)
{
    memory_region_init_ram_shared_nomigrate(mr, owner, name, size, false, errp);
}

void memory_region_init_ram_shared_nomigrate(MemoryRegion *mr,
                                             Object *owner,
                                             const char *name,
                                             uint64_t size,
                                             bool share,
                                             Error **errp)
{
    Error *err = NULL;
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, share, mr, &err);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
}

void memory_region_init_resizeable_ram(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       uint64_t max_size,
                                       void (*resized)(const char*,
                                                       uint64_t length,
                                                       void *host),
                                       Error **errp)
{
    Error *err = NULL;
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
                                              mr, &err);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
}

#ifdef __linux__
void memory_region_init_ram_from_file(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      uint64_t align,
                                      uint32_t ram_flags,
                                      const char *path,
                                      Error **errp)
{
    Error *err = NULL;
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->align = align;
    mr->ram_block = qemu_ram_alloc_from_file(size, mr, ram_flags, path, &err);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
}

void memory_region_init_ram_from_fd(MemoryRegion *mr,
                                    struct Object *owner,
                                    const char *name,
                                    uint64_t size,
                                    bool share,
                                    int fd,
                                    Error **errp)
{
    Error *err = NULL;
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_from_fd(size, mr,
                                           share ? RAM_SHARED : 0,
                                           fd, &err);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
}
#endif
void memory_region_init_ram_ptr(MemoryRegion *mr,
                                Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;

    /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL.  */
    assert(ptr != NULL);
    mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
}

void memory_region_init_ram_device_ptr(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       void *ptr)
{
    memory_region_init_ram_ptr(mr, owner, name, size, ptr);
    mr->ram_device = true;
    mr->ops = &ram_device_mem_ops;
    mr->opaque = mr;
}

void memory_region_init_alias(MemoryRegion *mr,
                              Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->alias = orig;
    mr->alias_offset = offset;
}
void memory_region_init_rom_nomigrate(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp)
{
    Error *err = NULL;
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->readonly = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, false, mr, &err);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
}

void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
                                             struct Object *owner,
                                             const MemoryRegionOps *ops,
                                             void *opaque,
                                             const char *name,
                                             uint64_t size,
                                             Error **errp)
{
    Error *err = NULL;
    assert(ops);
    memory_region_init(mr, owner, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->rom_device = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, false, mr, &err);
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
}
void memory_region_init_iommu(void *_iommu_mr,
                              size_t instance_size,
                              const char *mrtypename,
                              Object *owner,
                              const char *name,
                              uint64_t size)
{
    struct IOMMUMemoryRegion *iommu_mr;
    struct MemoryRegion *mr;

    object_initialize(_iommu_mr, instance_size, mrtypename);
    mr = MEMORY_REGION(_iommu_mr);
    memory_region_do_init(mr, owner, name, size);
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    mr->terminates = true;  /* then re-forwards */
    QLIST_INIT(&iommu_mr->iommu_notify);
    iommu_mr->iommu_notify_flags = IOMMU_NOTIFIER_NONE;
}

static void memory_region_finalize(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    assert(!mr->container);

    /* We know the region is not visible in any address space (it
     * does not have a container and cannot be a root either because
     * it has no references, so we can blindly clear mr->enabled.
     * memory_region_set_enabled instead could trigger a transaction
     * and cause an infinite loop.
     */
    mr->enabled = false;
    memory_region_transaction_begin();
    while (!QTAILQ_EMPTY(&mr->subregions)) {
        MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions);
        memory_region_del_subregion(mr, subregion);
    }
    memory_region_transaction_commit();

    mr->destructor(mr);
    memory_region_clear_coalescing(mr);
    g_free((char *)mr->name);
    g_free(mr->ioeventfds);
}
Object *memory_region_owner(MemoryRegion *mr)
{
    Object *obj = OBJECT(mr);
    return obj->parent;
}

void memory_region_ref(MemoryRegion *mr)
{
    /* MMIO callbacks most likely will access data that belongs
     * to the owner, hence the need to ref/unref the owner whenever
     * the memory region is in use.
     *
     * The memory region is a child of its owner.  As long as the
     * owner doesn't call unparent itself on the memory region,
     * ref-ing the owner will also keep the memory region alive.
     * Memory regions without an owner are supposed to never go away;
     * we do not ref/unref them because it slows down DMA sensibly.
     */
    if (mr && mr->owner) {
        object_ref(mr->owner);
    }
}

void memory_region_unref(MemoryRegion *mr)
{
    if (mr && mr->owner) {
        object_unref(mr->owner);
    }
}

uint64_t memory_region_size(MemoryRegion *mr)
{
    if (int128_eq(mr->size, int128_2_64())) {
        return UINT64_MAX;
    }
    return int128_get64(mr->size);
}

const char *memory_region_name(const MemoryRegion *mr)
{
    if (!mr->name) {
        ((MemoryRegion *)mr)->name =
            object_get_canonical_path_component(OBJECT(mr));
    }
    return mr->name;
}

bool memory_region_is_ram_device(MemoryRegion *mr)
{
    return mr->ram_device;
}

uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
{
    uint8_t mask = mr->dirty_log_mask;
    if (global_dirty_log && mr->ram_block) {
        mask |= (1 << DIRTY_MEMORY_MIGRATION);
    }
    return mask;
}

bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
{
    return memory_region_get_dirty_log_mask(mr) & (1 << client);
}
static void memory_region_update_iommu_notify_flags(IOMMUMemoryRegion *iommu_mr)
{
    IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE;
    IOMMUNotifier *iommu_notifier;
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
        flags |= iommu_notifier->notifier_flags;
    }

    if (flags != iommu_mr->iommu_notify_flags && imrc->notify_flag_changed) {
        imrc->notify_flag_changed(iommu_mr,
                                  iommu_mr->iommu_notify_flags,
                                  flags);
    }

    iommu_mr->iommu_notify_flags = flags;
}

void memory_region_register_iommu_notifier(MemoryRegion *mr,
                                           IOMMUNotifier *n)
{
    IOMMUMemoryRegion *iommu_mr;

    if (mr->alias) {
        memory_region_register_iommu_notifier(mr->alias, n);
        return;
    }

    /* We need to register for at least one bitfield */
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
    assert(n->start <= n->end);
    assert(n->iommu_idx >= 0 &&
           n->iommu_idx < memory_region_iommu_num_indexes(iommu_mr));

    QLIST_INSERT_HEAD(&iommu_mr->iommu_notify, n, node);
    memory_region_update_iommu_notify_flags(iommu_mr);
}

uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    if (imrc->get_min_page_size) {
        return imrc->get_min_page_size(iommu_mr);
    }
    return TARGET_PAGE_SIZE;
}
void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
{
    MemoryRegion *mr = MEMORY_REGION(iommu_mr);
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
    hwaddr addr, granularity;
    IOMMUTLBEntry iotlb;

    /* If the IOMMU has its own replay callback, override */
    if (imrc->replay) {
        imrc->replay(iommu_mr, n);
        return;
    }

    granularity = memory_region_iommu_get_min_page_size(iommu_mr);

    for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
        iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, n->iommu_idx);
        if (iotlb.perm != IOMMU_NONE) {
            n->notify(n, &iotlb);
        }

        /* if (2^64 - MR size) < granularity, it's possible to get an
         * infinite loop here.  This should catch such a wraparound */
        if ((addr + granularity) < addr) {
            break;
        }
    }
}

void memory_region_iommu_replay_all(IOMMUMemoryRegion *iommu_mr)
{
    IOMMUNotifier *notifier;

    IOMMU_NOTIFIER_FOREACH(notifier, iommu_mr) {
        memory_region_iommu_replay(iommu_mr, notifier);
    }
}

void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
                                             IOMMUNotifier *n)
{
    IOMMUMemoryRegion *iommu_mr;

    if (mr->alias) {
        memory_region_unregister_iommu_notifier(mr->alias, n);
        return;
    }
    QLIST_REMOVE(n, node);
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    memory_region_update_iommu_notify_flags(iommu_mr);
}

void memory_region_notify_one(IOMMUNotifier *notifier,
                              IOMMUTLBEntry *entry)
{
    IOMMUNotifierFlag request_flags;

    /*
     * Skip the notification if the notification does not overlap
     * with registered range.
     */
    if (notifier->start > entry->iova + entry->addr_mask ||
        notifier->end < entry->iova) {
        return;
    }

    if (entry->perm & IOMMU_RW) {
        request_flags = IOMMU_NOTIFIER_MAP;
    } else {
        request_flags = IOMMU_NOTIFIER_UNMAP;
    }

    if (notifier->notifier_flags & request_flags) {
        notifier->notify(notifier, entry);
    }
}

void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
                                int iommu_idx,
                                IOMMUTLBEntry entry)
{
    IOMMUNotifier *iommu_notifier;

    assert(memory_region_is_iommu(MEMORY_REGION(iommu_mr)));

    IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
        if (iommu_notifier->iommu_idx == iommu_idx) {
            memory_region_notify_one(iommu_notifier, &entry);
        }
    }
}

int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
                                 enum IOMMUMemoryRegionAttr attr,
                                 void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    if (!imrc->get_attr) {
        return -EINVAL;
    }

    return imrc->get_attr(iommu_mr, attr, data);
}

int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
                                       MemTxAttrs attrs)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    if (!imrc->attrs_to_index) {
        return 0;
    }

    return imrc->attrs_to_index(iommu_mr, attrs);
}

int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    if (!imrc->num_indexes) {
        return 1;
    }

    return imrc->num_indexes(iommu_mr);
}
void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
{
    uint8_t mask = 1 << client;
    uint8_t old_logging;

    assert(client == DIRTY_MEMORY_VGA);
    old_logging = mr->vga_logging_count;
    mr->vga_logging_count += log ? 1 : -1;
    if (!!old_logging == !!mr->vga_logging_count) {
        return;
    }

    memory_region_transaction_begin();
    mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size, unsigned client)
{
    assert(mr->ram_block);
    return cpu_physical_memory_get_dirty(memory_region_get_ram_addr(mr) + addr,
                                         size, client);
}

void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size)
{
    assert(mr->ram_block);
    cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
                                        size,
                                        memory_region_get_dirty_log_mask(mr));
}
static void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
{
    MemoryListener *listener;
    AddressSpace *as;
    FlatView *view;
    FlatRange *fr;

    /* If the same address space has multiple log_sync listeners, we
     * visit that address space's FlatView multiple times.  But because
     * log_sync listeners are rare, it's still cheaper than walking each
     * address space once.
     */
    QTAILQ_FOREACH(listener, &memory_listeners, link) {
        if (!listener->log_sync) {
            continue;
        }
        as = listener->address_space;
        view = address_space_get_flatview(as);
        FOR_EACH_FLAT_RANGE(fr, view) {
            if (fr->dirty_log_mask && (!mr || fr->mr == mr)) {
                MemoryRegionSection mrs = section_from_flat_range(fr, view);
                listener->log_sync(listener, &mrs);
            }
        }
        flatview_unref(view);
    }
}

DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
                                                            hwaddr addr,
                                                            hwaddr size,
                                                            unsigned client)
{
    assert(mr->ram_block);
    memory_region_sync_dirty_bitmap(mr);
    return cpu_physical_memory_snapshot_and_clear_dirty(
                memory_region_get_ram_addr(mr) + addr, size, client);
}

bool memory_region_snapshot_get_dirty(MemoryRegion *mr, DirtyBitmapSnapshot *snap,
                                      hwaddr addr, hwaddr size)
{
    assert(mr->ram_block);
    return cpu_physical_memory_snapshot_get_dirty(snap,
                memory_region_get_ram_addr(mr) + addr, size);
}
void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
{
    if (mr->readonly != readonly) {
        memory_region_transaction_begin();
        mr->readonly = readonly;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}

void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
{
    if (mr->romd_mode != romd_mode) {
        memory_region_transaction_begin();
        mr->romd_mode = romd_mode;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}

void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
                               hwaddr size, unsigned client)
{
    assert(mr->ram_block);
    cpu_physical_memory_test_and_clear_dirty(
        memory_region_get_ram_addr(mr) + addr, size, client);
}
int memory_region_get_fd(MemoryRegion *mr)
{
    int fd;

    rcu_read_lock();
    while (mr->alias) {
        mr = mr->alias;
    }
    fd = mr->ram_block->fd;
    rcu_read_unlock();

    return fd;
}

void *memory_region_get_ram_ptr(MemoryRegion *mr)
{
    void *ptr;
    uint64_t offset = 0;

    rcu_read_lock();
    while (mr->alias) {
        offset += mr->alias_offset;
        mr = mr->alias;
    }
    assert(mr->ram_block);
    ptr = qemu_map_ram_ptr(mr->ram_block, offset);
    rcu_read_unlock();

    return ptr;
}

MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset)
{
    RAMBlock *block;

    block = qemu_ram_block_from_host(ptr, false, offset);
    if (!block) {
        return NULL;
    }

    return block->mr;
}

ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
{
    return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID;
}

void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp)
{
    assert(mr->ram_block);

    qemu_ram_resize(mr->ram_block, newsize, errp);
}
static void memory_region_update_coalesced_range_as(MemoryRegion *mr, AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    CoalescedMemoryRange *cmr;
    AddrRange tmp;
    MemoryRegionSection section;

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        if (fr->mr == mr) {
            section = (MemoryRegionSection) {
                .fv = view,
                .offset_within_address_space = int128_get64(fr->addr.start),
                .size = fr->addr.size,
            };

            MEMORY_LISTENER_CALL(as, coalesced_mmio_del, Reverse, &section,
                                 int128_get64(fr->addr.start),
                                 int128_get64(fr->addr.size));
            QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
                tmp = addrrange_shift(cmr->addr,
                                      int128_sub(fr->addr.start,
                                                 int128_make64(fr->offset_in_region)));
                if (!addrrange_intersects(tmp, fr->addr)) {
                    continue;
                }
                tmp = addrrange_intersection(tmp, fr->addr);
                MEMORY_LISTENER_CALL(as, coalesced_mmio_add, Forward, &section,
                                     int128_get64(tmp.start),
                                     int128_get64(tmp.size));
            }
        }
    }
    flatview_unref(view);
}

static void memory_region_update_coalesced_range(MemoryRegion *mr)
{
    AddressSpace *as;

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        memory_region_update_coalesced_range_as(mr, as);
    }
}

void memory_region_set_coalescing(MemoryRegion *mr)
{
    memory_region_clear_coalescing(mr);
    memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
}

void memory_region_add_coalescing(MemoryRegion *mr,
                                  hwaddr offset,
                                  uint64_t size)
{
    CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));

    cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
    QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
    memory_region_update_coalesced_range(mr);
    memory_region_set_flush_coalesced(mr);
}

void memory_region_clear_coalescing(MemoryRegion *mr)
{
    CoalescedMemoryRange *cmr;
    bool updated = false;

    qemu_flush_coalesced_mmio_buffer();
    mr->flush_coalesced_mmio = false;

    while (!QTAILQ_EMPTY(&mr->coalesced)) {
        cmr = QTAILQ_FIRST(&mr->coalesced);
        QTAILQ_REMOVE(&mr->coalesced, cmr, link);
        g_free(cmr);
        updated = true;
    }

    if (updated) {
        memory_region_update_coalesced_range(mr);
    }
}

void memory_region_set_flush_coalesced(MemoryRegion *mr)
{
    mr->flush_coalesced_mmio = true;
}

void memory_region_clear_flush_coalesced(MemoryRegion *mr)
{
    qemu_flush_coalesced_mmio_buffer();
    if (QTAILQ_EMPTY(&mr->coalesced)) {
        mr->flush_coalesced_mmio = false;
    }
}

void memory_region_clear_global_locking(MemoryRegion *mr)
{
    mr->global_locking = false;
}
static bool userspace_eventfd_warning;

void memory_region_add_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .e = e,
    };
    unsigned i;

    if (kvm_enabled() && (!(kvm_eventfds_enabled() ||
                            userspace_eventfd_warning))) {
        userspace_eventfd_warning = true;
        error_report("Using eventfd without MMIO binding in KVM. "
                     "Suboptimal performance expected");
    }

    if (size) {
        adjust_endianness(mr, &mrfd.data, size);
    }
    memory_region_transaction_begin();
    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_before(&mrfd, &mr->ioeventfds[i])) {
            break;
        }
    }
    ++mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
    mr->ioeventfds[i] = mrfd;
    ioeventfd_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}
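
/*
 * Note (added comment): with KVM ioeventfd support, a matching guest
 * write is completed in the kernel by signalling the EventNotifier
 * directly; otherwise the userspace fallback in
 * memory_region_dispatch_write_eventfds() above performs the same
 * signalling.
 */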
void memory_region_del_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .e = e,
    };
    unsigned i;

    if (size) {
        adjust_endianness(mr, &mrfd.data, size);
    }
    memory_region_transaction_begin();
    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_equal(&mrfd, &mr->ioeventfds[i])) {
            break;
        }
    }
    assert(i != mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
    --mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
    ioeventfd_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

static void memory_region_update_container_subregions(MemoryRegion *subregion)
{
    MemoryRegion *mr = subregion->container;
    MemoryRegion *other;

    memory_region_transaction_begin();

    memory_region_ref(subregion);
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->priority >= other->priority) {
            QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
            goto done;
        }
    }
    QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
done:
    memory_region_update_pending |= mr->enabled && subregion->enabled;
    memory_region_transaction_commit();
}
static void memory_region_add_subregion_common(MemoryRegion *mr,
                                               hwaddr offset,
                                               MemoryRegion *subregion)
{
    assert(!subregion->container);
    subregion->container = mr;
    subregion->addr = offset;
    memory_region_update_container_subregions(subregion);
}

void memory_region_add_subregion(MemoryRegion *mr,
                                 hwaddr offset,
                                 MemoryRegion *subregion)
{
    subregion->priority = 0;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         hwaddr offset,
                                         MemoryRegion *subregion,
                                         int priority)
{
    subregion->priority = priority;
    memory_region_add_subregion_common(mr, offset, subregion);
}
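
/*
 * Example (added comment): mapping a ROM at priority 1 over RAM at
 * priority 0 makes the overlapping window resolve to the ROM; the RAM
 * becomes visible again once the higher-priority region is disabled or
 * removed.
 */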
void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion)
{
    memory_region_transaction_begin();
    assert(subregion->container == mr);
    subregion->container = NULL;
    QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
    memory_region_unref(subregion);
    memory_region_update_pending |= mr->enabled && subregion->enabled;
    memory_region_transaction_commit();
}

void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
{
    if (enabled == mr->enabled) {
        return;
    }
    memory_region_transaction_begin();
    mr->enabled = enabled;
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}

void memory_region_set_size(MemoryRegion *mr, uint64_t size)
{
    Int128 s = int128_make64(size);

    if (size == UINT64_MAX) {
        s = int128_2_64();
    }
    if (int128_eq(s, mr->size)) {
        return;
    }
    memory_region_transaction_begin();
    mr->size = s;
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}

static void memory_region_readd_subregion(MemoryRegion *mr)
{
    MemoryRegion *container = mr->container;

    if (container) {
        memory_region_transaction_begin();
        memory_region_ref(mr);
        memory_region_del_subregion(container, mr);
        mr->container = container;
        memory_region_update_container_subregions(mr);
        memory_region_unref(mr);
        memory_region_transaction_commit();
    }
}
void memory_region_set_address(MemoryRegion *mr, hwaddr addr)
{
    if (addr != mr->addr) {
        mr->addr = addr;
        memory_region_readd_subregion(mr);
    }
}

void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset)
{
    assert(mr->alias);

    if (offset == mr->alias_offset) {
        return;
    }

    memory_region_transaction_begin();
    mr->alias_offset = offset;
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

uint64_t memory_region_get_alignment(const MemoryRegion *mr)
{
    return mr->align;
}
static int cmp_flatrange_addr(const void *addr_, const void *fr_)
{
    const AddrRange *addr = addr_;
    const FlatRange *fr = fr_;

    if (int128_le(addrrange_end(*addr), fr->addr.start)) {
        return -1;
    } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
        return 1;
    }
    return 0;
}

static FlatRange *flatview_lookup(FlatView *view, AddrRange addr)
{
    return bsearch(&addr, view->ranges, view->nr,
                   sizeof(FlatRange), cmp_flatrange_addr);
}

bool memory_region_is_mapped(MemoryRegion *mr)
{
    return mr->container ? true : false;
}
/* Same as memory_region_find, but it does not add a reference to the
 * returned region.  It must be called from an RCU critical section.
 */
static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr,
                                                  hwaddr addr, uint64_t size)
{
    MemoryRegionSection ret = { .mr = NULL };
    MemoryRegion *root;
    AddressSpace *as;
    AddrRange range;
    FlatView *view;
    FlatRange *fr;

    addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        addr += root->addr;
    }

    as = memory_region_to_address_space(root);
    if (!as) {
        return ret;
    }
    range = addrrange_make(int128_make64(addr), int128_make64(size));

    view = address_space_to_flatview(as);
    fr = flatview_lookup(view, range);
    if (!fr) {
        return ret;
    }

    while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) {
        --fr;
    }

    ret.mr = fr->mr;
    ret.fv = view;
    range = addrrange_intersection(range, fr->addr);
    ret.offset_within_region = fr->offset_in_region;
    ret.offset_within_region += int128_get64(int128_sub(range.start,
                                                        fr->addr.start));
    ret.size = range.size;
    ret.offset_within_address_space = int128_get64(range.start);
    ret.readonly = fr->readonly;
    return ret;
}
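
/*
 * Public wrapper: takes the RCU read lock around the lookup and adds a
 * reference on the returned region, which the caller must drop, e.g.
 * (hypothetical caller):
 *
 *     MemoryRegionSection sec = memory_region_find(sysmem, addr, 4);
 *     if (sec.mr) {
 *         ... use sec.offset_within_region, sec.size ...
 *         memory_region_unref(sec.mr);
 *     }
 */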
MemoryRegionSection memory_region_find(MemoryRegion *mr,
                                       hwaddr addr, uint64_t size)
{
    MemoryRegionSection ret;
    rcu_read_lock();
    ret = memory_region_find_rcu(mr, addr, size);
    if (ret.mr) {
        memory_region_ref(ret.mr);
    }
    rcu_read_unlock();
    return ret;
}
bool memory_region_present(MemoryRegion *container, hwaddr addr)
{
    MemoryRegion *mr;

    rcu_read_lock();
    mr = memory_region_find_rcu(container, addr, 1).mr;
    rcu_read_unlock();
    return mr && mr != container;
}
void memory_global_dirty_log_sync(void)
{
    memory_region_sync_dirty_bitmap(NULL);
}
static VMChangeStateEntry *vmstate_change;
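
/*
 * Handle for the deferred-stop handler installed by
 * memory_global_dirty_log_stop() when the VM is not running; see
 * memory_vm_change_state_handler() below.
 */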
void memory_global_dirty_log_start(void)
{
    if (vmstate_change) {
        qemu_del_vm_change_state_handler(vmstate_change);
        vmstate_change = NULL;
    }

    global_dirty_log = true;

    MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward);

    /* Refresh DIRTY_LOG_MIGRATION bit.  */
    memory_region_transaction_begin();
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}
static void memory_global_dirty_log_do_stop(void)
{
    global_dirty_log = false;

    /* Refresh DIRTY_LOG_MIGRATION bit.  */
    memory_region_transaction_begin();
    memory_region_update_pending = true;
    memory_region_transaction_commit();

    MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
}
static void memory_vm_change_state_handler(void *opaque, int running,
                                           RunState state)
{
    if (running) {
        memory_global_dirty_log_do_stop();

        if (vmstate_change) {
            qemu_del_vm_change_state_handler(vmstate_change);
            vmstate_change = NULL;
        }
    }
}
void memory_global_dirty_log_stop(void)
{
    if (!runstate_is_running()) {
        if (vmstate_change) {
            return;
        }
        vmstate_change = qemu_add_vm_change_state_handler(
                                memory_vm_change_state_handler, NULL);
        return;
    }

    memory_global_dirty_log_do_stop();
}
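
/*
 * Replay the current state of an address space to a freshly registered
 * listener: every FlatRange is presented as a region_add (plus log_start
 * where dirty logging is active), bracketed by begin/commit as in a
 * normal transaction.
 */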
static void listener_add_address_space(MemoryListener *listener,
                                       AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;

    if (listener->begin) {
        listener->begin(listener);
    }
    if (global_dirty_log) {
        if (listener->log_global_start) {
            listener->log_global_start(listener);
        }
    }

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        MemoryRegionSection section = section_from_flat_range(fr, view);

        if (listener->region_add) {
            listener->region_add(listener, &section);
        }
        if (fr->dirty_log_mask && listener->log_start) {
            listener->log_start(listener, &section, 0, fr->dirty_log_mask);
        }
    }
    if (listener->commit) {
        listener->commit(listener);
    }
    flatview_unref(view);
}
static void listener_del_address_space(MemoryListener *listener,
                                       AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;

    if (listener->begin) {
        listener->begin(listener);
    }
    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        MemoryRegionSection section = section_from_flat_range(fr, view);

        if (fr->dirty_log_mask && listener->log_stop) {
            listener->log_stop(listener, &section, fr->dirty_log_mask, 0);
        }
        if (listener->region_del) {
            listener->region_del(listener, &section);
        }
    }
    if (listener->commit) {
        listener->commit(listener);
    }
    flatview_unref(view);
}
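
/*
 * Typical use (hypothetical accelerator code): fill in the callbacks of
 * interest and register against one address space, e.g.
 *
 *     static MemoryListener my_listener = {
 *         .region_add = my_region_add,
 *         .region_del = my_region_del,
 *         .priority = 10,
 *     };
 *     memory_listener_register(&my_listener, &address_space_memory);
 *
 * Listeners are kept sorted by ascending priority; Forward callbacks run
 * in that order and Reverse callbacks in the opposite order.
 */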
void memory_listener_register(MemoryListener *listener, AddressSpace *as)
{
    MemoryListener *other = NULL;

    listener->address_space = as;
    if (QTAILQ_EMPTY(&memory_listeners)
        || listener->priority >= QTAILQ_LAST(&memory_listeners,
                                             memory_listeners)->priority) {
        QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
    } else {
        QTAILQ_FOREACH(other, &memory_listeners, link) {
            if (listener->priority < other->priority) {
                break;
            }
        }
        QTAILQ_INSERT_BEFORE(other, listener, link);
    }

    if (QTAILQ_EMPTY(&as->listeners)
        || listener->priority >= QTAILQ_LAST(&as->listeners,
                                             memory_listeners)->priority) {
        QTAILQ_INSERT_TAIL(&as->listeners, listener, link_as);
    } else {
        QTAILQ_FOREACH(other, &as->listeners, link_as) {
            if (listener->priority < other->priority) {
                break;
            }
        }
        QTAILQ_INSERT_BEFORE(other, listener, link_as);
    }

    listener_add_address_space(listener, as);
}
void memory_listener_unregister(MemoryListener *listener)
{
    if (!listener->address_space) {
        return;
    }

    listener_del_address_space(listener, listener->address_space);
    QTAILQ_REMOVE(&memory_listeners, listener, link);
    QTAILQ_REMOVE(&listener->address_space->listeners, listener, link_as);
    listener->address_space = NULL;
}
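
/*
 * Example (hypothetical): creating an address space over the system
 * memory region:
 *
 *     AddressSpace as;
 *     address_space_init(&as, get_system_memory(), "my-dma");
 *
 * The name is only used for debugging output (e.g. "info mtree") and is
 * duplicated, so the caller's string need not stay alive.
 */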
void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
{
    memory_region_ref(root);
    as->root = root;
    as->current_map = NULL;
    as->ioeventfd_nb = 0;
    as->ioeventfds = NULL;
    QTAILQ_INIT(&as->listeners);
    QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
    as->name = g_strdup(name ? name : "anonymous");
    address_space_update_topology(as);
    address_space_update_ioeventfds(as);
}
static void do_address_space_destroy(AddressSpace *as)
{
    assert(QTAILQ_EMPTY(&as->listeners));

    flatview_unref(as->current_map);
    g_free(as->name);
    g_free(as->ioeventfds);
    memory_region_unref(as->root);
}
void address_space_destroy(AddressSpace *as)
{
    MemoryRegion *root = as->root;

    /* Flush out anything from MemoryListeners listening in on this */
    memory_region_transaction_begin();
    as->root = NULL;
    memory_region_transaction_commit();
    QTAILQ_REMOVE(&address_spaces, as, address_spaces_link);

    /* At this point, as->dispatch and as->current_map are dummy
     * entries that the guest should never use.  Wait for the old
     * values to expire before freeing the data.
     */
    as->root = root;
    call_rcu(as, do_address_space_destroy, rcu);
}
static const char *memory_region_type(MemoryRegion *mr)
{
    if (memory_region_is_ram_device(mr)) {
        return "ramd";
    } else if (memory_region_is_romd(mr)) {
        return "romd";
    } else if (memory_region_is_rom(mr)) {
        return "rom";
    } else if (memory_region_is_ram(mr)) {
        return "ram";
    } else {
        return "i/o";
    }
}
typedef struct MemoryRegionList MemoryRegionList;

struct MemoryRegionList {
    const MemoryRegion *mr;
    QTAILQ_ENTRY(MemoryRegionList) mrqueue;
};

typedef QTAILQ_HEAD(mrqueue, MemoryRegionList) MemoryRegionListHead;

#define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
                           int128_sub((size), int128_one())) : 0)
#define MTREE_INDENT "  "
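
/*
 * MR_SIZE subtracts one so that ranges print as inclusive start-end
 * pairs (a zero-sized region prints as start-start).
 */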
static void mtree_expand_owner(fprintf_function mon_printf, void *f,
                               const char *label, Object *obj)
{
    DeviceState *dev = (DeviceState *) object_dynamic_cast(obj, TYPE_DEVICE);

    mon_printf(f, " %s:{%s", label, dev ? "dev" : "obj");
    if (dev && dev->id) {
        mon_printf(f, " id=%s", dev->id);
    } else {
        gchar *canonical_path = object_get_canonical_path(obj);
        if (canonical_path) {
            mon_printf(f, " path=%s", canonical_path);
            g_free(canonical_path);
        } else {
            mon_printf(f, " type=%s", object_get_typename(obj));
        }
    }
    mon_printf(f, "}");
}
static void mtree_print_mr_owner(fprintf_function mon_printf, void *f,
                                 const MemoryRegion *mr)
{
    Object *owner = mr->owner;
    Object *parent = memory_region_owner((MemoryRegion *)mr);

    if (!owner && !parent) {
        mon_printf(f, " orphan");
        return;
    }
    if (owner) {
        mtree_expand_owner(mon_printf, f, "owner", owner);
    }
    if (parent && parent != owner) {
        mtree_expand_owner(mon_printf, f, "parent", parent);
    }
}
static void mtree_print_mr(fprintf_function mon_printf, void *f,
                           const MemoryRegion *mr, unsigned int level,
                           hwaddr base,
                           MemoryRegionListHead *alias_print_queue,
                           bool owner)
{
    MemoryRegionList *new_ml, *ml, *next_ml;
    MemoryRegionListHead submr_print_queue;
    const MemoryRegion *submr;
    unsigned int i;
    hwaddr cur_start, cur_end;

    if (!mr) {
        return;
    }

    for (i = 0; i < level; i++) {
        mon_printf(f, MTREE_INDENT);
    }

    cur_start = base + mr->addr;
    cur_end = cur_start + MR_SIZE(mr->size);

    /*
     * Try to detect overflow of memory region. This should never
     * happen normally. When it happens, we dump something to warn the
     * user who is observing this.
     */
    if (cur_start < base || cur_end < cur_start) {
        mon_printf(f, "[DETECTED OVERFLOW!] ");
    }

    if (mr->alias) {
        MemoryRegionList *ml;
        bool found = false;

        /* check if the alias is already in the queue */
        QTAILQ_FOREACH(ml, alias_print_queue, mrqueue) {
            if (ml->mr == mr->alias) {
                found = true;
            }
        }

        if (!found) {
            ml = g_new(MemoryRegionList, 1);
            ml->mr = mr->alias;
            QTAILQ_INSERT_TAIL(alias_print_queue, ml, mrqueue);
        }
        mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx
                   " (prio %d, %s): alias %s @%s " TARGET_FMT_plx
                   "-" TARGET_FMT_plx "%s",
                   cur_start, cur_end,
                   mr->priority,
                   memory_region_type((MemoryRegion *)mr),
                   memory_region_name(mr),
                   memory_region_name(mr->alias),
                   mr->alias_offset,
                   mr->alias_offset + MR_SIZE(mr->size),
                   mr->enabled ? "" : " [disabled]");
        if (owner) {
            mtree_print_mr_owner(mon_printf, f, mr);
        }
    } else {
        mon_printf(f,
                   TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d, %s): %s%s",
                   cur_start, cur_end,
                   mr->priority,
                   memory_region_type((MemoryRegion *)mr),
                   memory_region_name(mr),
                   mr->enabled ? "" : " [disabled]");
        if (owner) {
            mtree_print_mr_owner(mon_printf, f, mr);
        }
    }
    mon_printf(f, "\n");

    QTAILQ_INIT(&submr_print_queue);

    QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
        new_ml = g_new(MemoryRegionList, 1);
        new_ml->mr = submr;
        QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
            if (new_ml->mr->addr < ml->mr->addr ||
                (new_ml->mr->addr == ml->mr->addr &&
                 new_ml->mr->priority > ml->mr->priority)) {
                QTAILQ_INSERT_BEFORE(ml, new_ml, mrqueue);
                new_ml = NULL;
                break;
            }
        }
        if (new_ml) {
            QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, mrqueue);
        }
    }

    QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
        mtree_print_mr(mon_printf, f, ml->mr, level + 1, cur_start,
                       alias_print_queue, owner);
    }

    QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, mrqueue, next_ml) {
        g_free(ml);
    }
}
struct FlatViewInfo {
    fprintf_function mon_printf;
    void *f;
    int counter;
    bool dispatch_tree;
    bool owner;
};
static void mtree_print_flatview(gpointer key, gpointer value,
                                 gpointer user_data)
{
    FlatView *view = key;
    GArray *fv_address_spaces = value;
    struct FlatViewInfo *fvi = user_data;
    fprintf_function p = fvi->mon_printf;
    void *f = fvi->f;
    FlatRange *range = &view->ranges[0];
    MemoryRegion *mr;
    int n = view->nr;
    int i;
    AddressSpace *as;

    p(f, "FlatView #%d\n", fvi->counter);
    ++fvi->counter;

    for (i = 0; i < fv_address_spaces->len; ++i) {
        as = g_array_index(fv_address_spaces, AddressSpace *, i);
        p(f, " AS \"%s\", root: %s", as->name, memory_region_name(as->root));
        if (as->root->alias) {
            p(f, ", alias %s", memory_region_name(as->root->alias));
        }
        p(f, "\n");
    }

    p(f, " Root memory region: %s\n",
      view->root ? memory_region_name(view->root) : "(none)");

    if (n <= 0) {
        p(f, MTREE_INDENT "No rendered FlatView\n\n");
        return;
    }

    while (n--) {
        mr = range->mr;
        if (range->offset_in_region) {
            p(f, MTREE_INDENT TARGET_FMT_plx "-"
              TARGET_FMT_plx " (prio %d, %s): %s @" TARGET_FMT_plx,
              int128_get64(range->addr.start),
              int128_get64(range->addr.start) + MR_SIZE(range->addr.size),
              mr->priority,
              range->readonly ? "rom" : memory_region_type(mr),
              memory_region_name(mr),
              range->offset_in_region);
        } else {
            p(f, MTREE_INDENT TARGET_FMT_plx "-"
              TARGET_FMT_plx " (prio %d, %s): %s",
              int128_get64(range->addr.start),
              int128_get64(range->addr.start) + MR_SIZE(range->addr.size),
              mr->priority,
              range->readonly ? "rom" : memory_region_type(mr),
              memory_region_name(mr));
        }
        if (fvi->owner) {
            mtree_print_mr_owner(p, f, mr);
        }
        p(f, "\n");
        range++;
    }

#if !defined(CONFIG_USER_ONLY)
    if (fvi->dispatch_tree && view->root) {
        mtree_print_dispatch(p, f, view->dispatch, view->root);
    }
#endif

    p(f, "\n");
}
static gboolean mtree_info_flatview_free(gpointer key, gpointer value,
                                         gpointer user_data)
{
    FlatView *view = key;
    GArray *fv_address_spaces = value;

    g_array_unref(fv_address_spaces);
    flatview_unref(view);

    return true;
}
void mtree_info(fprintf_function mon_printf, void *f, bool flatview,
                bool dispatch_tree, bool owner)
{
    MemoryRegionListHead ml_head;
    MemoryRegionList *ml, *ml2;
    AddressSpace *as;

    if (flatview) {
        FlatView *view;
        struct FlatViewInfo fvi = {
            .mon_printf = mon_printf,
            .f = f,
            .counter = 0,
            .dispatch_tree = dispatch_tree,
            .owner = owner,
        };
        GArray *fv_address_spaces;
        GHashTable *views = g_hash_table_new(g_direct_hash, g_direct_equal);

        /* Gather all FVs in one table */
        QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
            view = address_space_get_flatview(as);

            fv_address_spaces = g_hash_table_lookup(views, view);
            if (!fv_address_spaces) {
                fv_address_spaces = g_array_new(false, false, sizeof(as));
                g_hash_table_insert(views, view, fv_address_spaces);
            }

            g_array_append_val(fv_address_spaces, as);
        }

        /* Print */
        g_hash_table_foreach(views, mtree_print_flatview, &fvi);

        /* Free */
        g_hash_table_foreach_remove(views, mtree_info_flatview_free, 0);
        g_hash_table_unref(views);

        return;
    }

    QTAILQ_INIT(&ml_head);

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        mon_printf(f, "address-space: %s\n", as->name);
        mtree_print_mr(mon_printf, f, as->root, 1, 0, &ml_head, owner);
        mon_printf(f, "\n");
    }

    /* print aliased regions */
    QTAILQ_FOREACH(ml, &ml_head, mrqueue) {
        mon_printf(f, "memory-region: %s\n", memory_region_name(ml->mr));
        mtree_print_mr(mon_printf, f, ml->mr, 1, 0, &ml_head, owner);
        mon_printf(f, "\n");
    }

    QTAILQ_FOREACH_SAFE(ml, &ml_head, mrqueue, ml2) {
        g_free(ml);
    }
}
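
/*
 * Example (hypothetical device realize function):
 *
 *     memory_region_init_ram(&s->ram, OBJECT(dev), "mydev.ram",
 *                            0x10000, &error_fatal);
 *
 * Unlike the _nomigrate variant this also registers the RAM block for
 * migration, which is why the owner must be a DeviceState (or NULL).
 */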
void memory_region_init_ram(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    DeviceState *owner_dev;
    Error *err = NULL;

    memory_region_init_ram_nomigrate(mr, owner, name, size, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration. TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);
}
void memory_region_init_rom(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    DeviceState *owner_dev;
    Error *err = NULL;

    memory_region_init_rom_nomigrate(mr, owner, name, size, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration. TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);
}
void memory_region_init_rom_device(MemoryRegion *mr,
                                   struct Object *owner,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size,
                                   Error **errp)
{
    DeviceState *owner_dev;
    Error *err = NULL;

    memory_region_init_rom_device_nomigrate(mr, owner, ops, opaque,
                                            name, size, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration. TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);
}
static const TypeInfo memory_region_info = {
    .parent             = TYPE_OBJECT,
    .name               = TYPE_MEMORY_REGION,
    .instance_size      = sizeof(MemoryRegion),
    .instance_init      = memory_region_initfn,
    .instance_finalize  = memory_region_finalize,
};
static const TypeInfo iommu_memory_region_info = {
    .parent             = TYPE_MEMORY_REGION,
    .name               = TYPE_IOMMU_MEMORY_REGION,
    .class_size         = sizeof(IOMMUMemoryRegionClass),
    .instance_size      = sizeof(IOMMUMemoryRegion),
    .instance_init      = iommu_memory_region_initfn,
    .abstract           = true,
};
static void memory_register_types(void)
{
    type_register_static(&memory_region_info);
    type_register_static(&iommu_memory_region_info);
}

type_init(memory_register_types)