1 /*
2 * Physical memory management
3 *
4 * Copyright 2011 Red Hat, Inc. and/or its affiliates
5 *
6 * Authors:
7 * Avi Kivity <avi@redhat.com>
8 *
9 * This work is licensed under the terms of the GNU GPL, version 2. See
10 * the COPYING file in the top-level directory.
11 *
12 * Contributions after 2012-01-13 are licensed under the terms of the
13 * GNU GPL, version 2 or (at your option) any later version.
14 */
15
16 #include "qemu/osdep.h"
17 #include "qapi/error.h"
18 #include "cpu.h"
19 #include "exec/memory.h"
20 #include "exec/address-spaces.h"
21 #include "qapi/visitor.h"
22 #include "qemu/bitops.h"
23 #include "qemu/error-report.h"
24 #include "qemu/main-loop.h"
25 #include "qemu/qemu-print.h"
26 #include "qom/object.h"
27 #include "trace-root.h"
28
29 #include "exec/memory-internal.h"
30 #include "exec/ram_addr.h"
31 #include "sysemu/kvm.h"
32 #include "sysemu/runstate.h"
33 #include "sysemu/tcg.h"
34 #include "sysemu/accel.h"
35 #include "hw/boards.h"
36 #include "migration/vmstate.h"
37
38 //#define DEBUG_UNASSIGNED
39
40 static unsigned memory_region_transaction_depth;
41 static bool memory_region_update_pending;
42 static bool ioeventfd_update_pending;
43 bool global_dirty_log;
44
45 static QTAILQ_HEAD(, MemoryListener) memory_listeners
46 = QTAILQ_HEAD_INITIALIZER(memory_listeners);
47
48 static QTAILQ_HEAD(, AddressSpace) address_spaces
49 = QTAILQ_HEAD_INITIALIZER(address_spaces);
50
51 static GHashTable *flat_views;
52
53 typedef struct AddrRange AddrRange;
54
55 /*
56 * Note that signed integers are needed for negative offsetting in aliases
57 * (large MemoryRegion::alias_offset).
58 */
59 struct AddrRange {
60 Int128 start;
61 Int128 size;
62 };
63
64 static AddrRange addrrange_make(Int128 start, Int128 size)
65 {
66 return (AddrRange) { start, size };
67 }
68
69 static bool addrrange_equal(AddrRange r1, AddrRange r2)
70 {
71 return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
72 }
73
74 static Int128 addrrange_end(AddrRange r)
75 {
76 return int128_add(r.start, r.size);
77 }
78
79 static AddrRange addrrange_shift(AddrRange range, Int128 delta)
80 {
81 int128_addto(&range.start, delta);
82 return range;
83 }
84
85 static bool addrrange_contains(AddrRange range, Int128 addr)
86 {
87 return int128_ge(addr, range.start)
88 && int128_lt(addr, addrrange_end(range));
89 }
90
91 static bool addrrange_intersects(AddrRange r1, AddrRange r2)
92 {
93 return addrrange_contains(r1, r2.start)
94 || addrrange_contains(r2, r1.start);
95 }
96
97 static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
98 {
99 Int128 start = int128_max(r1.start, r2.start);
100 Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
101 return addrrange_make(start, int128_sub(end, start));
102 }
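/*
 * Example: addrrange_intersection() of [0x1000, +0x2000) and
 * [0x2000, +0x2000) yields [0x2000, +0x1000) -- the larger of the two
 * start addresses paired with whatever remains up to the smaller of
 * the two end addresses.  Callers check addrrange_intersects() first;
 * for disjoint ranges the subtraction would produce a negative size.
 */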
103
104 enum ListenerDirection { Forward, Reverse };
105
106 #define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...) \
107 do { \
108 MemoryListener *_listener; \
109 \
110 switch (_direction) { \
111 case Forward: \
112 QTAILQ_FOREACH(_listener, &memory_listeners, link) { \
113 if (_listener->_callback) { \
114 _listener->_callback(_listener, ##_args); \
115 } \
116 } \
117 break; \
118 case Reverse: \
119 QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners, link) { \
120 if (_listener->_callback) { \
121 _listener->_callback(_listener, ##_args); \
122 } \
123 } \
124 break; \
125 default: \
126 abort(); \
127 } \
128 } while (0)
129
130 #define MEMORY_LISTENER_CALL(_as, _callback, _direction, _section, _args...) \
131 do { \
132 MemoryListener *_listener; \
133 \
134 switch (_direction) { \
135 case Forward: \
136 QTAILQ_FOREACH(_listener, &(_as)->listeners, link_as) { \
137 if (_listener->_callback) { \
138 _listener->_callback(_listener, _section, ##_args); \
139 } \
140 } \
141 break; \
142 case Reverse: \
143 QTAILQ_FOREACH_REVERSE(_listener, &(_as)->listeners, link_as) { \
144 if (_listener->_callback) { \
145 _listener->_callback(_listener, _section, ##_args); \
146 } \
147 } \
148 break; \
149 default: \
150 abort(); \
151 } \
152 } while (0)
153
154 /* No need to ref/unref .mr, the FlatRange keeps it alive. */
155 #define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...) \
156 do { \
157 MemoryRegionSection mrs = section_from_flat_range(fr, \
158 address_space_to_flatview(as)); \
159 MEMORY_LISTENER_CALL(as, callback, dir, &mrs, ##_args); \
160 } while(0)
161
162 struct CoalescedMemoryRange {
163 AddrRange addr;
164 QTAILQ_ENTRY(CoalescedMemoryRange) link;
165 };
166
167 struct MemoryRegionIoeventfd {
168 AddrRange addr;
169 bool match_data;
170 uint64_t data;
171 EventNotifier *e;
172 };
173
174 static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd *a,
175 MemoryRegionIoeventfd *b)
176 {
177 if (int128_lt(a->addr.start, b->addr.start)) {
178 return true;
179 } else if (int128_gt(a->addr.start, b->addr.start)) {
180 return false;
181 } else if (int128_lt(a->addr.size, b->addr.size)) {
182 return true;
183 } else if (int128_gt(a->addr.size, b->addr.size)) {
184 return false;
185 } else if (a->match_data < b->match_data) {
186 return true;
187 } else if (a->match_data > b->match_data) {
188 return false;
189 } else if (a->match_data) {
190 if (a->data < b->data) {
191 return true;
192 } else if (a->data > b->data) {
193 return false;
194 }
195 }
196 if (a->e < b->e) {
197 return true;
198 } else if (a->e > b->e) {
199 return false;
200 }
201 return false;
202 }
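/*
 * memory_region_ioeventfd_before() is a strict ordering over the tuple
 * (addr.start, addr.size, match_data, data, e); "equal" below simply
 * means neither element sorts before the other.  Keeping ioeventfd
 * arrays sorted by this ordering is what lets
 * address_space_add_del_ioeventfds() compute their symmetric
 * difference in a single linear pass.
 */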
203
204 static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd *a,
205 MemoryRegionIoeventfd *b)
206 {
207 return !memory_region_ioeventfd_before(a, b)
208 && !memory_region_ioeventfd_before(b, a);
209 }
210
211 /* Range of memory in the global map. Addresses are absolute. */
212 struct FlatRange {
213 MemoryRegion *mr;
214 hwaddr offset_in_region;
215 AddrRange addr;
216 uint8_t dirty_log_mask;
217 bool romd_mode;
218 bool readonly;
219 bool nonvolatile;
220 };
221
222 #define FOR_EACH_FLAT_RANGE(var, view) \
223 for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)
224
225 static inline MemoryRegionSection
226 section_from_flat_range(FlatRange *fr, FlatView *fv)
227 {
228 return (MemoryRegionSection) {
229 .mr = fr->mr,
230 .fv = fv,
231 .offset_within_region = fr->offset_in_region,
232 .size = fr->addr.size,
233 .offset_within_address_space = int128_get64(fr->addr.start),
234 .readonly = fr->readonly,
235 .nonvolatile = fr->nonvolatile,
236 };
237 }
238
239 static bool flatrange_equal(FlatRange *a, FlatRange *b)
240 {
241 return a->mr == b->mr
242 && addrrange_equal(a->addr, b->addr)
243 && a->offset_in_region == b->offset_in_region
244 && a->romd_mode == b->romd_mode
245 && a->readonly == b->readonly
246 && a->nonvolatile == b->nonvolatile;
247 }
248
249 static FlatView *flatview_new(MemoryRegion *mr_root)
250 {
251 FlatView *view;
252
253 view = g_new0(FlatView, 1);
254 view->ref = 1;
255 view->root = mr_root;
256 memory_region_ref(mr_root);
257 trace_flatview_new(view, mr_root);
258
259 return view;
260 }
261
262 /* Insert a range into a given position. Caller is responsible for maintaining
263 * sorting order.
264 */
265 static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
266 {
267 if (view->nr == view->nr_allocated) {
268 view->nr_allocated = MAX(2 * view->nr, 10);
269 view->ranges = g_realloc(view->ranges,
270 view->nr_allocated * sizeof(*view->ranges));
271 }
272 memmove(view->ranges + pos + 1, view->ranges + pos,
273 (view->nr - pos) * sizeof(FlatRange));
274 view->ranges[pos] = *range;
275 memory_region_ref(range->mr);
276 ++view->nr;
277 }
278
279 static void flatview_destroy(FlatView *view)
280 {
281 int i;
282
283 trace_flatview_destroy(view, view->root);
284 if (view->dispatch) {
285 address_space_dispatch_free(view->dispatch);
286 }
287 for (i = 0; i < view->nr; i++) {
288 memory_region_unref(view->ranges[i].mr);
289 }
290 g_free(view->ranges);
291 memory_region_unref(view->root);
292 g_free(view);
293 }
294
295 static bool flatview_ref(FlatView *view)
296 {
297 return atomic_fetch_inc_nonzero(&view->ref) > 0;
298 }
299
300 void flatview_unref(FlatView *view)
301 {
302 if (atomic_fetch_dec(&view->ref) == 1) {
303 trace_flatview_destroy_rcu(view, view->root);
304 assert(view->root);
305 call_rcu(view, flatview_destroy, rcu);
306 }
307 }
308
309 static bool can_merge(FlatRange *r1, FlatRange *r2)
310 {
311 return int128_eq(addrrange_end(r1->addr), r2->addr.start)
312 && r1->mr == r2->mr
313 && int128_eq(int128_add(int128_make64(r1->offset_in_region),
314 r1->addr.size),
315 int128_make64(r2->offset_in_region))
316 && r1->dirty_log_mask == r2->dirty_log_mask
317 && r1->romd_mode == r2->romd_mode
318 && r1->readonly == r2->readonly
319 && r1->nonvolatile == r2->nonvolatile;
320 }
321
322 /* Attempt to simplify a view by merging adjacent ranges */
323 static void flatview_simplify(FlatView *view)
324 {
325 unsigned i, j, k;
326
327 i = 0;
328 while (i < view->nr) {
329 j = i + 1;
330 while (j < view->nr
331 && can_merge(&view->ranges[j-1], &view->ranges[j])) {
332 int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
333 ++j;
334 }
335 ++i;
336 for (k = i; k < j; k++) {
337 memory_region_unref(view->ranges[k].mr);
338 }
339 memmove(&view->ranges[i], &view->ranges[j],
340 (view->nr - j) * sizeof(view->ranges[j]));
341 view->nr -= j - i;
342 }
343 }
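/*
 * Two neighbours merge only when they are one access region split in
 * two: same MemoryRegion, the second starting exactly where the first
 * ends both in guest address space and in offset_in_region, with
 * identical dirty-log/ROMD/readonly/nonvolatile attributes.  For
 * example, [0x1000, +0x1000) at offset 0 and [0x2000, +0x1000) at
 * offset 0x1000 of the same region collapse into [0x1000, +0x2000)
 * at offset 0.
 */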
344
345 static bool memory_region_big_endian(MemoryRegion *mr)
346 {
347 #ifdef TARGET_WORDS_BIGENDIAN
348 return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
349 #else
350 return mr->ops->endianness == DEVICE_BIG_ENDIAN;
351 #endif
352 }
353
354 static void adjust_endianness(MemoryRegion *mr, uint64_t *data, MemOp op)
355 {
356 if ((op & MO_BSWAP) != devend_memop(mr->ops->endianness)) {
357 switch (op & MO_SIZE) {
358 case MO_8:
359 break;
360 case MO_16:
361 *data = bswap16(*data);
362 break;
363 case MO_32:
364 *data = bswap32(*data);
365 break;
366 case MO_64:
367 *data = bswap64(*data);
368 break;
369 default:
370 g_assert_not_reached();
371 }
372 }
373 }
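/*
 * The MO_BSWAP bit of @op encodes the byte order the caller asked for,
 * and devend_memop() expresses the device's declared endianness in the
 * same encoding.  A swap is therefore needed exactly when the two
 * disagree: e.g. a 32-bit big-endian access to a DEVICE_LITTLE_ENDIAN
 * region is bswap32()ed, while single-byte accesses (MO_8) never swap.
 */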
374
375 static inline void memory_region_shift_read_access(uint64_t *value,
376 signed shift,
377 uint64_t mask,
378 uint64_t tmp)
379 {
380 if (shift >= 0) {
381 *value |= (tmp & mask) << shift;
382 } else {
383 *value |= (tmp & mask) >> -shift;
384 }
385 }
386
387 static inline uint64_t memory_region_shift_write_access(uint64_t *value,
388 signed shift,
389 uint64_t mask)
390 {
391 uint64_t tmp;
392
393 if (shift >= 0) {
394 tmp = (*value >> shift) & mask;
395 } else {
396 tmp = (*value << -shift) & mask;
397 }
398
399 return tmp;
400 }
401
402 static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset)
403 {
404 MemoryRegion *root;
405 hwaddr abs_addr = offset;
406
407 abs_addr += mr->addr;
408 for (root = mr; root->container; ) {
409 root = root->container;
410 abs_addr += root->addr;
411 }
412
413 return abs_addr;
414 }
415
416 static int get_cpu_index(void)
417 {
418 if (current_cpu) {
419 return current_cpu->cpu_index;
420 }
421 return -1;
422 }
423
424 static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
425 hwaddr addr,
426 uint64_t *value,
427 unsigned size,
428 signed shift,
429 uint64_t mask,
430 MemTxAttrs attrs)
431 {
432 uint64_t tmp;
433
434 tmp = mr->ops->read(mr->opaque, addr, size);
435 if (mr->subpage) {
436 trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
437 } else if (mr == &io_mem_notdirty) {
438 /* Accesses to code which has previously been translated into a TB show
439 * up in the MMIO path, as accesses to the io_mem_notdirty
440 * MemoryRegion. */
441 trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
442 } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
443 hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
444 trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
445 }
446 memory_region_shift_read_access(value, shift, mask, tmp);
447 return MEMTX_OK;
448 }
449
450 static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
451 hwaddr addr,
452 uint64_t *value,
453 unsigned size,
454 signed shift,
455 uint64_t mask,
456 MemTxAttrs attrs)
457 {
458 uint64_t tmp = 0;
459 MemTxResult r;
460
461 r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
462 if (mr->subpage) {
463 trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
464 } else if (mr == &io_mem_notdirty) {
465 /* Accesses to code which has previously been translated into a TB show
466 * up in the MMIO path, as accesses to the io_mem_notdirty
467 * MemoryRegion. */
468 trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size);
469 } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) {
470 hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
471 trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
472 }
473 memory_region_shift_read_access(value, shift, mask, tmp);
474 return r;
475 }
476
477 static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
478 hwaddr addr,
479 uint64_t *value,
480 unsigned size,
481 signed shift,
482 uint64_t mask,
483 MemTxAttrs attrs)
484 {
485 uint64_t tmp = memory_region_shift_write_access(value, shift, mask);
486
487 if (mr->subpage) {
488 trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
489 } else if (mr == &io_mem_notdirty) {
490 /* Accesses to code which has previously been translated into a TB show
491 * up in the MMIO path, as accesses to the io_mem_notdirty
492 * MemoryRegion. */
493 trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
494 } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
495 hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
496 trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
497 }
498 mr->ops->write(mr->opaque, addr, tmp, size);
499 return MEMTX_OK;
500 }
501
502 static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
503 hwaddr addr,
504 uint64_t *value,
505 unsigned size,
506 signed shift,
507 uint64_t mask,
508 MemTxAttrs attrs)
509 {
510 uint64_t tmp = memory_region_shift_write_access(value, shift, mask);
511
512 if (mr->subpage) {
513 trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
514 } else if (mr == &io_mem_notdirty) {
515 /* Accesses to code which has previously been translated into a TB show
516 * up in the MMIO path, as accesses to the io_mem_notdirty
517 * MemoryRegion. */
518 trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size);
519 } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) {
520 hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
521 trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
522 }
523 return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs);
524 }
525
526 static MemTxResult access_with_adjusted_size(hwaddr addr,
527 uint64_t *value,
528 unsigned size,
529 unsigned access_size_min,
530 unsigned access_size_max,
531 MemTxResult (*access_fn)
532 (MemoryRegion *mr,
533 hwaddr addr,
534 uint64_t *value,
535 unsigned size,
536 signed shift,
537 uint64_t mask,
538 MemTxAttrs attrs),
539 MemoryRegion *mr,
540 MemTxAttrs attrs)
541 {
542 uint64_t access_mask;
543 unsigned access_size;
544 unsigned i;
545 MemTxResult r = MEMTX_OK;
546
547 if (!access_size_min) {
548 access_size_min = 1;
549 }
550 if (!access_size_max) {
551 access_size_max = 4;
552 }
553
554 /* FIXME: support unaligned access? */
555 access_size = MAX(MIN(size, access_size_max), access_size_min);
556 access_mask = MAKE_64BIT_MASK(0, access_size * 8);
557 if (memory_region_big_endian(mr)) {
558 for (i = 0; i < size; i += access_size) {
559 r |= access_fn(mr, addr + i, value, access_size,
560 (size - access_size - i) * 8, access_mask, attrs);
561 }
562 } else {
563 for (i = 0; i < size; i += access_size) {
564 r |= access_fn(mr, addr + i, value, access_size, i * 8,
565 access_mask, attrs);
566 }
567 }
568 return r;
569 }
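/*
 * Example: a 32-bit access to a device whose ops->impl caps accesses
 * at 2 bytes is issued as two 16-bit sub-accesses with access_mask
 * 0xffff.  On a little-endian device the sub-access at addr+0 carries
 * bits [15:0] (shift 0) and the one at addr+2 carries bits [31:16]
 * (shift 16); on a big-endian device the shifts are reversed, so the
 * most significant bytes land at the lowest address.
 */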
570
571 static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
572 {
573 AddressSpace *as;
574
575 while (mr->container) {
576 mr = mr->container;
577 }
578 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
579 if (mr == as->root) {
580 return as;
581 }
582 }
583 return NULL;
584 }
585
586 /* Render a memory region into the global view. Ranges in @view obscure
587 * ranges in @mr.
588 */
589 static void render_memory_region(FlatView *view,
590 MemoryRegion *mr,
591 Int128 base,
592 AddrRange clip,
593 bool readonly,
594 bool nonvolatile)
595 {
596 MemoryRegion *subregion;
597 unsigned i;
598 hwaddr offset_in_region;
599 Int128 remain;
600 Int128 now;
601 FlatRange fr;
602 AddrRange tmp;
603
604 if (!mr->enabled) {
605 return;
606 }
607
608 int128_addto(&base, int128_make64(mr->addr));
609 readonly |= mr->readonly;
610 nonvolatile |= mr->nonvolatile;
611
612 tmp = addrrange_make(base, mr->size);
613
614 if (!addrrange_intersects(tmp, clip)) {
615 return;
616 }
617
618 clip = addrrange_intersection(tmp, clip);
619
620 if (mr->alias) {
621 int128_subfrom(&base, int128_make64(mr->alias->addr));
622 int128_subfrom(&base, int128_make64(mr->alias_offset));
623 render_memory_region(view, mr->alias, base, clip,
624 readonly, nonvolatile);
625 return;
626 }
627
628 /* Render subregions in priority order. */
629 QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
630 render_memory_region(view, subregion, base, clip,
631 readonly, nonvolatile);
632 }
633
634 if (!mr->terminates) {
635 return;
636 }
637
638 offset_in_region = int128_get64(int128_sub(clip.start, base));
639 base = clip.start;
640 remain = clip.size;
641
642 fr.mr = mr;
643 fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr);
644 fr.romd_mode = mr->romd_mode;
645 fr.readonly = readonly;
646 fr.nonvolatile = nonvolatile;
647
648 /* Render the region itself into any gaps left by the current view. */
649 for (i = 0; i < view->nr && int128_nz(remain); ++i) {
650 if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
651 continue;
652 }
653 if (int128_lt(base, view->ranges[i].addr.start)) {
654 now = int128_min(remain,
655 int128_sub(view->ranges[i].addr.start, base));
656 fr.offset_in_region = offset_in_region;
657 fr.addr = addrrange_make(base, now);
658 flatview_insert(view, i, &fr);
659 ++i;
660 int128_addto(&base, now);
661 offset_in_region += int128_get64(now);
662 int128_subfrom(&remain, now);
663 }
664 now = int128_sub(int128_min(int128_add(base, remain),
665 addrrange_end(view->ranges[i].addr)),
666 base);
667 int128_addto(&base, now);
668 offset_in_region += int128_get64(now);
669 int128_subfrom(&remain, now);
670 }
671 if (int128_nz(remain)) {
672 fr.offset_in_region = offset_in_region;
673 fr.addr = addrrange_make(base, remain);
674 flatview_insert(view, i, &fr);
675 }
676 }
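/*
 * Rendering is what turns overlap and priority into a flat list: by
 * the time a terminating region is rendered here, everything that
 * obscures it (higher-priority regions rendered earlier, plus whatever
 * is already in @view) is in place, so the region only fills the
 * remaining gaps inside @clip.  A higher-priority subregion in the
 * middle of RAM therefore leaves the RAM split into two FlatRanges
 * around it.
 */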
677
678 static MemoryRegion *memory_region_get_flatview_root(MemoryRegion *mr)
679 {
680 while (mr->enabled) {
681 if (mr->alias) {
682 if (!mr->alias_offset && int128_ge(mr->size, mr->alias->size)) {
683 /* The alias is included in its entirety. Use it as
684 * the "real" root, so that we can share more FlatViews.
685 */
686 mr = mr->alias;
687 continue;
688 }
689 } else if (!mr->terminates) {
690 unsigned int found = 0;
691 MemoryRegion *child, *next = NULL;
692 QTAILQ_FOREACH(child, &mr->subregions, subregions_link) {
693 if (child->enabled) {
694 if (++found > 1) {
695 next = NULL;
696 break;
697 }
698 if (!child->addr && int128_ge(mr->size, child->size)) {
699 /* A child is included in its entirety. If it's the only
700 * enabled one, use it in the hope of finding an alias down the
701 * way. This will also let us share FlatViews.
702 */
703 next = child;
704 }
705 }
706 }
707 if (found == 0) {
708 return NULL;
709 }
710 if (next) {
711 mr = next;
712 continue;
713 }
714 }
715
716 return mr;
717 }
718
719 return NULL;
720 }
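/*
 * Collapsing the root this way means that two address spaces whose
 * roots differ only by wrappers -- an alias at offset 0 covering its
 * whole target, or a container whose only enabled child sits at
 * offset 0 -- resolve to the same "physical" MemoryRegion and hence
 * share one FlatView in the flat_views hash table.  A NULL return
 * (nothing enabled) maps such address spaces to the shared empty view
 * created by flatviews_init().
 */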
721
722 /* Render a memory topology into a list of disjoint absolute ranges. */
723 static FlatView *generate_memory_topology(MemoryRegion *mr)
724 {
725 int i;
726 FlatView *view;
727
728 view = flatview_new(mr);
729
730 if (mr) {
731 render_memory_region(view, mr, int128_zero(),
732 addrrange_make(int128_zero(), int128_2_64()),
733 false, false);
734 }
735 flatview_simplify(view);
736
737 view->dispatch = address_space_dispatch_new(view);
738 for (i = 0; i < view->nr; i++) {
739 MemoryRegionSection mrs =
740 section_from_flat_range(&view->ranges[i], view);
741 flatview_add_to_dispatch(view, &mrs);
742 }
743 address_space_dispatch_compact(view->dispatch);
744 g_hash_table_replace(flat_views, mr, view);
745
746 return view;
747 }
748
749 static void address_space_add_del_ioeventfds(AddressSpace *as,
750 MemoryRegionIoeventfd *fds_new,
751 unsigned fds_new_nb,
752 MemoryRegionIoeventfd *fds_old,
753 unsigned fds_old_nb)
754 {
755 unsigned iold, inew;
756 MemoryRegionIoeventfd *fd;
757 MemoryRegionSection section;
758
759 /* Generate a symmetric difference of the old and new fd sets, adding
760 * and deleting as necessary.
761 */
762
763 iold = inew = 0;
764 while (iold < fds_old_nb || inew < fds_new_nb) {
765 if (iold < fds_old_nb
766 && (inew == fds_new_nb
767 || memory_region_ioeventfd_before(&fds_old[iold],
768 &fds_new[inew]))) {
769 fd = &fds_old[iold];
770 section = (MemoryRegionSection) {
771 .fv = address_space_to_flatview(as),
772 .offset_within_address_space = int128_get64(fd->addr.start),
773 .size = fd->addr.size,
774 };
775 MEMORY_LISTENER_CALL(as, eventfd_del, Forward, &section,
776 fd->match_data, fd->data, fd->e);
777 ++iold;
778 } else if (inew < fds_new_nb
779 && (iold == fds_old_nb
780 || memory_region_ioeventfd_before(&fds_new[inew],
781 &fds_old[iold]))) {
782 fd = &fds_new[inew];
783 section = (MemoryRegionSection) {
784 .fv = address_space_to_flatview(as),
785 .offset_within_address_space = int128_get64(fd->addr.start),
786 .size = fd->addr.size,
787 };
788 MEMORY_LISTENER_CALL(as, eventfd_add, Reverse, &section,
789 fd->match_data, fd->data, fd->e);
790 ++inew;
791 } else {
792 ++iold;
793 ++inew;
794 }
795 }
796 }
797
798 FlatView *address_space_get_flatview(AddressSpace *as)
799 {
800 FlatView *view;
801
802 rcu_read_lock();
803 do {
804 view = address_space_to_flatview(as);
805 /* If somebody has replaced as->current_map concurrently,
806 * flatview_ref returns false.
807 */
808 } while (!flatview_ref(view));
809 rcu_read_unlock();
810 return view;
811 }
812
813 static void address_space_update_ioeventfds(AddressSpace *as)
814 {
815 FlatView *view;
816 FlatRange *fr;
817 unsigned ioeventfd_nb = 0;
818 MemoryRegionIoeventfd *ioeventfds = NULL;
819 AddrRange tmp;
820 unsigned i;
821
822 view = address_space_get_flatview(as);
823 FOR_EACH_FLAT_RANGE(fr, view) {
824 for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
825 tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
826 int128_sub(fr->addr.start,
827 int128_make64(fr->offset_in_region)));
828 if (addrrange_intersects(fr->addr, tmp)) {
829 ++ioeventfd_nb;
830 ioeventfds = g_realloc(ioeventfds,
831 ioeventfd_nb * sizeof(*ioeventfds));
832 ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
833 ioeventfds[ioeventfd_nb-1].addr = tmp;
834 }
835 }
836 }
837
838 address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
839 as->ioeventfds, as->ioeventfd_nb);
840
841 g_free(as->ioeventfds);
842 as->ioeventfds = ioeventfds;
843 as->ioeventfd_nb = ioeventfd_nb;
844 flatview_unref(view);
845 }
846
847 /*
848 * Notify the memory listeners about the coalesced IO change events of
849  * range `cmr'. Only the part that intersects the specified
850  * FlatRange is sent.
851 */
852 static void flat_range_coalesced_io_notify(FlatRange *fr, AddressSpace *as,
853 CoalescedMemoryRange *cmr, bool add)
854 {
855 AddrRange tmp;
856
857 tmp = addrrange_shift(cmr->addr,
858 int128_sub(fr->addr.start,
859 int128_make64(fr->offset_in_region)));
860 if (!addrrange_intersects(tmp, fr->addr)) {
861 return;
862 }
863 tmp = addrrange_intersection(tmp, fr->addr);
864
865 if (add) {
866 MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, coalesced_io_add,
867 int128_get64(tmp.start),
868 int128_get64(tmp.size));
869 } else {
870 MEMORY_LISTENER_UPDATE_REGION(fr, as, Reverse, coalesced_io_del,
871 int128_get64(tmp.start),
872 int128_get64(tmp.size));
873 }
874 }
875
876 static void flat_range_coalesced_io_del(FlatRange *fr, AddressSpace *as)
877 {
878 CoalescedMemoryRange *cmr;
879
880 QTAILQ_FOREACH(cmr, &fr->mr->coalesced, link) {
881 flat_range_coalesced_io_notify(fr, as, cmr, false);
882 }
883 }
884
885 static void flat_range_coalesced_io_add(FlatRange *fr, AddressSpace *as)
886 {
887 MemoryRegion *mr = fr->mr;
888 CoalescedMemoryRange *cmr;
889
890 if (QTAILQ_EMPTY(&mr->coalesced)) {
891 return;
892 }
893
894 QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
895 flat_range_coalesced_io_notify(fr, as, cmr, true);
896 }
897 }
898
899 static void address_space_update_topology_pass(AddressSpace *as,
900 const FlatView *old_view,
901 const FlatView *new_view,
902 bool adding)
903 {
904 unsigned iold, inew;
905 FlatRange *frold, *frnew;
906
907 /* Generate a symmetric difference of the old and new memory maps.
908 * Kill ranges in the old map, and instantiate ranges in the new map.
909 */
910 iold = inew = 0;
911 while (iold < old_view->nr || inew < new_view->nr) {
912 if (iold < old_view->nr) {
913 frold = &old_view->ranges[iold];
914 } else {
915 frold = NULL;
916 }
917 if (inew < new_view->nr) {
918 frnew = &new_view->ranges[inew];
919 } else {
920 frnew = NULL;
921 }
922
923 if (frold
924 && (!frnew
925 || int128_lt(frold->addr.start, frnew->addr.start)
926 || (int128_eq(frold->addr.start, frnew->addr.start)
927 && !flatrange_equal(frold, frnew)))) {
928 /* In old but not in new, or in both but attributes changed. */
929
930 if (!adding) {
931 flat_range_coalesced_io_del(frold, as);
932 MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
933 }
934
935 ++iold;
936 } else if (frold && frnew && flatrange_equal(frold, frnew)) {
937 /* In both and unchanged (except logging may have changed) */
938
939 if (adding) {
940 MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
941 if (frnew->dirty_log_mask & ~frold->dirty_log_mask) {
942 MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start,
943 frold->dirty_log_mask,
944 frnew->dirty_log_mask);
945 }
946 if (frold->dirty_log_mask & ~frnew->dirty_log_mask) {
947 MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop,
948 frold->dirty_log_mask,
949 frnew->dirty_log_mask);
950 }
951 }
952
953 ++iold;
954 ++inew;
955 } else {
956 /* In new */
957
958 if (adding) {
959 MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
960 flat_range_coalesced_io_add(frnew, as);
961 }
962
963 ++inew;
964 }
965 }
966 }
967
968 static void flatviews_init(void)
969 {
970 static FlatView *empty_view;
971
972 if (flat_views) {
973 return;
974 }
975
976 flat_views = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL,
977 (GDestroyNotify) flatview_unref);
978 if (!empty_view) {
979 empty_view = generate_memory_topology(NULL);
980 /* We keep it alive forever in the global variable. */
981 flatview_ref(empty_view);
982 } else {
983 g_hash_table_replace(flat_views, NULL, empty_view);
984 flatview_ref(empty_view);
985 }
986 }
987
988 static void flatviews_reset(void)
989 {
990 AddressSpace *as;
991
992 if (flat_views) {
993 g_hash_table_unref(flat_views);
994 flat_views = NULL;
995 }
996 flatviews_init();
997
998 /* Render unique FVs */
999 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
1000 MemoryRegion *physmr = memory_region_get_flatview_root(as->root);
1001
1002 if (g_hash_table_lookup(flat_views, physmr)) {
1003 continue;
1004 }
1005
1006 generate_memory_topology(physmr);
1007 }
1008 }
1009
1010 static void address_space_set_flatview(AddressSpace *as)
1011 {
1012 FlatView *old_view = address_space_to_flatview(as);
1013 MemoryRegion *physmr = memory_region_get_flatview_root(as->root);
1014 FlatView *new_view = g_hash_table_lookup(flat_views, physmr);
1015
1016 assert(new_view);
1017
1018 if (old_view == new_view) {
1019 return;
1020 }
1021
1022 if (old_view) {
1023 flatview_ref(old_view);
1024 }
1025
1026 flatview_ref(new_view);
1027
1028 if (!QTAILQ_EMPTY(&as->listeners)) {
1029 FlatView tmpview = { .nr = 0 }, *old_view2 = old_view;
1030
1031 if (!old_view2) {
1032 old_view2 = &tmpview;
1033 }
1034 address_space_update_topology_pass(as, old_view2, new_view, false);
1035 address_space_update_topology_pass(as, old_view2, new_view, true);
1036 }
1037
1038 /* Writes are protected by the BQL. */
1039 atomic_rcu_set(&as->current_map, new_view);
1040 if (old_view) {
1041 flatview_unref(old_view);
1042 }
1043
1044 /* Note that all the old MemoryRegions are still alive up to this
1045 * point. This relieves most MemoryListeners from the need to
1046 * ref/unref the MemoryRegions they get---unless they use them
1047 * outside the iothread mutex, in which case precise reference
1048 * counting is necessary.
1049 */
1050 if (old_view) {
1051 flatview_unref(old_view);
1052 }
1053 }
1054
1055 static void address_space_update_topology(AddressSpace *as)
1056 {
1057 MemoryRegion *physmr = memory_region_get_flatview_root(as->root);
1058
1059 flatviews_init();
1060 if (!g_hash_table_lookup(flat_views, physmr)) {
1061 generate_memory_topology(physmr);
1062 }
1063 address_space_set_flatview(as);
1064 }
1065
1066 void memory_region_transaction_begin(void)
1067 {
1068 qemu_flush_coalesced_mmio_buffer();
1069 ++memory_region_transaction_depth;
1070 }
1071
1072 void memory_region_transaction_commit(void)
1073 {
1074 AddressSpace *as;
1075
1076 assert(memory_region_transaction_depth);
1077 assert(qemu_mutex_iothread_locked());
1078
1079 --memory_region_transaction_depth;
1080 if (!memory_region_transaction_depth) {
1081 if (memory_region_update_pending) {
1082 flatviews_reset();
1083
1084 MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);
1085
1086 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
1087 address_space_set_flatview(as);
1088 address_space_update_ioeventfds(as);
1089 }
1090 memory_region_update_pending = false;
1091 ioeventfd_update_pending = false;
1092 MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
1093 } else if (ioeventfd_update_pending) {
1094 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
1095 address_space_update_ioeventfds(as);
1096 }
1097 ioeventfd_update_pending = false;
1098 }
1099 }
1100 }
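/*
 * Transactions nest; nothing is propagated to the listeners until the
 * outermost commit.  A typical caller batches related updates so the
 * guest never observes an intermediate topology, along these lines
 * (sketch; mr and new_base stand for the caller's own region and
 * address):
 *
 *     memory_region_transaction_begin();
 *     memory_region_set_enabled(mr, false);
 *     memory_region_set_address(mr, new_base);
 *     memory_region_set_enabled(mr, true);
 *     memory_region_transaction_commit();
 */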
1101
1102 static void memory_region_destructor_none(MemoryRegion *mr)
1103 {
1104 }
1105
1106 static void memory_region_destructor_ram(MemoryRegion *mr)
1107 {
1108 qemu_ram_free(mr->ram_block);
1109 }
1110
1111 static bool memory_region_need_escape(char c)
1112 {
1113 return c == '/' || c == '[' || c == '\\' || c == ']';
1114 }
1115
1116 static char *memory_region_escape_name(const char *name)
1117 {
1118 const char *p;
1119 char *escaped, *q;
1120 uint8_t c;
1121 size_t bytes = 0;
1122
1123 for (p = name; *p; p++) {
1124 bytes += memory_region_need_escape(*p) ? 4 : 1;
1125 }
1126 if (bytes == p - name) {
1127 return g_memdup(name, bytes + 1);
1128 }
1129
1130 escaped = g_malloc(bytes + 1);
1131 for (p = name, q = escaped; *p; p++) {
1132 c = *p;
1133 if (unlikely(memory_region_need_escape(c))) {
1134 *q++ = '\\';
1135 *q++ = 'x';
1136 *q++ = "0123456789abcdef"[c >> 4];
1137 c = "0123456789abcdef"[c & 15];
1138 }
1139 *q++ = c;
1140 }
1141 *q = 0;
1142 return escaped;
1143 }
1144
1145 static void memory_region_do_init(MemoryRegion *mr,
1146 Object *owner,
1147 const char *name,
1148 uint64_t size)
1149 {
1150 mr->size = int128_make64(size);
1151 if (size == UINT64_MAX) {
1152 mr->size = int128_2_64();
1153 }
1154 mr->name = g_strdup(name);
1155 mr->owner = owner;
1156 mr->ram_block = NULL;
1157
1158 if (name) {
1159 char *escaped_name = memory_region_escape_name(name);
1160 char *name_array = g_strdup_printf("%s[*]", escaped_name);
1161
1162 if (!owner) {
1163 owner = container_get(qdev_get_machine(), "/unattached");
1164 }
1165
1166 object_property_add_child(owner, name_array, OBJECT(mr), &error_abort);
1167 object_unref(OBJECT(mr));
1168 g_free(name_array);
1169 g_free(escaped_name);
1170 }
1171 }
1172
1173 void memory_region_init(MemoryRegion *mr,
1174 Object *owner,
1175 const char *name,
1176 uint64_t size)
1177 {
1178 object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION);
1179 memory_region_do_init(mr, owner, name, size);
1180 }
1181
1182 static void memory_region_get_addr(Object *obj, Visitor *v, const char *name,
1183 void *opaque, Error **errp)
1184 {
1185 MemoryRegion *mr = MEMORY_REGION(obj);
1186 uint64_t value = mr->addr;
1187
1188 visit_type_uint64(v, name, &value, errp);
1189 }
1190
1191 static void memory_region_get_container(Object *obj, Visitor *v,
1192 const char *name, void *opaque,
1193 Error **errp)
1194 {
1195 MemoryRegion *mr = MEMORY_REGION(obj);
1196 gchar *path = (gchar *)"";
1197
1198 if (mr->container) {
1199 path = object_get_canonical_path(OBJECT(mr->container));
1200 }
1201 visit_type_str(v, name, &path, errp);
1202 if (mr->container) {
1203 g_free(path);
1204 }
1205 }
1206
1207 static Object *memory_region_resolve_container(Object *obj, void *opaque,
1208 const char *part)
1209 {
1210 MemoryRegion *mr = MEMORY_REGION(obj);
1211
1212 return OBJECT(mr->container);
1213 }
1214
1215 static void memory_region_get_priority(Object *obj, Visitor *v,
1216 const char *name, void *opaque,
1217 Error **errp)
1218 {
1219 MemoryRegion *mr = MEMORY_REGION(obj);
1220 int32_t value = mr->priority;
1221
1222 visit_type_int32(v, name, &value, errp);
1223 }
1224
1225 static void memory_region_get_size(Object *obj, Visitor *v, const char *name,
1226 void *opaque, Error **errp)
1227 {
1228 MemoryRegion *mr = MEMORY_REGION(obj);
1229 uint64_t value = memory_region_size(mr);
1230
1231 visit_type_uint64(v, name, &value, errp);
1232 }
1233
1234 static void memory_region_initfn(Object *obj)
1235 {
1236 MemoryRegion *mr = MEMORY_REGION(obj);
1237 ObjectProperty *op;
1238
1239 mr->ops = &unassigned_mem_ops;
1240 mr->enabled = true;
1241 mr->romd_mode = true;
1242 mr->global_locking = true;
1243 mr->destructor = memory_region_destructor_none;
1244 QTAILQ_INIT(&mr->subregions);
1245 QTAILQ_INIT(&mr->coalesced);
1246
1247 op = object_property_add(OBJECT(mr), "container",
1248 "link<" TYPE_MEMORY_REGION ">",
1249 memory_region_get_container,
1250 NULL, /* memory_region_set_container */
1251 NULL, NULL, &error_abort);
1252 op->resolve = memory_region_resolve_container;
1253
1254 object_property_add(OBJECT(mr), "addr", "uint64",
1255 memory_region_get_addr,
1256 NULL, /* memory_region_set_addr */
1257 NULL, NULL, &error_abort);
1258 object_property_add(OBJECT(mr), "priority", "uint32",
1259 memory_region_get_priority,
1260 NULL, /* memory_region_set_priority */
1261 NULL, NULL, &error_abort);
1262 object_property_add(OBJECT(mr), "size", "uint64",
1263 memory_region_get_size,
1264 NULL, /* memory_region_set_size, */
1265 NULL, NULL, &error_abort);
1266 }
1267
1268 static void iommu_memory_region_initfn(Object *obj)
1269 {
1270 MemoryRegion *mr = MEMORY_REGION(obj);
1271
1272 mr->is_iommu = true;
1273 }
1274
1275 static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
1276 unsigned size)
1277 {
1278 #ifdef DEBUG_UNASSIGNED
1279 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
1280 #endif
1281 if (current_cpu != NULL) {
1282 bool is_exec = current_cpu->mem_io_access_type == MMU_INST_FETCH;
1283 cpu_unassigned_access(current_cpu, addr, false, is_exec, 0, size);
1284 }
1285 return 0;
1286 }
1287
1288 static void unassigned_mem_write(void *opaque, hwaddr addr,
1289 uint64_t val, unsigned size)
1290 {
1291 #ifdef DEBUG_UNASSIGNED
1292 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
1293 #endif
1294 if (current_cpu != NULL) {
1295 cpu_unassigned_access(current_cpu, addr, true, false, 0, size);
1296 }
1297 }
1298
1299 static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
1300 unsigned size, bool is_write,
1301 MemTxAttrs attrs)
1302 {
1303 return false;
1304 }
1305
1306 const MemoryRegionOps unassigned_mem_ops = {
1307 .valid.accepts = unassigned_mem_accepts,
1308 .endianness = DEVICE_NATIVE_ENDIAN,
1309 };
1310
1311 static uint64_t memory_region_ram_device_read(void *opaque,
1312 hwaddr addr, unsigned size)
1313 {
1314 MemoryRegion *mr = opaque;
1315 uint64_t data = (uint64_t)~0;
1316
1317 switch (size) {
1318 case 1:
1319 data = *(uint8_t *)(mr->ram_block->host + addr);
1320 break;
1321 case 2:
1322 data = *(uint16_t *)(mr->ram_block->host + addr);
1323 break;
1324 case 4:
1325 data = *(uint32_t *)(mr->ram_block->host + addr);
1326 break;
1327 case 8:
1328 data = *(uint64_t *)(mr->ram_block->host + addr);
1329 break;
1330 }
1331
1332 trace_memory_region_ram_device_read(get_cpu_index(), mr, addr, data, size);
1333
1334 return data;
1335 }
1336
1337 static void memory_region_ram_device_write(void *opaque, hwaddr addr,
1338 uint64_t data, unsigned size)
1339 {
1340 MemoryRegion *mr = opaque;
1341
1342 trace_memory_region_ram_device_write(get_cpu_index(), mr, addr, data, size);
1343
1344 switch (size) {
1345 case 1:
1346 *(uint8_t *)(mr->ram_block->host + addr) = (uint8_t)data;
1347 break;
1348 case 2:
1349 *(uint16_t *)(mr->ram_block->host + addr) = (uint16_t)data;
1350 break;
1351 case 4:
1352 *(uint32_t *)(mr->ram_block->host + addr) = (uint32_t)data;
1353 break;
1354 case 8:
1355 *(uint64_t *)(mr->ram_block->host + addr) = data;
1356 break;
1357 }
1358 }
1359
1360 static const MemoryRegionOps ram_device_mem_ops = {
1361 .read = memory_region_ram_device_read,
1362 .write = memory_region_ram_device_write,
1363 .endianness = DEVICE_HOST_ENDIAN,
1364 .valid = {
1365 .min_access_size = 1,
1366 .max_access_size = 8,
1367 .unaligned = true,
1368 },
1369 .impl = {
1370 .min_access_size = 1,
1371 .max_access_size = 8,
1372 .unaligned = true,
1373 },
1374 };
1375
1376 bool memory_region_access_valid(MemoryRegion *mr,
1377 hwaddr addr,
1378 unsigned size,
1379 bool is_write,
1380 MemTxAttrs attrs)
1381 {
1382 int access_size_min, access_size_max;
1383 int access_size, i;
1384
1385 if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
1386 return false;
1387 }
1388
1389 if (!mr->ops->valid.accepts) {
1390 return true;
1391 }
1392
1393 access_size_min = mr->ops->valid.min_access_size;
1394 if (!mr->ops->valid.min_access_size) {
1395 access_size_min = 1;
1396 }
1397
1398 access_size_max = mr->ops->valid.max_access_size;
1399 if (!mr->ops->valid.max_access_size) {
1400 access_size_max = 4;
1401 }
1402
1403 access_size = MAX(MIN(size, access_size_max), access_size_min);
1404 for (i = 0; i < size; i += access_size) {
1405 if (!mr->ops->valid.accepts(mr->opaque, addr + i, access_size,
1406 is_write, attrs)) {
1407 return false;
1408 }
1409 }
1410
1411 return true;
1412 }
1413
1414 static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr,
1415 hwaddr addr,
1416 uint64_t *pval,
1417 unsigned size,
1418 MemTxAttrs attrs)
1419 {
1420 *pval = 0;
1421
1422 if (mr->ops->read) {
1423 return access_with_adjusted_size(addr, pval, size,
1424 mr->ops->impl.min_access_size,
1425 mr->ops->impl.max_access_size,
1426 memory_region_read_accessor,
1427 mr, attrs);
1428 } else {
1429 return access_with_adjusted_size(addr, pval, size,
1430 mr->ops->impl.min_access_size,
1431 mr->ops->impl.max_access_size,
1432 memory_region_read_with_attrs_accessor,
1433 mr, attrs);
1434 }
1435 }
1436
1437 MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
1438 hwaddr addr,
1439 uint64_t *pval,
1440 MemOp op,
1441 MemTxAttrs attrs)
1442 {
1443 unsigned size = memop_size(op);
1444 MemTxResult r;
1445
1446 if (!memory_region_access_valid(mr, addr, size, false, attrs)) {
1447 *pval = unassigned_mem_read(mr, addr, size);
1448 return MEMTX_DECODE_ERROR;
1449 }
1450
1451 r = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
1452 adjust_endianness(mr, pval, op);
1453 return r;
1454 }
1455
1456 /* Return true if an eventfd was signalled */
1457 static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr,
1458 hwaddr addr,
1459 uint64_t data,
1460 unsigned size,
1461 MemTxAttrs attrs)
1462 {
1463 MemoryRegionIoeventfd ioeventfd = {
1464 .addr = addrrange_make(int128_make64(addr), int128_make64(size)),
1465 .data = data,
1466 };
1467 unsigned i;
1468
1469 for (i = 0; i < mr->ioeventfd_nb; i++) {
1470 ioeventfd.match_data = mr->ioeventfds[i].match_data;
1471 ioeventfd.e = mr->ioeventfds[i].e;
1472
1473 if (memory_region_ioeventfd_equal(&ioeventfd, &mr->ioeventfds[i])) {
1474 event_notifier_set(ioeventfd.e);
1475 return true;
1476 }
1477 }
1478
1479 return false;
1480 }
1481
1482 MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
1483 hwaddr addr,
1484 uint64_t data,
1485 MemOp op,
1486 MemTxAttrs attrs)
1487 {
1488 unsigned size = memop_size(op);
1489
1490 if (!memory_region_access_valid(mr, addr, size, true, attrs)) {
1491 unassigned_mem_write(mr, addr, data, size);
1492 return MEMTX_DECODE_ERROR;
1493 }
1494
1495 adjust_endianness(mr, &data, op);
1496
1497 if ((!kvm_eventfds_enabled()) &&
1498 memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) {
1499 return MEMTX_OK;
1500 }
1501
1502 if (mr->ops->write) {
1503 return access_with_adjusted_size(addr, &data, size,
1504 mr->ops->impl.min_access_size,
1505 mr->ops->impl.max_access_size,
1506 memory_region_write_accessor, mr,
1507 attrs);
1508 } else {
1509 return
1510 access_with_adjusted_size(addr, &data, size,
1511 mr->ops->impl.min_access_size,
1512 mr->ops->impl.max_access_size,
1513 memory_region_write_with_attrs_accessor,
1514 mr, attrs);
1515 }
1516 }
1517
1518 void memory_region_init_io(MemoryRegion *mr,
1519 Object *owner,
1520 const MemoryRegionOps *ops,
1521 void *opaque,
1522 const char *name,
1523 uint64_t size)
1524 {
1525 memory_region_init(mr, owner, name, size);
1526 mr->ops = ops ? ops : &unassigned_mem_ops;
1527 mr->opaque = opaque;
1528 mr->terminates = true;
1529 }
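/*
 * Sketch of a minimal MMIO user (MyDevState, mydev_read, mydev_write
 * and "mydev-mmio" are placeholders, not part of this file):
 *
 *     static uint64_t mydev_read(void *opaque, hwaddr addr, unsigned size)
 *     {
 *         MyDevState *s = opaque;
 *         return s->regs[addr >> 2];        // placeholder register file
 *     }
 *
 *     static void mydev_write(void *opaque, hwaddr addr, uint64_t val,
 *                             unsigned size)
 *     {
 *         MyDevState *s = opaque;
 *         s->regs[addr >> 2] = val;
 *     }
 *
 *     static const MemoryRegionOps mydev_ops = {
 *         .read = mydev_read,
 *         .write = mydev_write,
 *         .endianness = DEVICE_LITTLE_ENDIAN,
 *         .impl.min_access_size = 4,
 *         .impl.max_access_size = 4,
 *     };
 *
 *     memory_region_init_io(&s->iomem, OBJECT(s), &mydev_ops, s,
 *                           "mydev-mmio", 0x1000);
 *
 * access_with_adjusted_size() above then handles guest accesses that
 * are narrower or wider than the declared .impl sizes.
 */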
1530
1531 void memory_region_init_ram_nomigrate(MemoryRegion *mr,
1532 Object *owner,
1533 const char *name,
1534 uint64_t size,
1535 Error **errp)
1536 {
1537 memory_region_init_ram_shared_nomigrate(mr, owner, name, size, false, errp);
1538 }
1539
1540 void memory_region_init_ram_shared_nomigrate(MemoryRegion *mr,
1541 Object *owner,
1542 const char *name,
1543 uint64_t size,
1544 bool share,
1545 Error **errp)
1546 {
1547 Error *err = NULL;
1548 memory_region_init(mr, owner, name, size);
1549 mr->ram = true;
1550 mr->terminates = true;
1551 mr->destructor = memory_region_destructor_ram;
1552 mr->ram_block = qemu_ram_alloc(size, share, mr, &err);
1553 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
1554 if (err) {
1555 mr->size = int128_zero();
1556 object_unparent(OBJECT(mr));
1557 error_propagate(errp, err);
1558 }
1559 }
1560
1561 void memory_region_init_resizeable_ram(MemoryRegion *mr,
1562 Object *owner,
1563 const char *name,
1564 uint64_t size,
1565 uint64_t max_size,
1566 void (*resized)(const char*,
1567 uint64_t length,
1568 void *host),
1569 Error **errp)
1570 {
1571 Error *err = NULL;
1572 memory_region_init(mr, owner, name, size);
1573 mr->ram = true;
1574 mr->terminates = true;
1575 mr->destructor = memory_region_destructor_ram;
1576 mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
1577 mr, &err);
1578 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
1579 if (err) {
1580 mr->size = int128_zero();
1581 object_unparent(OBJECT(mr));
1582 error_propagate(errp, err);
1583 }
1584 }
1585
1586 #ifdef CONFIG_POSIX
1587 void memory_region_init_ram_from_file(MemoryRegion *mr,
1588 struct Object *owner,
1589 const char *name,
1590 uint64_t size,
1591 uint64_t align,
1592 uint32_t ram_flags,
1593 const char *path,
1594 Error **errp)
1595 {
1596 Error *err = NULL;
1597 memory_region_init(mr, owner, name, size);
1598 mr->ram = true;
1599 mr->terminates = true;
1600 mr->destructor = memory_region_destructor_ram;
1601 mr->align = align;
1602 mr->ram_block = qemu_ram_alloc_from_file(size, mr, ram_flags, path, &err);
1603 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
1604 if (err) {
1605 mr->size = int128_zero();
1606 object_unparent(OBJECT(mr));
1607 error_propagate(errp, err);
1608 }
1609 }
1610
1611 void memory_region_init_ram_from_fd(MemoryRegion *mr,
1612 struct Object *owner,
1613 const char *name,
1614 uint64_t size,
1615 bool share,
1616 int fd,
1617 Error **errp)
1618 {
1619 Error *err = NULL;
1620 memory_region_init(mr, owner, name, size);
1621 mr->ram = true;
1622 mr->terminates = true;
1623 mr->destructor = memory_region_destructor_ram;
1624 mr->ram_block = qemu_ram_alloc_from_fd(size, mr,
1625 share ? RAM_SHARED : 0,
1626 fd, &err);
1627 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
1628 if (err) {
1629 mr->size = int128_zero();
1630 object_unparent(OBJECT(mr));
1631 error_propagate(errp, err);
1632 }
1633 }
1634 #endif
1635
1636 void memory_region_init_ram_ptr(MemoryRegion *mr,
1637 Object *owner,
1638 const char *name,
1639 uint64_t size,
1640 void *ptr)
1641 {
1642 memory_region_init(mr, owner, name, size);
1643 mr->ram = true;
1644 mr->terminates = true;
1645 mr->destructor = memory_region_destructor_ram;
1646 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
1647
1648 /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. */
1649 assert(ptr != NULL);
1650 mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
1651 }
1652
1653 void memory_region_init_ram_device_ptr(MemoryRegion *mr,
1654 Object *owner,
1655 const char *name,
1656 uint64_t size,
1657 void *ptr)
1658 {
1659 memory_region_init(mr, owner, name, size);
1660 mr->ram = true;
1661 mr->terminates = true;
1662 mr->ram_device = true;
1663 mr->ops = &ram_device_mem_ops;
1664 mr->opaque = mr;
1665 mr->destructor = memory_region_destructor_ram;
1666 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
1667 /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. */
1668 assert(ptr != NULL);
1669 mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
1670 }
1671
1672 void memory_region_init_alias(MemoryRegion *mr,
1673 Object *owner,
1674 const char *name,
1675 MemoryRegion *orig,
1676 hwaddr offset,
1677 uint64_t size)
1678 {
1679 memory_region_init(mr, owner, name, size);
1680 mr->alias = orig;
1681 mr->alias_offset = offset;
1682 }
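/*
 * Aliases are how the same backing region shows up at more than one
 * guest address.  Sketch (lomem, owner, ram and system_memory are the
 * caller's own objects, named here only for illustration):
 *
 *     memory_region_init_alias(&lomem, owner, "ram-below-4g", ram,
 *                              0, 0xc0000000);
 *     memory_region_add_subregion(system_memory, 0, &lomem);
 *
 * The alias itself owns no RAM; render_memory_region() follows
 * mr->alias and renders the corresponding slice of @ram in its place.
 */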
1683
1684 void memory_region_init_rom_nomigrate(MemoryRegion *mr,
1685 struct Object *owner,
1686 const char *name,
1687 uint64_t size,
1688 Error **errp)
1689 {
1690 Error *err = NULL;
1691 memory_region_init(mr, owner, name, size);
1692 mr->ram = true;
1693 mr->readonly = true;
1694 mr->terminates = true;
1695 mr->destructor = memory_region_destructor_ram;
1696 mr->ram_block = qemu_ram_alloc(size, false, mr, &err);
1697 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
1698 if (err) {
1699 mr->size = int128_zero();
1700 object_unparent(OBJECT(mr));
1701 error_propagate(errp, err);
1702 }
1703 }
1704
1705 void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
1706 Object *owner,
1707 const MemoryRegionOps *ops,
1708 void *opaque,
1709 const char *name,
1710 uint64_t size,
1711 Error **errp)
1712 {
1713 Error *err = NULL;
1714 assert(ops);
1715 memory_region_init(mr, owner, name, size);
1716 mr->ops = ops;
1717 mr->opaque = opaque;
1718 mr->terminates = true;
1719 mr->rom_device = true;
1720 mr->destructor = memory_region_destructor_ram;
1721 mr->ram_block = qemu_ram_alloc(size, false, mr, &err);
1722 if (err) {
1723 mr->size = int128_zero();
1724 object_unparent(OBJECT(mr));
1725 error_propagate(errp, err);
1726 }
1727 }
1728
1729 void memory_region_init_iommu(void *_iommu_mr,
1730 size_t instance_size,
1731 const char *mrtypename,
1732 Object *owner,
1733 const char *name,
1734 uint64_t size)
1735 {
1736 struct IOMMUMemoryRegion *iommu_mr;
1737 struct MemoryRegion *mr;
1738
1739 object_initialize(_iommu_mr, instance_size, mrtypename);
1740 mr = MEMORY_REGION(_iommu_mr);
1741 memory_region_do_init(mr, owner, name, size);
1742 iommu_mr = IOMMU_MEMORY_REGION(mr);
1743 mr->terminates = true; /* then re-forwards */
1744 QLIST_INIT(&iommu_mr->iommu_notify);
1745 iommu_mr->iommu_notify_flags = IOMMU_NOTIFIER_NONE;
1746 }
1747
1748 static void memory_region_finalize(Object *obj)
1749 {
1750 MemoryRegion *mr = MEMORY_REGION(obj);
1751
1752 assert(!mr->container);
1753
1754 /* We know the region is not visible in any address space (it
1755 * does not have a container and cannot be a root either because
1756  * it has no references), so we can blindly clear mr->enabled.
1757 * memory_region_set_enabled instead could trigger a transaction
1758 * and cause an infinite loop.
1759 */
1760 mr->enabled = false;
1761 memory_region_transaction_begin();
1762 while (!QTAILQ_EMPTY(&mr->subregions)) {
1763 MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions);
1764 memory_region_del_subregion(mr, subregion);
1765 }
1766 memory_region_transaction_commit();
1767
1768 mr->destructor(mr);
1769 memory_region_clear_coalescing(mr);
1770 g_free((char *)mr->name);
1771 g_free(mr->ioeventfds);
1772 }
1773
1774 Object *memory_region_owner(MemoryRegion *mr)
1775 {
1776 Object *obj = OBJECT(mr);
1777 return obj->parent;
1778 }
1779
1780 void memory_region_ref(MemoryRegion *mr)
1781 {
1782 /* MMIO callbacks most likely will access data that belongs
1783 * to the owner, hence the need to ref/unref the owner whenever
1784 * the memory region is in use.
1785 *
1786 * The memory region is a child of its owner. As long as the
1787 * owner doesn't call unparent itself on the memory region,
1788 * ref-ing the owner will also keep the memory region alive.
1789 * Memory regions without an owner are supposed to never go away;
1790  * we do not ref/unref them because it slows down DMA noticeably.
1791 */
1792 if (mr && mr->owner) {
1793 object_ref(mr->owner);
1794 }
1795 }
1796
1797 void memory_region_unref(MemoryRegion *mr)
1798 {
1799 if (mr && mr->owner) {
1800 object_unref(mr->owner);
1801 }
1802 }
1803
1804 uint64_t memory_region_size(MemoryRegion *mr)
1805 {
1806 if (int128_eq(mr->size, int128_2_64())) {
1807 return UINT64_MAX;
1808 }
1809 return int128_get64(mr->size);
1810 }
1811
1812 const char *memory_region_name(const MemoryRegion *mr)
1813 {
1814 if (!mr->name) {
1815 ((MemoryRegion *)mr)->name =
1816 object_get_canonical_path_component(OBJECT(mr));
1817 }
1818 return mr->name;
1819 }
1820
1821 bool memory_region_is_ram_device(MemoryRegion *mr)
1822 {
1823 return mr->ram_device;
1824 }
1825
1826 uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
1827 {
1828 uint8_t mask = mr->dirty_log_mask;
1829 if (global_dirty_log && mr->ram_block) {
1830 mask |= (1 << DIRTY_MEMORY_MIGRATION);
1831 }
1832 return mask;
1833 }
1834
1835 bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
1836 {
1837 return memory_region_get_dirty_log_mask(mr) & (1 << client);
1838 }
1839
1840 static void memory_region_update_iommu_notify_flags(IOMMUMemoryRegion *iommu_mr)
1841 {
1842 IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE;
1843 IOMMUNotifier *iommu_notifier;
1844 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
1845
1846 IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
1847 flags |= iommu_notifier->notifier_flags;
1848 }
1849
1850 if (flags != iommu_mr->iommu_notify_flags && imrc->notify_flag_changed) {
1851 imrc->notify_flag_changed(iommu_mr,
1852 iommu_mr->iommu_notify_flags,
1853 flags);
1854 }
1855
1856 iommu_mr->iommu_notify_flags = flags;
1857 }
1858
1859 void memory_region_register_iommu_notifier(MemoryRegion *mr,
1860 IOMMUNotifier *n)
1861 {
1862 IOMMUMemoryRegion *iommu_mr;
1863
1864 if (mr->alias) {
1865 memory_region_register_iommu_notifier(mr->alias, n);
1866 return;
1867 }
1868
1869 /* We need to register for at least one bitfield */
1870 iommu_mr = IOMMU_MEMORY_REGION(mr);
1871 assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
1872 assert(n->start <= n->end);
1873 assert(n->iommu_idx >= 0 &&
1874 n->iommu_idx < memory_region_iommu_num_indexes(iommu_mr));
1875
1876 QLIST_INSERT_HEAD(&iommu_mr->iommu_notify, n, node);
1877 memory_region_update_iommu_notify_flags(iommu_mr);
1878 }
1879
1880 uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr)
1881 {
1882 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
1883
1884 if (imrc->get_min_page_size) {
1885 return imrc->get_min_page_size(iommu_mr);
1886 }
1887 return TARGET_PAGE_SIZE;
1888 }
1889
1890 void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
1891 {
1892 MemoryRegion *mr = MEMORY_REGION(iommu_mr);
1893 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
1894 hwaddr addr, granularity;
1895 IOMMUTLBEntry iotlb;
1896
1897 /* If the IOMMU has its own replay callback, override */
1898 if (imrc->replay) {
1899 imrc->replay(iommu_mr, n);
1900 return;
1901 }
1902
1903 granularity = memory_region_iommu_get_min_page_size(iommu_mr);
1904
1905 for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
1906 iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, n->iommu_idx);
1907 if (iotlb.perm != IOMMU_NONE) {
1908 n->notify(n, &iotlb);
1909 }
1910
1911 /* if (2^64 - MR size) < granularity, it's possible to get an
1912 * infinite loop here. This should catch such a wraparound */
1913 if ((addr + granularity) < addr) {
1914 break;
1915 }
1916 }
1917 }
1918
1919 void memory_region_iommu_replay_all(IOMMUMemoryRegion *iommu_mr)
1920 {
1921 IOMMUNotifier *notifier;
1922
1923 IOMMU_NOTIFIER_FOREACH(notifier, iommu_mr) {
1924 memory_region_iommu_replay(iommu_mr, notifier);
1925 }
1926 }
1927
1928 void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
1929 IOMMUNotifier *n)
1930 {
1931 IOMMUMemoryRegion *iommu_mr;
1932
1933 if (mr->alias) {
1934 memory_region_unregister_iommu_notifier(mr->alias, n);
1935 return;
1936 }
1937 QLIST_REMOVE(n, node);
1938 iommu_mr = IOMMU_MEMORY_REGION(mr);
1939 memory_region_update_iommu_notify_flags(iommu_mr);
1940 }
1941
1942 void memory_region_notify_one(IOMMUNotifier *notifier,
1943 IOMMUTLBEntry *entry)
1944 {
1945 IOMMUNotifierFlag request_flags;
1946 hwaddr entry_end = entry->iova + entry->addr_mask;
1947
1948 /*
1949  * Skip the notification if it does not overlap with the
1950  * registered range.
1951 */
1952 if (notifier->start > entry_end || notifier->end < entry->iova) {
1953 return;
1954 }
1955
1956 assert(entry->iova >= notifier->start && entry_end <= notifier->end);
1957
1958 if (entry->perm & IOMMU_RW) {
1959 request_flags = IOMMU_NOTIFIER_MAP;
1960 } else {
1961 request_flags = IOMMU_NOTIFIER_UNMAP;
1962 }
1963
1964 if (notifier->notifier_flags & request_flags) {
1965 notifier->notify(notifier, entry);
1966 }
1967 }
1968
1969 void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
1970 int iommu_idx,
1971 IOMMUTLBEntry entry)
1972 {
1973 IOMMUNotifier *iommu_notifier;
1974
1975 assert(memory_region_is_iommu(MEMORY_REGION(iommu_mr)));
1976
1977 IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
1978 if (iommu_notifier->iommu_idx == iommu_idx) {
1979 memory_region_notify_one(iommu_notifier, &entry);
1980 }
1981 }
1982 }
1983
1984 int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
1985 enum IOMMUMemoryRegionAttr attr,
1986 void *data)
1987 {
1988 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
1989
1990 if (!imrc->get_attr) {
1991 return -EINVAL;
1992 }
1993
1994 return imrc->get_attr(iommu_mr, attr, data);
1995 }
1996
1997 int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
1998 MemTxAttrs attrs)
1999 {
2000 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
2001
2002 if (!imrc->attrs_to_index) {
2003 return 0;
2004 }
2005
2006 return imrc->attrs_to_index(iommu_mr, attrs);
2007 }
2008
2009 int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr)
2010 {
2011 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
2012
2013 if (!imrc->num_indexes) {
2014 return 1;
2015 }
2016
2017 return imrc->num_indexes(iommu_mr);
2018 }
2019
2020 void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
2021 {
2022 uint8_t mask = 1 << client;
2023 uint8_t old_logging;
2024
2025 assert(client == DIRTY_MEMORY_VGA);
2026 old_logging = mr->vga_logging_count;
2027 mr->vga_logging_count += log ? 1 : -1;
2028 if (!!old_logging == !!mr->vga_logging_count) {
2029 return;
2030 }
2031
2032 memory_region_transaction_begin();
2033 mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
2034 memory_region_update_pending |= mr->enabled;
2035 memory_region_transaction_commit();
2036 }
2037
2038 void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
2039 hwaddr size)
2040 {
2041 assert(mr->ram_block);
2042 cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
2043 size,
2044 memory_region_get_dirty_log_mask(mr));
2045 }
2046
2047 static void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
2048 {
2049 MemoryListener *listener;
2050 AddressSpace *as;
2051 FlatView *view;
2052 FlatRange *fr;
2053
2054 /* If the same address space has multiple log_sync listeners, we
2055 * visit that address space's FlatView multiple times. But because
2056 * log_sync listeners are rare, it's still cheaper than walking each
2057 * address space once.
2058 */
2059 QTAILQ_FOREACH(listener, &memory_listeners, link) {
2060 if (!listener->log_sync) {
2061 continue;
2062 }
2063 as = listener->address_space;
2064 view = address_space_get_flatview(as);
2065 FOR_EACH_FLAT_RANGE(fr, view) {
2066 if (fr->dirty_log_mask && (!mr || fr->mr == mr)) {
2067 MemoryRegionSection mrs = section_from_flat_range(fr, view);
2068 listener->log_sync(listener, &mrs);
2069 }
2070 }
2071 flatview_unref(view);
2072 }
2073 }
2074
2075 void memory_region_clear_dirty_bitmap(MemoryRegion *mr, hwaddr start,
2076 hwaddr len)
2077 {
2078 MemoryRegionSection mrs;
2079 MemoryListener *listener;
2080 AddressSpace *as;
2081 FlatView *view;
2082 FlatRange *fr;
2083 hwaddr sec_start, sec_end, sec_size;
2084
2085 QTAILQ_FOREACH(listener, &memory_listeners, link) {
2086 if (!listener->log_clear) {
2087 continue;
2088 }
2089 as = listener->address_space;
2090 view = address_space_get_flatview(as);
2091 FOR_EACH_FLAT_RANGE(fr, view) {
2092 if (!fr->dirty_log_mask || fr->mr != mr) {
2093 /*
2094 * The clear-dirty-bitmap operation only applies to regions
2095 * that have dirty logging enabled.
2096 */
2097 continue;
2098 }
2099
2100 mrs = section_from_flat_range(fr, view);
2101
2102 sec_start = MAX(mrs.offset_within_region, start);
2103 sec_end = mrs.offset_within_region + int128_get64(mrs.size);
2104 sec_end = MIN(sec_end, start + len);
2105
2106 if (sec_start >= sec_end) {
2107 /*
2108 * If this memory region section has no intersection
2109 * with the requested range, skip.
2110 */
2111 continue;
2112 }
2113
2114 /* Valid case; shrink the section if needed */
2115 mrs.offset_within_address_space +=
2116 sec_start - mrs.offset_within_region;
2117 mrs.offset_within_region = sec_start;
2118 sec_size = sec_end - sec_start;
2119 mrs.size = int128_make64(sec_size);
2120 listener->log_clear(listener, &mrs);
2121 }
2122 flatview_unref(view);
2123 }
2124 }
2125
2126 DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
2127 hwaddr addr,
2128 hwaddr size,
2129 unsigned client)
2130 {
2131 DirtyBitmapSnapshot *snapshot;
2132 assert(mr->ram_block);
2133 memory_region_sync_dirty_bitmap(mr);
2134 snapshot = cpu_physical_memory_snapshot_and_clear_dirty(mr, addr, size, client);
2135 memory_global_after_dirty_log_sync();
2136 return snapshot;
2137 }
2138
2139 bool memory_region_snapshot_get_dirty(MemoryRegion *mr, DirtyBitmapSnapshot *snap,
2140 hwaddr addr, hwaddr size)
2141 {
2142 assert(mr->ram_block);
2143 return cpu_physical_memory_snapshot_get_dirty(snap,
2144 memory_region_get_ram_addr(mr) + addr, size);
2145 }
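/*
 * A hedged sketch of the display-device dirty-tracking pattern these two
 * helpers are meant for (field names such as "s->vram" and "stride" are
 * illustrative, not taken from this file):
 *
 *     memory_region_set_log(&s->vram, true, DIRTY_MEMORY_VGA);
 *     ...
 *     DirtyBitmapSnapshot *snap =
 *         memory_region_snapshot_and_clear_dirty(&s->vram, 0, vram_size,
 *                                                DIRTY_MEMORY_VGA);
 *     for (y = 0; y < height; y++) {
 *         if (memory_region_snapshot_get_dirty(&s->vram, snap,
 *                                              y * stride, stride)) {
 *             // redraw scanline y
 *         }
 *     }
 *     g_free(snap);
 *
 * Querying the snapshot rather than the live bitmap keeps the result
 * stable while the guest keeps writing to VRAM.
 */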
2146
2147 void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
2148 {
2149 if (mr->readonly != readonly) {
2150 memory_region_transaction_begin();
2151 mr->readonly = readonly;
2152 memory_region_update_pending |= mr->enabled;
2153 memory_region_transaction_commit();
2154 }
2155 }
2156
2157 void memory_region_set_nonvolatile(MemoryRegion *mr, bool nonvolatile)
2158 {
2159 if (mr->nonvolatile != nonvolatile) {
2160 memory_region_transaction_begin();
2161 mr->nonvolatile = nonvolatile;
2162 memory_region_update_pending |= mr->enabled;
2163 memory_region_transaction_commit();
2164 }
2165 }
2166
2167 void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
2168 {
2169 if (mr->romd_mode != romd_mode) {
2170 memory_region_transaction_begin();
2171 mr->romd_mode = romd_mode;
2172 memory_region_update_pending |= mr->enabled;
2173 memory_region_transaction_commit();
2174 }
2175 }
2176
2177 void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
2178 hwaddr size, unsigned client)
2179 {
2180 assert(mr->ram_block);
2181 cpu_physical_memory_test_and_clear_dirty(
2182 memory_region_get_ram_addr(mr) + addr, size, client);
2183 }
2184
2185 int memory_region_get_fd(MemoryRegion *mr)
2186 {
2187 int fd;
2188
2189 rcu_read_lock();
2190 while (mr->alias) {
2191 mr = mr->alias;
2192 }
2193 fd = mr->ram_block->fd;
2194 rcu_read_unlock();
2195
2196 return fd;
2197 }
2198
2199 void *memory_region_get_ram_ptr(MemoryRegion *mr)
2200 {
2201 void *ptr;
2202 uint64_t offset = 0;
2203
2204 rcu_read_lock();
2205 while (mr->alias) {
2206 offset += mr->alias_offset;
2207 mr = mr->alias;
2208 }
2209 assert(mr->ram_block);
2210 ptr = qemu_map_ram_ptr(mr->ram_block, offset);
2211 rcu_read_unlock();
2212
2213 return ptr;
2214 }
2215
2216 MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset)
2217 {
2218 RAMBlock *block;
2219
2220 block = qemu_ram_block_from_host(ptr, false, offset);
2221 if (!block) {
2222 return NULL;
2223 }
2224
2225 return block->mr;
2226 }
2227
2228 ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
2229 {
2230 return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID;
2231 }
2232
2233 void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp)
2234 {
2235 assert(mr->ram_block);
2236
2237 qemu_ram_resize(mr->ram_block, newsize, errp);
2238 }
2239
2240 /*
2241 * Notify the proper memory listeners about the change to the newly
2242 * added/removed CoalescedMemoryRange.
2243 */
2244 static void memory_region_update_coalesced_range(MemoryRegion *mr,
2245 CoalescedMemoryRange *cmr,
2246 bool add)
2247 {
2248 AddressSpace *as;
2249 FlatView *view;
2250 FlatRange *fr;
2251
2252 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
2253 view = address_space_get_flatview(as);
2254 FOR_EACH_FLAT_RANGE(fr, view) {
2255 if (fr->mr == mr) {
2256 flat_range_coalesced_io_notify(fr, as, cmr, add);
2257 }
2258 }
2259 flatview_unref(view);
2260 }
2261 }
2262
2263 void memory_region_set_coalescing(MemoryRegion *mr)
2264 {
2265 memory_region_clear_coalescing(mr);
2266 memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
2267 }
2268
2269 void memory_region_add_coalescing(MemoryRegion *mr,
2270 hwaddr offset,
2271 uint64_t size)
2272 {
2273 CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));
2274
2275 cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
2276 QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
2277 memory_region_update_coalesced_range(mr, cmr, true);
2278 memory_region_set_flush_coalesced(mr);
2279 }
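/*
 * Illustrative only: a device with MMIO registers whose writes have no
 * immediate side effects can let the accelerator batch them, e.g.
 * (the region name and offsets are assumptions):
 *
 *     memory_region_add_coalescing(&s->mmio, 0x100, 0x100);
 *
 * or coalesce the whole region with memory_region_set_coalescing().
 * Because memory_region_set_flush_coalesced() is set above, ordinary
 * accesses to the region first flush anything the kernel has buffered.
 */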
2280
2281 void memory_region_clear_coalescing(MemoryRegion *mr)
2282 {
2283 CoalescedMemoryRange *cmr;
2284
2285 if (QTAILQ_EMPTY(&mr->coalesced)) {
2286 return;
2287 }
2288
2289 qemu_flush_coalesced_mmio_buffer();
2290 mr->flush_coalesced_mmio = false;
2291
2292 while (!QTAILQ_EMPTY(&mr->coalesced)) {
2293 cmr = QTAILQ_FIRST(&mr->coalesced);
2294 QTAILQ_REMOVE(&mr->coalesced, cmr, link);
2295 memory_region_update_coalesced_range(mr, cmr, false);
2296 g_free(cmr);
2297 }
2298 }
2299
2300 void memory_region_set_flush_coalesced(MemoryRegion *mr)
2301 {
2302 mr->flush_coalesced_mmio = true;
2303 }
2304
2305 void memory_region_clear_flush_coalesced(MemoryRegion *mr)
2306 {
2307 qemu_flush_coalesced_mmio_buffer();
2308 if (QTAILQ_EMPTY(&mr->coalesced)) {
2309 mr->flush_coalesced_mmio = false;
2310 }
2311 }
2312
2313 void memory_region_clear_global_locking(MemoryRegion *mr)
2314 {
2315 mr->global_locking = false;
2316 }
2317
2318 static bool userspace_eventfd_warning;
2319
2320 void memory_region_add_eventfd(MemoryRegion *mr,
2321 hwaddr addr,
2322 unsigned size,
2323 bool match_data,
2324 uint64_t data,
2325 EventNotifier *e)
2326 {
2327 MemoryRegionIoeventfd mrfd = {
2328 .addr.start = int128_make64(addr),
2329 .addr.size = int128_make64(size),
2330 .match_data = match_data,
2331 .data = data,
2332 .e = e,
2333 };
2334 unsigned i;
2335
2336 if (kvm_enabled() && (!(kvm_eventfds_enabled() ||
2337 userspace_eventfd_warning))) {
2338 userspace_eventfd_warning = true;
2339 error_report("Using eventfd without MMIO binding in KVM. "
2340 "Suboptimal performance expected");
2341 }
2342
2343 if (size) {
2344 adjust_endianness(mr, &mrfd.data, size_memop(size) | MO_TE);
2345 }
2346 memory_region_transaction_begin();
2347 for (i = 0; i < mr->ioeventfd_nb; ++i) {
2348 if (memory_region_ioeventfd_before(&mrfd, &mr->ioeventfds[i])) {
2349 break;
2350 }
2351 }
2352 ++mr->ioeventfd_nb;
2353 mr->ioeventfds = g_realloc(mr->ioeventfds,
2354 sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
2355 memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
2356 sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
2357 mr->ioeventfds[i] = mrfd;
2358 ioeventfd_update_pending |= mr->enabled;
2359 memory_region_transaction_commit();
2360 }
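/*
 * Hedged usage sketch (a virtio-style doorbell; the notifier field and
 * the register layout are assumptions for illustration only):
 *
 *     EventNotifier *n = &vq->host_notifier;
 *     event_notifier_init(n, 0);
 *     memory_region_add_eventfd(&proxy->notify_mr,
 *                               queue_index * 2,  // doorbell offset
 *                               2,                // 2-byte access
 *                               true,             // match the written value
 *                               queue_index,      // expected data
 *                               n);
 *
 * With KVM, such a write is completed in the kernel by signalling the
 * eventfd instead of exiting to userspace.
 */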
2361
2362 void memory_region_del_eventfd(MemoryRegion *mr,
2363 hwaddr addr,
2364 unsigned size,
2365 bool match_data,
2366 uint64_t data,
2367 EventNotifier *e)
2368 {
2369 MemoryRegionIoeventfd mrfd = {
2370 .addr.start = int128_make64(addr),
2371 .addr.size = int128_make64(size),
2372 .match_data = match_data,
2373 .data = data,
2374 .e = e,
2375 };
2376 unsigned i;
2377
2378 if (size) {
2379 adjust_endianness(mr, &mrfd.data, size_memop(size) | MO_TE);
2380 }
2381 memory_region_transaction_begin();
2382 for (i = 0; i < mr->ioeventfd_nb; ++i) {
2383 if (memory_region_ioeventfd_equal(&mrfd, &mr->ioeventfds[i])) {
2384 break;
2385 }
2386 }
2387 assert(i != mr->ioeventfd_nb);
2388 memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
2389 sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
2390 --mr->ioeventfd_nb;
2391 mr->ioeventfds = g_realloc(mr->ioeventfds,
2392 sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
2393 ioeventfd_update_pending |= mr->enabled;
2394 memory_region_transaction_commit();
2395 }
2396
2397 static void memory_region_update_container_subregions(MemoryRegion *subregion)
2398 {
2399 MemoryRegion *mr = subregion->container;
2400 MemoryRegion *other;
2401
2402 memory_region_transaction_begin();
2403
2404 memory_region_ref(subregion);
2405 QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
2406 if (subregion->priority >= other->priority) {
2407 QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
2408 goto done;
2409 }
2410 }
2411 QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
2412 done:
2413 memory_region_update_pending |= mr->enabled && subregion->enabled;
2414 memory_region_transaction_commit();
2415 }
2416
2417 static void memory_region_add_subregion_common(MemoryRegion *mr,
2418 hwaddr offset,
2419 MemoryRegion *subregion)
2420 {
2421 assert(!subregion->container);
2422 subregion->container = mr;
2423 subregion->addr = offset;
2424 memory_region_update_container_subregions(subregion);
2425 }
2426
2427 void memory_region_add_subregion(MemoryRegion *mr,
2428 hwaddr offset,
2429 MemoryRegion *subregion)
2430 {
2431 subregion->priority = 0;
2432 memory_region_add_subregion_common(mr, offset, subregion);
2433 }
2434
2435 void memory_region_add_subregion_overlap(MemoryRegion *mr,
2436 hwaddr offset,
2437 MemoryRegion *subregion,
2438 int priority)
2439 {
2440 subregion->priority = priority;
2441 memory_region_add_subregion_common(mr, offset, subregion);
2442 }
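/*
 * A small, self-contained sketch of building a region tree (the owner,
 * sizes and addresses are illustrative):
 *
 *     memory_region_init(&s->container, OBJECT(s), "board", UINT64_MAX);
 *     memory_region_init_ram(&s->ram, OBJECT(s), "board.ram", ram_size,
 *                            &error_fatal);
 *     memory_region_add_subregion(&s->container, 0, &s->ram);
 *     memory_region_init_io(&s->mmio, OBJECT(s), &mmio_ops, s,
 *                           "board.mmio", 0x1000);
 *     // priority 1: the MMIO window hides the RAM underneath it
 *     memory_region_add_subregion_overlap(&s->container, 0x10000,
 *                                         &s->mmio, 1);
 */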
2443
2444 void memory_region_del_subregion(MemoryRegion *mr,
2445 MemoryRegion *subregion)
2446 {
2447 memory_region_transaction_begin();
2448 assert(subregion->container == mr);
2449 subregion->container = NULL;
2450 QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
2451 memory_region_unref(subregion);
2452 memory_region_update_pending |= mr->enabled && subregion->enabled;
2453 memory_region_transaction_commit();
2454 }
2455
2456 void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
2457 {
2458 if (enabled == mr->enabled) {
2459 return;
2460 }
2461 memory_region_transaction_begin();
2462 mr->enabled = enabled;
2463 memory_region_update_pending = true;
2464 memory_region_transaction_commit();
2465 }
2466
2467 void memory_region_set_size(MemoryRegion *mr, uint64_t size)
2468 {
2469 Int128 s = int128_make64(size);
2470
2471 if (size == UINT64_MAX) {
2472 s = int128_2_64();
2473 }
2474 if (int128_eq(s, mr->size)) {
2475 return;
2476 }
2477 memory_region_transaction_begin();
2478 mr->size = s;
2479 memory_region_update_pending = true;
2480 memory_region_transaction_commit();
2481 }
2482
2483 static void memory_region_readd_subregion(MemoryRegion *mr)
2484 {
2485 MemoryRegion *container = mr->container;
2486
2487 if (container) {
2488 memory_region_transaction_begin();
2489 memory_region_ref(mr);
2490 memory_region_del_subregion(container, mr);
2491 mr->container = container;
2492 memory_region_update_container_subregions(mr);
2493 memory_region_unref(mr);
2494 memory_region_transaction_commit();
2495 }
2496 }
2497
2498 void memory_region_set_address(MemoryRegion *mr, hwaddr addr)
2499 {
2500 if (addr != mr->addr) {
2501 mr->addr = addr;
2502 memory_region_readd_subregion(mr);
2503 }
2504 }
2505
2506 void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset)
2507 {
2508 assert(mr->alias);
2509
2510 if (offset == mr->alias_offset) {
2511 return;
2512 }
2513
2514 memory_region_transaction_begin();
2515 mr->alias_offset = offset;
2516 memory_region_update_pending |= mr->enabled;
2517 memory_region_transaction_commit();
2518 }
2519
2520 uint64_t memory_region_get_alignment(const MemoryRegion *mr)
2521 {
2522 return mr->align;
2523 }
2524
2525 static int cmp_flatrange_addr(const void *addr_, const void *fr_)
2526 {
2527 const AddrRange *addr = addr_;
2528 const FlatRange *fr = fr_;
2529
2530 if (int128_le(addrrange_end(*addr), fr->addr.start)) {
2531 return -1;
2532 } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
2533 return 1;
2534 }
2535 return 0;
2536 }
2537
2538 static FlatRange *flatview_lookup(FlatView *view, AddrRange addr)
2539 {
2540 return bsearch(&addr, view->ranges, view->nr,
2541 sizeof(FlatRange), cmp_flatrange_addr);
2542 }
2543
2544 bool memory_region_is_mapped(MemoryRegion *mr)
2545 {
2546 return mr->container ? true : false;
2547 }
2548
2549 /* Same as memory_region_find, but it does not add a reference to the
2550 * returned region. It must be called from an RCU critical section.
2551 */
2552 static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr,
2553 hwaddr addr, uint64_t size)
2554 {
2555 MemoryRegionSection ret = { .mr = NULL };
2556 MemoryRegion *root;
2557 AddressSpace *as;
2558 AddrRange range;
2559 FlatView *view;
2560 FlatRange *fr;
2561
2562 addr += mr->addr;
2563 for (root = mr; root->container; ) {
2564 root = root->container;
2565 addr += root->addr;
2566 }
2567
2568 as = memory_region_to_address_space(root);
2569 if (!as) {
2570 return ret;
2571 }
2572 range = addrrange_make(int128_make64(addr), int128_make64(size));
2573
2574 view = address_space_to_flatview(as);
2575 fr = flatview_lookup(view, range);
2576 if (!fr) {
2577 return ret;
2578 }
2579
2580 while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) {
2581 --fr;
2582 }
2583
2584 ret.mr = fr->mr;
2585 ret.fv = view;
2586 range = addrrange_intersection(range, fr->addr);
2587 ret.offset_within_region = fr->offset_in_region;
2588 ret.offset_within_region += int128_get64(int128_sub(range.start,
2589 fr->addr.start));
2590 ret.size = range.size;
2591 ret.offset_within_address_space = int128_get64(range.start);
2592 ret.readonly = fr->readonly;
2593 ret.nonvolatile = fr->nonvolatile;
2594 return ret;
2595 }
2596
2597 MemoryRegionSection memory_region_find(MemoryRegion *mr,
2598 hwaddr addr, uint64_t size)
2599 {
2600 MemoryRegionSection ret;
2601 rcu_read_lock();
2602 ret = memory_region_find_rcu(mr, addr, size);
2603 if (ret.mr) {
2604 memory_region_ref(ret.mr);
2605 }
2606 rcu_read_unlock();
2607 return ret;
2608 }
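/*
 * Typical lookup pattern (the address and size are illustrative); the
 * caller owns the reference that memory_region_find() takes on the result:
 *
 *     MemoryRegionSection sec = memory_region_find(get_system_memory(),
 *                                                  addr, 4);
 *     if (sec.mr) {
 *         if (memory_region_is_ram(sec.mr)) {
 *             // e.g. use sec.offset_within_region within sec.mr
 *         }
 *         memory_region_unref(sec.mr);
 *     }
 */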
2609
2610 bool memory_region_present(MemoryRegion *container, hwaddr addr)
2611 {
2612 MemoryRegion *mr;
2613
2614 rcu_read_lock();
2615 mr = memory_region_find_rcu(container, addr, 1).mr;
2616 rcu_read_unlock();
2617 return mr && mr != container;
2618 }
2619
2620 void memory_global_dirty_log_sync(void)
2621 {
2622 memory_region_sync_dirty_bitmap(NULL);
2623 }
2624
2625 void memory_global_after_dirty_log_sync(void)
2626 {
2627 MEMORY_LISTENER_CALL_GLOBAL(log_global_after_sync, Forward);
2628 }
2629
2630 static VMChangeStateEntry *vmstate_change;
2631
2632 void memory_global_dirty_log_start(void)
2633 {
2634 if (vmstate_change) {
2635 qemu_del_vm_change_state_handler(vmstate_change);
2636 vmstate_change = NULL;
2637 }
2638
2639 global_dirty_log = true;
2640
2641 MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward);
2642
2643 /* Refresh DIRTY_MEMORY_MIGRATION bit. */
2644 memory_region_transaction_begin();
2645 memory_region_update_pending = true;
2646 memory_region_transaction_commit();
2647 }
2648
2649 static void memory_global_dirty_log_do_stop(void)
2650 {
2651 global_dirty_log = false;
2652
2653 /* Refresh DIRTY_MEMORY_MIGRATION bit. */
2654 memory_region_transaction_begin();
2655 memory_region_update_pending = true;
2656 memory_region_transaction_commit();
2657
2658 MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
2659 }
2660
2661 static void memory_vm_change_state_handler(void *opaque, int running,
2662 RunState state)
2663 {
2664 if (running) {
2665 memory_global_dirty_log_do_stop();
2666
2667 if (vmstate_change) {
2668 qemu_del_vm_change_state_handler(vmstate_change);
2669 vmstate_change = NULL;
2670 }
2671 }
2672 }
2673
2674 void memory_global_dirty_log_stop(void)
2675 {
2676 if (!runstate_is_running()) {
2677 if (vmstate_change) {
2678 return;
2679 }
2680 vmstate_change = qemu_add_vm_change_state_handler(
2681 memory_vm_change_state_handler, NULL);
2682 return;
2683 }
2684
2685 memory_global_dirty_log_do_stop();
2686 }
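/*
 * The migration code is the main user of this global machinery; the
 * (simplified, illustrative) sequence is:
 *
 *     memory_global_dirty_log_start();   // setup: start logging
 *     ...
 *     memory_global_dirty_log_sync();    // per iteration: pull dirty bits
 *     ...
 *     memory_global_dirty_log_stop();    // cleanup: stop logging
 *
 * Note that when the VM is not running, the stop is deferred via the
 * vm-change-state handler above and only performed once the VM resumes.
 */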
2687
2688 static void listener_add_address_space(MemoryListener *listener,
2689 AddressSpace *as)
2690 {
2691 FlatView *view;
2692 FlatRange *fr;
2693
2694 if (listener->begin) {
2695 listener->begin(listener);
2696 }
2697 if (global_dirty_log) {
2698 if (listener->log_global_start) {
2699 listener->log_global_start(listener);
2700 }
2701 }
2702
2703 view = address_space_get_flatview(as);
2704 FOR_EACH_FLAT_RANGE(fr, view) {
2705 MemoryRegionSection section = section_from_flat_range(fr, view);
2706
2707 if (listener->region_add) {
2708 listener->region_add(listener, &section);
2709 }
2710 if (fr->dirty_log_mask && listener->log_start) {
2711 listener->log_start(listener, &section, 0, fr->dirty_log_mask);
2712 }
2713 }
2714 if (listener->commit) {
2715 listener->commit(listener);
2716 }
2717 flatview_unref(view);
2718 }
2719
2720 static void listener_del_address_space(MemoryListener *listener,
2721 AddressSpace *as)
2722 {
2723 FlatView *view;
2724 FlatRange *fr;
2725
2726 if (listener->begin) {
2727 listener->begin(listener);
2728 }
2729 view = address_space_get_flatview(as);
2730 FOR_EACH_FLAT_RANGE(fr, view) {
2731 MemoryRegionSection section = section_from_flat_range(fr, view);
2732
2733 if (fr->dirty_log_mask && listener->log_stop) {
2734 listener->log_stop(listener, &section, fr->dirty_log_mask, 0);
2735 }
2736 if (listener->region_del) {
2737 listener->region_del(listener, &section);
2738 }
2739 }
2740 if (listener->commit) {
2741 listener->commit(listener);
2742 }
2743 flatview_unref(view);
2744 }
2745
2746 void memory_listener_register(MemoryListener *listener, AddressSpace *as)
2747 {
2748 MemoryListener *other = NULL;
2749
2750 listener->address_space = as;
2751 if (QTAILQ_EMPTY(&memory_listeners)
2752 || listener->priority >= QTAILQ_LAST(&memory_listeners)->priority) {
2753 QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
2754 } else {
2755 QTAILQ_FOREACH(other, &memory_listeners, link) {
2756 if (listener->priority < other->priority) {
2757 break;
2758 }
2759 }
2760 QTAILQ_INSERT_BEFORE(other, listener, link);
2761 }
2762
2763 if (QTAILQ_EMPTY(&as->listeners)
2764 || listener->priority >= QTAILQ_LAST(&as->listeners)->priority) {
2765 QTAILQ_INSERT_TAIL(&as->listeners, listener, link_as);
2766 } else {
2767 QTAILQ_FOREACH(other, &as->listeners, link_as) {
2768 if (listener->priority < other->priority) {
2769 break;
2770 }
2771 }
2772 QTAILQ_INSERT_BEFORE(other, listener, link_as);
2773 }
2774
2775 listener_add_address_space(listener, as);
2776 }
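/*
 * A minimal listener sketch (the callback body and priority value are
 * illustrative assumptions):
 *
 *     static void my_region_add(MemoryListener *listener,
 *                               MemoryRegionSection *section)
 *     {
 *         // e.g. mirror the section into an accelerator-specific table
 *     }
 *
 *     static MemoryListener my_listener = {
 *         .region_add = my_region_add,
 *         .priority = 10,
 *     };
 *
 *     memory_listener_register(&my_listener, &address_space_memory);
 *
 * Registration immediately replays the current FlatView through
 * region_add, as listener_add_address_space() above shows.
 */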
2777
2778 void memory_listener_unregister(MemoryListener *listener)
2779 {
2780 if (!listener->address_space) {
2781 return;
2782 }
2783
2784 listener_del_address_space(listener, listener->address_space);
2785 QTAILQ_REMOVE(&memory_listeners, listener, link);
2786 QTAILQ_REMOVE(&listener->address_space->listeners, listener, link_as);
2787 listener->address_space = NULL;
2788 }
2789
2790 void address_space_remove_listeners(AddressSpace *as)
2791 {
2792 while (!QTAILQ_EMPTY(&as->listeners)) {
2793 memory_listener_unregister(QTAILQ_FIRST(&as->listeners));
2794 }
2795 }
2796
2797 void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
2798 {
2799 memory_region_ref(root);
2800 as->root = root;
2801 as->current_map = NULL;
2802 as->ioeventfd_nb = 0;
2803 as->ioeventfds = NULL;
2804 QTAILQ_INIT(&as->listeners);
2805 QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
2806 as->name = g_strdup(name ? name : "anonymous");
2807 address_space_update_topology(as);
2808 address_space_update_ioeventfds(as);
2809 }
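/*
 * Illustrative device-side usage: give a DMA engine its own view of
 * memory (the alias root used here is just one possible choice):
 *
 *     memory_region_init_alias(&s->dma_root, OBJECT(s), "dma-root",
 *                              get_system_memory(), 0, UINT64_MAX);
 *     address_space_init(&s->dma_as, &s->dma_root, "my-dma");
 *     ...
 *     address_space_destroy(&s->dma_as);
 */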
2810
2811 static void do_address_space_destroy(AddressSpace *as)
2812 {
2813 assert(QTAILQ_EMPTY(&as->listeners));
2814
2815 flatview_unref(as->current_map);
2816 g_free(as->name);
2817 g_free(as->ioeventfds);
2818 memory_region_unref(as->root);
2819 }
2820
2821 void address_space_destroy(AddressSpace *as)
2822 {
2823 MemoryRegion *root = as->root;
2824
2825 /* Flush out anything from MemoryListeners listening in on this */
2826 memory_region_transaction_begin();
2827 as->root = NULL;
2828 memory_region_transaction_commit();
2829 QTAILQ_REMOVE(&address_spaces, as, address_spaces_link);
2830
2831 /* At this point, as->dispatch and as->current_map are dummy
2832 * entries that the guest should never use. Wait for the old
2833 * values to expire before freeing the data.
2834 */
2835 as->root = root;
2836 call_rcu(as, do_address_space_destroy, rcu);
2837 }
2838
2839 static const char *memory_region_type(MemoryRegion *mr)
2840 {
2841 if (memory_region_is_ram_device(mr)) {
2842 return "ramd";
2843 } else if (memory_region_is_romd(mr)) {
2844 return "romd";
2845 } else if (memory_region_is_rom(mr)) {
2846 return "rom";
2847 } else if (memory_region_is_ram(mr)) {
2848 return "ram";
2849 } else {
2850 return "i/o";
2851 }
2852 }
2853
2854 typedef struct MemoryRegionList MemoryRegionList;
2855
2856 struct MemoryRegionList {
2857 const MemoryRegion *mr;
2858 QTAILQ_ENTRY(MemoryRegionList) mrqueue;
2859 };
2860
2861 typedef QTAILQ_HEAD(, MemoryRegionList) MemoryRegionListHead;
2862
2863 #define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
2864 int128_sub((size), int128_one())) : 0)
2865 #define MTREE_INDENT " "
2866
2867 static void mtree_expand_owner(const char *label, Object *obj)
2868 {
2869 DeviceState *dev = (DeviceState *) object_dynamic_cast(obj, TYPE_DEVICE);
2870
2871 qemu_printf(" %s:{%s", label, dev ? "dev" : "obj");
2872 if (dev && dev->id) {
2873 qemu_printf(" id=%s", dev->id);
2874 } else {
2875 gchar *canonical_path = object_get_canonical_path(obj);
2876 if (canonical_path) {
2877 qemu_printf(" path=%s", canonical_path);
2878 g_free(canonical_path);
2879 } else {
2880 qemu_printf(" type=%s", object_get_typename(obj));
2881 }
2882 }
2883 qemu_printf("}");
2884 }
2885
2886 static void mtree_print_mr_owner(const MemoryRegion *mr)
2887 {
2888 Object *owner = mr->owner;
2889 Object *parent = memory_region_owner((MemoryRegion *)mr);
2890
2891 if (!owner && !parent) {
2892 qemu_printf(" orphan");
2893 return;
2894 }
2895 if (owner) {
2896 mtree_expand_owner("owner", owner);
2897 }
2898 if (parent && parent != owner) {
2899 mtree_expand_owner("parent", parent);
2900 }
2901 }
2902
2903 static void mtree_print_mr(const MemoryRegion *mr, unsigned int level,
2904 hwaddr base,
2905 MemoryRegionListHead *alias_print_queue,
2906 bool owner)
2907 {
2908 MemoryRegionList *new_ml, *ml, *next_ml;
2909 MemoryRegionListHead submr_print_queue;
2910 const MemoryRegion *submr;
2911 unsigned int i;
2912 hwaddr cur_start, cur_end;
2913
2914 if (!mr) {
2915 return;
2916 }
2917
2918 for (i = 0; i < level; i++) {
2919 qemu_printf(MTREE_INDENT);
2920 }
2921
2922 cur_start = base + mr->addr;
2923 cur_end = cur_start + MR_SIZE(mr->size);
2924
2925 /*
2926 * Try to detect overflow of the memory region. This should never
2927 * happen normally; when it does, print a marker to warn the user
2928 * who is inspecting the output.
2929 */
2930 if (cur_start < base || cur_end < cur_start) {
2931 qemu_printf("[DETECTED OVERFLOW!] ");
2932 }
2933
2934 if (mr->alias) {
2935 MemoryRegionList *ml;
2936 bool found = false;
2937
2938 /* check if the alias is already in the queue */
2939 QTAILQ_FOREACH(ml, alias_print_queue, mrqueue) {
2940 if (ml->mr == mr->alias) {
2941 found = true;
2942 }
2943 }
2944
2945 if (!found) {
2946 ml = g_new(MemoryRegionList, 1);
2947 ml->mr = mr->alias;
2948 QTAILQ_INSERT_TAIL(alias_print_queue, ml, mrqueue);
2949 }
2950 qemu_printf(TARGET_FMT_plx "-" TARGET_FMT_plx
2951 " (prio %d, %s%s): alias %s @%s " TARGET_FMT_plx
2952 "-" TARGET_FMT_plx "%s",
2953 cur_start, cur_end,
2954 mr->priority,
2955 mr->nonvolatile ? "nv-" : "",
2956 memory_region_type((MemoryRegion *)mr),
2957 memory_region_name(mr),
2958 memory_region_name(mr->alias),
2959 mr->alias_offset,
2960 mr->alias_offset + MR_SIZE(mr->size),
2961 mr->enabled ? "" : " [disabled]");
2962 if (owner) {
2963 mtree_print_mr_owner(mr);
2964 }
2965 } else {
2966 qemu_printf(TARGET_FMT_plx "-" TARGET_FMT_plx
2967 " (prio %d, %s%s): %s%s",
2968 cur_start, cur_end,
2969 mr->priority,
2970 mr->nonvolatile ? "nv-" : "",
2971 memory_region_type((MemoryRegion *)mr),
2972 memory_region_name(mr),
2973 mr->enabled ? "" : " [disabled]");
2974 if (owner) {
2975 mtree_print_mr_owner(mr);
2976 }
2977 }
2978 qemu_printf("\n");
2979
2980 QTAILQ_INIT(&submr_print_queue);
2981
2982 QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
2983 new_ml = g_new(MemoryRegionList, 1);
2984 new_ml->mr = submr;
2985 QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
2986 if (new_ml->mr->addr < ml->mr->addr ||
2987 (new_ml->mr->addr == ml->mr->addr &&
2988 new_ml->mr->priority > ml->mr->priority)) {
2989 QTAILQ_INSERT_BEFORE(ml, new_ml, mrqueue);
2990 new_ml = NULL;
2991 break;
2992 }
2993 }
2994 if (new_ml) {
2995 QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, mrqueue);
2996 }
2997 }
2998
2999 QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
3000 mtree_print_mr(ml->mr, level + 1, cur_start,
3001 alias_print_queue, owner);
3002 }
3003
3004 QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, mrqueue, next_ml) {
3005 g_free(ml);
3006 }
3007 }
3008
3009 struct FlatViewInfo {
3010 int counter;
3011 bool dispatch_tree;
3012 bool owner;
3013 AccelClass *ac;
3014 const char *ac_name;
3015 };
3016
3017 static void mtree_print_flatview(gpointer key, gpointer value,
3018 gpointer user_data)
3019 {
3020 FlatView *view = key;
3021 GArray *fv_address_spaces = value;
3022 struct FlatViewInfo *fvi = user_data;
3023 FlatRange *range = &view->ranges[0];
3024 MemoryRegion *mr;
3025 int n = view->nr;
3026 int i;
3027 AddressSpace *as;
3028
3029 qemu_printf("FlatView #%d\n", fvi->counter);
3030 ++fvi->counter;
3031
3032 for (i = 0; i < fv_address_spaces->len; ++i) {
3033 as = g_array_index(fv_address_spaces, AddressSpace*, i);
3034 qemu_printf(" AS \"%s\", root: %s",
3035 as->name, memory_region_name(as->root));
3036 if (as->root->alias) {
3037 qemu_printf(", alias %s", memory_region_name(as->root->alias));
3038 }
3039 qemu_printf("\n");
3040 }
3041
3042 qemu_printf(" Root memory region: %s\n",
3043 view->root ? memory_region_name(view->root) : "(none)");
3044
3045 if (n <= 0) {
3046 qemu_printf(MTREE_INDENT "No rendered FlatView\n\n");
3047 return;
3048 }
3049
3050 while (n--) {
3051 mr = range->mr;
3052 if (range->offset_in_region) {
3053 qemu_printf(MTREE_INDENT TARGET_FMT_plx "-" TARGET_FMT_plx
3054 " (prio %d, %s%s): %s @" TARGET_FMT_plx,
3055 int128_get64(range->addr.start),
3056 int128_get64(range->addr.start)
3057 + MR_SIZE(range->addr.size),
3058 mr->priority,
3059 range->nonvolatile ? "nv-" : "",
3060 range->readonly ? "rom" : memory_region_type(mr),
3061 memory_region_name(mr),
3062 range->offset_in_region);
3063 } else {
3064 qemu_printf(MTREE_INDENT TARGET_FMT_plx "-" TARGET_FMT_plx
3065 " (prio %d, %s%s): %s",
3066 int128_get64(range->addr.start),
3067 int128_get64(range->addr.start)
3068 + MR_SIZE(range->addr.size),
3069 mr->priority,
3070 range->nonvolatile ? "nv-" : "",
3071 range->readonly ? "rom" : memory_region_type(mr),
3072 memory_region_name(mr));
3073 }
3074 if (fvi->owner) {
3075 mtree_print_mr_owner(mr);
3076 }
3077
3078 if (fvi->ac) {
3079 for (i = 0; i < fv_address_spaces->len; ++i) {
3080 as = g_array_index(fv_address_spaces, AddressSpace*, i);
3081 if (fvi->ac->has_memory(current_machine, as,
3082 int128_get64(range->addr.start),
3083 MR_SIZE(range->addr.size) + 1)) {
3084 qemu_printf(" %s", fvi->ac_name);
3085 }
3086 }
3087 }
3088 qemu_printf("\n");
3089 range++;
3090 }
3091
3092 #if !defined(CONFIG_USER_ONLY)
3093 if (fvi->dispatch_tree && view->root) {
3094 mtree_print_dispatch(view->dispatch, view->root);
3095 }
3096 #endif
3097
3098 qemu_printf("\n");
3099 }
3100
3101 static gboolean mtree_info_flatview_free(gpointer key, gpointer value,
3102 gpointer user_data)
3103 {
3104 FlatView *view = key;
3105 GArray *fv_address_spaces = value;
3106
3107 g_array_unref(fv_address_spaces);
3108 flatview_unref(view);
3109
3110 return true;
3111 }
3112
3113 void mtree_info(bool flatview, bool dispatch_tree, bool owner)
3114 {
3115 MemoryRegionListHead ml_head;
3116 MemoryRegionList *ml, *ml2;
3117 AddressSpace *as;
3118
3119 if (flatview) {
3120 FlatView *view;
3121 struct FlatViewInfo fvi = {
3122 .counter = 0,
3123 .dispatch_tree = dispatch_tree,
3124 .owner = owner,
3125 };
3126 GArray *fv_address_spaces;
3127 GHashTable *views = g_hash_table_new(g_direct_hash, g_direct_equal);
3128 AccelClass *ac = ACCEL_GET_CLASS(current_machine->accelerator);
3129
3130 if (ac->has_memory) {
3131 fvi.ac = ac;
3132 fvi.ac_name = current_machine->accel ? current_machine->accel :
3133 object_class_get_name(OBJECT_CLASS(ac));
3134 }
3135
3136 /* Gather all FVs in one table */
3137 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
3138 view = address_space_get_flatview(as);
3139
3140 fv_address_spaces = g_hash_table_lookup(views, view);
3141 if (!fv_address_spaces) {
3142 fv_address_spaces = g_array_new(false, false, sizeof(as));
3143 g_hash_table_insert(views, view, fv_address_spaces);
3144 }
3145
3146 g_array_append_val(fv_address_spaces, as);
3147 }
3148
3149 /* Print */
3150 g_hash_table_foreach(views, mtree_print_flatview, &fvi);
3151
3152 /* Free */
3153 g_hash_table_foreach_remove(views, mtree_info_flatview_free, 0);
3154 g_hash_table_unref(views);
3155
3156 return;
3157 }
3158
3159 QTAILQ_INIT(&ml_head);
3160
3161 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
3162 qemu_printf("address-space: %s\n", as->name);
3163 mtree_print_mr(as->root, 1, 0, &ml_head, owner);
3164 qemu_printf("\n");
3165 }
3166
3167 /* print aliased regions */
3168 QTAILQ_FOREACH(ml, &ml_head, mrqueue) {
3169 qemu_printf("memory-region: %s\n", memory_region_name(ml->mr));
3170 mtree_print_mr(ml->mr, 1, 0, &ml_head, owner);
3171 qemu_printf("\n");
3172 }
3173
3174 QTAILQ_FOREACH_SAFE(ml, &ml_head, mrqueue, ml2) {
3175 g_free(ml);
3176 }
3177 }
3178
3179 void memory_region_init_ram(MemoryRegion *mr,
3180 struct Object *owner,
3181 const char *name,
3182 uint64_t size,
3183 Error **errp)
3184 {
3185 DeviceState *owner_dev;
3186 Error *err = NULL;
3187
3188 memory_region_init_ram_nomigrate(mr, owner, name, size, &err);
3189 if (err) {
3190 error_propagate(errp, err);
3191 return;
3192 }
3193 /* This will assert if owner is neither NULL nor a DeviceState.
3194 * We only want the owner here for the purposes of defining a
3195 * unique name for migration. TODO: Ideally we should implement
3196 * a naming scheme for Objects which are not DeviceStates, in
3197 * which case we can relax this restriction.
3198 */
3199 owner_dev = DEVICE(owner);
3200 vmstate_register_ram(mr, owner_dev);
3201 }
3202
3203 void memory_region_init_rom(MemoryRegion *mr,
3204 struct Object *owner,
3205 const char *name,
3206 uint64_t size,
3207 Error **errp)
3208 {
3209 DeviceState *owner_dev;
3210 Error *err = NULL;
3211
3212 memory_region_init_rom_nomigrate(mr, owner, name, size, &err);
3213 if (err) {
3214 error_propagate(errp, err);
3215 return;
3216 }
3217 /* This will assert if owner is neither NULL nor a DeviceState.
3218 * We only want the owner here for the purposes of defining a
3219 * unique name for migration. TODO: Ideally we should implement
3220 * a naming scheme for Objects which are not DeviceStates, in
3221 * which case we can relax this restriction.
3222 */
3223 owner_dev = DEVICE(owner);
3224 vmstate_register_ram(mr, owner_dev);
3225 }
3226
3227 void memory_region_init_rom_device(MemoryRegion *mr,
3228 struct Object *owner,
3229 const MemoryRegionOps *ops,
3230 void *opaque,
3231 const char *name,
3232 uint64_t size,
3233 Error **errp)
3234 {
3235 DeviceState *owner_dev;
3236 Error *err = NULL;
3237
3238 memory_region_init_rom_device_nomigrate(mr, owner, ops, opaque,
3239 name, size, &err);
3240 if (err) {
3241 error_propagate(errp, err);
3242 return;
3243 }
3244 /* This will assert if owner is neither NULL nor a DeviceState.
3245 * We only want the owner here for the purposes of defining a
3246 * unique name for migration. TODO: Ideally we should implement
3247 * a naming scheme for Objects which are not DeviceStates, in
3248 * which case we can relax this restriction.
3249 */
3250 owner_dev = DEVICE(owner);
3251 vmstate_register_ram(mr, owner_dev);
3252 }
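/*
 * Board-level sketch of the migration-aware variants above (the names
 * and addresses are illustrative):
 *
 *     memory_region_init_ram(&s->ram, OBJECT(dev), "board.ram",
 *                            ram_size, &error_fatal);
 *     memory_region_add_subregion(get_system_memory(), 0, &s->ram);
 *
 *     memory_region_init_rom(&s->bootrom, OBJECT(dev), "board.bootrom",
 *                            0x10000, &error_fatal);
 *     memory_region_add_subregion(get_system_memory(), 0xfff00000,
 *                                 &s->bootrom);
 *
 * Unlike the *_nomigrate variants, these also register the backing RAM
 * with vmstate, so the owner must be NULL or a DeviceState.
 */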
3253
3254 static const TypeInfo memory_region_info = {
3255 .parent = TYPE_OBJECT,
3256 .name = TYPE_MEMORY_REGION,
3257 .class_size = sizeof(MemoryRegionClass),
3258 .instance_size = sizeof(MemoryRegion),
3259 .instance_init = memory_region_initfn,
3260 .instance_finalize = memory_region_finalize,
3261 };
3262
3263 static const TypeInfo iommu_memory_region_info = {
3264 .parent = TYPE_MEMORY_REGION,
3265 .name = TYPE_IOMMU_MEMORY_REGION,
3266 .class_size = sizeof(IOMMUMemoryRegionClass),
3267 .instance_size = sizeof(IOMMUMemoryRegion),
3268 .instance_init = iommu_memory_region_initfn,
3269 .abstract = true,
3270 };
3271
3272 static void memory_register_types(void)
3273 {
3274 type_register_static(&memory_region_info);
3275 type_register_static(&iommu_memory_region_info);
3276 }
3277
3278 type_init(memory_register_types)
3279
3280 MemOp devend_memop(enum device_endian end)
3281 {
3282 static MemOp conv[] = {
3283 [DEVICE_LITTLE_ENDIAN] = MO_LE,
3284 [DEVICE_BIG_ENDIAN] = MO_BE,
3285 [DEVICE_NATIVE_ENDIAN] = MO_TE,
3286 [DEVICE_HOST_ENDIAN] = 0,
3287 };
3288 switch (end) {
3289 case DEVICE_LITTLE_ENDIAN:
3290 case DEVICE_BIG_ENDIAN:
3291 case DEVICE_NATIVE_ENDIAN:
3292 return conv[end];
3293 default:
3294 g_assert_not_reached();
3295 }
3296 }
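/*
 * Hedged usage sketch: a dispatcher combining a device's declared
 * endianness with an access size to build the MemOp carried along the
 * I/O path (the 4-byte size is illustrative):
 *
 *     MemOp op = devend_memop(mr->ops->endianness) | size_memop(4);
 *
 * Per the table above, DEVICE_NATIVE_ENDIAN maps to MO_TE, i.e. the
 * target's endianness.
 */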