migration: mapped-ram: handle zero pages
author     Marco Cavenati <Marco.Cavenati@eurecom.fr>
           Fri, 10 Oct 2025 11:59:54 +0000 (13:59 +0200)
committer  Peter Xu <peterx@redhat.com>
           Mon, 3 Nov 2025 21:04:09 +0000 (16:04 -0500)
Make mapped-ram compatible with HMP loadvm snapshot restore by explicitly
zeroing memory pages in that case.

Skip the zeroing for -incoming and -loadvm, where pages are already
guaranteed to be zero, to preserve performance.

Signed-off-by: Marco Cavenati <Marco.Cavenati@eurecom.fr>
Link: https://lore.kernel.org/r/20251010115954.1995298-3-Marco.Cavenati@eurecom.fr
Signed-off-by: Peter Xu <peterx@redhat.com>
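
For illustration, a minimal standalone sketch (not QEMU code) of the run-state
gating described above: only an HMP loadvm into a live VM needs explicit
zeroing, since a freshly started VM (-incoming or -loadvm) already has zeroed
RAM. The RUN_STATE_* names mirror those used in the patch, but are redefined
here so the sketch compiles on its own; needs_explicit_zeroing() is a
hypothetical helper.

#include <stdbool.h>
#include <stdio.h>

/* Standalone stand-ins for the QEMU run states referenced by the patch. */
typedef enum {
    RUN_STATE_PRELAUNCH,  /* fresh VM started with -loadvm: RAM already zero */
    RUN_STATE_INMIGRATE,  /* fresh VM started with -incoming: RAM already zero */
    RUN_STATE_RESTORE_VM, /* HMP loadvm on a running VM: RAM holds stale data */
} RunState;

/*
 * Hypothetical helper: only the HMP loadvm case must zero pages that are
 * absent from the mapped-ram file.
 */
static bool needs_explicit_zeroing(RunState state)
{
    return state == RUN_STATE_RESTORE_VM;
}

int main(void)
{
    printf("-loadvm:    %d\n", needs_explicit_zeroing(RUN_STATE_PRELAUNCH));  /* 0 */
    printf("-incoming:  %d\n", needs_explicit_zeroing(RUN_STATE_INMIGRATE));  /* 0 */
    printf("HMP loadvm: %d\n", needs_explicit_zeroing(RUN_STATE_RESTORE_VM)); /* 1 */
    return 0;
}
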
migration/options.c
migration/ram.c

diff --git a/migration/options.c b/migration/options.c
index d9227809d7d457181c0a733ce27a110e4c867b2d..e78324b80c139de830f034f4de31b9357badd51d 100644
--- a/migration/options.c
+++ b/migration/options.c
@@ -449,7 +449,6 @@ INITIALIZE_MIGRATE_CAPS_SET(check_caps_background_snapshot,
 static const
 INITIALIZE_MIGRATE_CAPS_SET(check_caps_savevm,
                             MIGRATION_CAPABILITY_MULTIFD,
-                            MIGRATION_CAPABILITY_MAPPED_RAM,
 );
 
 static bool migrate_incoming_started(void)
diff --git a/migration/ram.c b/migration/ram.c
index 13847481933e3b4b9b8bc4ab1ff4194454fe613b..29f016cb25848acb5aa947e8913673869cb40769 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -4038,12 +4038,58 @@ static size_t ram_load_multifd_pages(void *host_addr, size_t size,
     return size;
 }
 
+/**
+ * handle_zero_mapped_ram: Zero out a range of RAM pages if required during
+ * mapped-ram load
+ *
+ * Zeroing is only performed when restoring from a snapshot (HMP loadvm).
+ * During incoming migration or a -loadvm command-line snapshot load, this
+ * function is a no-op that returns true, as in those cases the pages are
+ * already guaranteed to be zeroed.
+ *
+ * @from_bit_idx: Starting page index into the block's bitmap (inclusive)
+ * @to_bit_idx:   Ending page index into the block's bitmap (exclusive)
+ * Returns: true on success, false on error (with @errp set).
+ */
+static bool handle_zero_mapped_ram(RAMBlock *block, unsigned long from_bit_idx,
+                                   unsigned long to_bit_idx, Error **errp)
+{
+    ERRP_GUARD();
+    ram_addr_t offset;
+    size_t size;
+    void *host;
+
+    /*
+     * Zeroing is not needed for either -loadvm (RUN_STATE_PRELAUNCH) or
+     * -incoming (RUN_STATE_INMIGRATE).
+     */
+    if (!runstate_check(RUN_STATE_RESTORE_VM)) {
+        return true;
+    }
+
+    if (from_bit_idx >= to_bit_idx) {
+        return true;
+    }
+
+    size = TARGET_PAGE_SIZE * (to_bit_idx - from_bit_idx);
+    offset = from_bit_idx << TARGET_PAGE_BITS;
+    host = host_from_ram_block_offset(block, offset);
+    if (!host) {
+        error_setg(errp, "zero page outside of ramblock %s range",
+                   block->idstr);
+        return false;
+    }
+    ram_handle_zero(host, size);
+
+    return true;
+}
+
 static bool read_ramblock_mapped_ram(QEMUFile *f, RAMBlock *block,
                                      long num_pages, unsigned long *bitmap,
                                      Error **errp)
 {
     ERRP_GUARD();
-    unsigned long set_bit_idx, clear_bit_idx;
+    unsigned long set_bit_idx, clear_bit_idx = 0;
     ram_addr_t offset;
     void *host;
     size_t read, unread, size;
@@ -4052,6 +4098,12 @@ static bool read_ramblock_mapped_ram(QEMUFile *f, RAMBlock *block,
          set_bit_idx < num_pages;
          set_bit_idx = find_next_bit(bitmap, num_pages, clear_bit_idx + 1)) {
 
+        /* Zero pages */
+        if (!handle_zero_mapped_ram(block, clear_bit_idx, set_bit_idx, errp)) {
+            return false;
+        }
+
+        /* Non-zero pages */
         clear_bit_idx = find_next_zero_bit(bitmap, num_pages, set_bit_idx + 1);
 
         unread = TARGET_PAGE_SIZE * (clear_bit_idx - set_bit_idx);
@@ -4083,6 +4135,11 @@ static bool read_ramblock_mapped_ram(QEMUFile *f, RAMBlock *block,
         }
     }
 
+    /* Handle trailing zero pages */
+    if (!handle_zero_mapped_ram(block, clear_bit_idx, num_pages, errp)) {
+        return false;
+    }
+
     return true;
 
 err:
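
To make the page scan concrete, here is a standalone model (not QEMU code) of
how read_ramblock_mapped_ram partitions a RAMBlock into alternating zero and
non-zero runs, including the trailing-zero handling added by this patch. The
find_next_*_in() helpers naively reimplement QEMU's bitmap primitives over a
single word, purely for illustration.

#include <stdio.h>

#define NUM_PAGES 16UL

/* Naive stand-ins for QEMU's find_next_bit()/find_next_zero_bit(). */
static unsigned long find_next_bit_in(unsigned long map, unsigned long n,
                                      unsigned long start)
{
    for (unsigned long i = start; i < n; i++) {
        if (map & (1UL << i)) {
            return i;
        }
    }
    return n;
}

static unsigned long find_next_zero_bit_in(unsigned long map, unsigned long n,
                                           unsigned long start)
{
    for (unsigned long i = start; i < n; i++) {
        if (!(map & (1UL << i))) {
            return i;
        }
    }
    return n;
}

int main(void)
{
    /*
     * A set bit means "page present in the file"; a clear bit means "zero
     * page". Here pages 2-4 and 9 are present in the file.
     */
    unsigned long bitmap = (0x7UL << 2) | (1UL << 9);
    unsigned long set_idx, clear_idx = 0;

    for (set_idx = find_next_bit_in(bitmap, NUM_PAGES, 0);
         set_idx < NUM_PAGES;
         set_idx = find_next_bit_in(bitmap, NUM_PAGES, clear_idx + 1)) {
        /* Zero run preceding this set run: handle_zero_mapped_ram()'s job. */
        if (clear_idx < set_idx) {
            printf("zero pages [%lu, %lu)\n", clear_idx, set_idx);
        }
        clear_idx = find_next_zero_bit_in(bitmap, NUM_PAGES, set_idx + 1);
        printf("read pages [%lu, %lu)\n", set_idx, clear_idx);
    }
    /* Trailing zero run after the last page present in the file. */
    if (clear_idx < NUM_PAGES) {
        printf("zero pages [%lu, %lu)\n", clear_idx, NUM_PAGES);
    }
    return 0;
}

Expected output: zero [0, 2), read [2, 5), zero [5, 9), read [9, 10),
zero [10, 16), matching the alternation of zeroed gaps and file reads that
the patched loop enforces.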