__func__, block->idstr);
return NULL;
}
+
+ /*
+ * During a COLO checkpoint we need a bitmap of the pages that have been
+ * migrated; it helps us decide which pages in the ram cache should be
+ * flushed into the VM's RAM later.
+ */
+ if (!test_and_set_bit(offset >> TARGET_PAGE_BITS, block->bmap)) {
+ ram_state->migration_dirty_pages++;
+ }
return block->colo_cache + offset;
}
RAMBlock *block;
rcu_read_lock();
- QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
+ RAMBLOCK_FOREACH_MIGRATABLE(block) {
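+ /*
+ * Allocate this block's "colo cache" and seed it with the current
+ * contents of guest RAM; pages received at checkpoint time land in
+ * this cache first.
+ */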
block->colo_cache = qemu_anon_ram_alloc(block->used_length,
NULL,
false);
memcpy(block->colo_cache, block->host, block->used_length);
}
rcu_read_unlock();
+ /*
+ * Record the dirty pages that are sent by the PVM; we use this dirty
+ * bitmap to decide which pages in the cache should be flushed into the
+ * SVM's RAM. Here we use the same name 'ram_bitmap' as for migration.
+ */
+ if (ram_bytes_total()) {
+ RAMBlock *block;
+
+ RAMBLOCK_FOREACH_MIGRATABLE(block) {
+ unsigned long pages = block->max_length >> TARGET_PAGE_BITS;
+
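+ /* All bits start set: every cached page is initially treated as dirty. */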
+ block->bmap = bitmap_new(pages);
+ bitmap_set(block->bmap, 0, pages);
+ }
+ }
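+ /*
+ * migration_dirty_pages is reused here to count the cache pages that
+ * are waiting to be flushed into the VM's RAM.
+ */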
+ ram_state = g_new0(RAMState, 1);
+ ram_state->migration_dirty_pages = 0;
+
return 0;
out_locked:
- QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
+
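+ /* Error path: free any colo caches that were allocated before the failure. */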
+ RAMBLOCK_FOREACH_MIGRATABLE(block) {
if (block->colo_cache) {
qemu_anon_ram_free(block->colo_cache, block->used_length);
block->colo_cache = NULL;
{
RAMBlock *block;
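+ /* Drop the per-block dirty bitmaps created when the ram cache was set up. */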
+ RAMBLOCK_FOREACH_MIGRATABLE(block) {
+ g_free(block->bmap);
+ block->bmap = NULL;
+ }
+
rcu_read_lock();
- QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
+
+ RAMBLOCK_FOREACH_MIGRATABLE(block) {
if (block->colo_cache) {
qemu_anon_ram_free(block->colo_cache, block->used_length);
block->colo_cache = NULL;
}
}
+
rcu_read_unlock();
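+ /* Release the RAMState that tracked the cache's dirty page count. */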
+ g_free(ram_state);
+ ram_state = NULL;
}
/**