git.ipfire.org Git - thirdparty/libvirt.git/commitdiff
qemu: Restore async job start timestamp on reconnect
authorJiri Denemark <jdenemar@redhat.com>
Fri, 13 May 2022 12:15:00 +0000 (14:15 +0200)
committerJiri Denemark <jdenemar@redhat.com>
Tue, 7 Jun 2022 15:40:20 +0000 (17:40 +0200)
Jobs that are supposed to remain active even when libvirt daemon
restarts were reported as started at the time the daemon was restarted.
This is not very helpful, we should restore the original timestamp.

Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
Reviewed-by: Peter Krempa <pkrempa@redhat.com>
src/qemu/qemu_domainjob.c
src/qemu/qemu_domainjob.h
src/qemu/qemu_process.c
tests/qemustatusxml2xmldata/migration-in-params-in.xml
tests/qemustatusxml2xmldata/migration-out-nbd-bitmaps-in.xml
tests/qemustatusxml2xmldata/migration-out-nbd-out.xml
tests/qemustatusxml2xmldata/migration-out-nbd-tls-out.xml
tests/qemustatusxml2xmldata/migration-out-params-in.xml

index f85429a7cec918090936b1bd601ca61fa4786895..e76eb7f2cf52524e58b1e61d5282314c2b62a8ae 100644 (file)
@@ -235,6 +235,7 @@ qemuDomainObjPreserveJob(virDomainObj *obj,
     job->owner = priv->job.owner;
     job->asyncJob = priv->job.asyncJob;
     job->asyncOwner = priv->job.asyncOwner;
+    job->asyncStarted = priv->job.asyncStarted;
     job->phase = priv->job.phase;
     job->privateData = g_steal_pointer(&priv->job.privateData);
     job->apiFlags = priv->job.apiFlags;
@@ -254,6 +255,7 @@ void
 qemuDomainObjRestoreAsyncJob(virDomainObj *vm,
                              virDomainAsyncJob asyncJob,
                              int phase,
+                             unsigned long long started,
                              virDomainJobOperation operation,
                              qemuDomainJobStatsType statsType,
                              virDomainJobStatus status,
@@ -261,18 +263,18 @@ qemuDomainObjRestoreAsyncJob(virDomainObj *vm,
 {
     qemuDomainObjPrivate *priv = vm->privateData;
     qemuDomainJobObj *job = &priv->job;
-    unsigned long long now;
 
     VIR_DEBUG("Restoring %s async job for domain %s",
               virDomainAsyncJobTypeToString(asyncJob), vm->def->name);
 
-    ignore_value(virTimeMillisNow(&now));
+    if (started == 0)
+        ignore_value(virTimeMillisNow(&started));
 
     job->jobsQueued++;
     job->asyncJob = asyncJob;
     job->phase = phase;
     job->asyncOwnerAPI = g_strdup(virThreadJobGet());
-    job->asyncStarted = now;
+    job->asyncStarted = started;
 
     qemuDomainObjSetAsyncJobMask(vm, allowedJobs);
 
@@ -280,7 +282,7 @@ qemuDomainObjRestoreAsyncJob(virDomainObj *vm,
     qemuDomainJobSetStatsType(priv->job.current, statsType);
     job->current->operation = operation;
     job->current->status = status;
-    job->current->started = now;
+    job->current->started = started;
 }
 
 
@@ -1250,8 +1252,10 @@ qemuDomainObjPrivateXMLFormatJob(virBuffer *buf,
                                                           priv->job.phase));
     }
 
-    if (priv->job.asyncJob != VIR_ASYNC_JOB_NONE)
+    if (priv->job.asyncJob != VIR_ASYNC_JOB_NONE) {
         virBufferAsprintf(&attrBuf, " flags='0x%lx'", priv->job.apiFlags);
+        virBufferAsprintf(&attrBuf, " asyncStarted='%llu'", priv->job.asyncStarted);
+    }
 
     if (priv->job.cb &&
         priv->job.cb->formatJob(&childBuf, &priv->job, vm) < 0)
@@ -1307,6 +1311,13 @@ qemuDomainObjPrivateXMLParseJob(virDomainObj *vm,
             }
             VIR_FREE(tmp);
         }
+
+        if (virXPathULongLong("string(@asyncStarted)", ctxt,
+                              &priv->job.asyncStarted) == -2) {
+            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+                           _("Invalid async job start"));
+            return -1;
+        }
     }
 
     if (virXPathULongHex("string(@flags)", ctxt, &priv->job.apiFlags) == -2) {
index 069bb9f8cbdde9929a531e9e18b7767e7394c781..707d4e91eda0485448b315d6d400340a2db08047 100644 (file)
@@ -164,6 +164,7 @@ void
 qemuDomainObjRestoreAsyncJob(virDomainObj *vm,
                              virDomainAsyncJob asyncJob,
                              int phase,
+                             unsigned long long started,
                              virDomainJobOperation operation,
                              qemuDomainJobStatsType statsType,
                              virDomainJobStatus status,
index 081b0496724f90cd3a7dc6c055d30f6e7d8bb46b..a87dc9a1fb1680dc37e552fdf1d16ffc66573750 100644 (file)
@@ -3402,7 +3402,8 @@ qemuProcessRestoreMigrationJob(virDomainObj *vm,
         allowedJobs = VIR_JOB_DEFAULT_MASK | JOB_MASK(VIR_JOB_MIGRATION_OP);
     }
 
-    qemuDomainObjRestoreAsyncJob(vm, job->asyncJob, job->phase, op,
+    qemuDomainObjRestoreAsyncJob(vm, job->asyncJob, job->phase,
+                                 job->asyncStarted, op,
                                  QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION,
                                  VIR_DOMAIN_JOB_STATUS_PAUSED,
                                  allowedJobs);
@@ -3675,6 +3676,7 @@ qemuProcessRecoverJob(virQEMUDriver *driver,
     case VIR_ASYNC_JOB_BACKUP:
         /* Restore the config of the async job which is not persisted */
         qemuDomainObjRestoreAsyncJob(vm, VIR_ASYNC_JOB_BACKUP, 0,
+                                     job->asyncStarted,
                                      VIR_DOMAIN_JOB_OPERATION_BACKUP,
                                      QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP,
                                      VIR_DOMAIN_JOB_STATUS_ACTIVE,
index 8b0878c82e2812a800f8bf8e6d12e036c40673cb..1086f5230d7096f2d51dacdcca42f12fabdef966 100644 (file)
     <flag name='dump-completed'/>
     <flag name='hda-output'/>
   </qemuCaps>
-  <job type='none' async='migration in' phase='prepare' flags='0x900'>
+  <job type='none' async='migration in' phase='prepare' flags='0x900' asyncStarted='0'>
     <migParams>
       <param name='compress-level' value='1'/>
       <param name='compress-threads' value='8'/>
index 7d55db0996af0d16fc75898a624a64e72f1083df..4ee44ffbd480a652cc83be7955aa348da0f7a806 100644 (file)
     <flag name='cpu-max'/>
     <flag name='migration-param.block-bitmap-mapping'/>
   </qemuCaps>
-  <job type='none' async='migration out' phase='perform3' flags='0x42'>
+  <job type='none' async='migration out' phase='perform3' flags='0x42' asyncStarted='0'>
     <disk dev='hda' migrating='no'/>
     <disk dev='vda' migrating='yes'>
       <migrationSource type='network' format='raw'>
index 1a918c0b5ae5bd1d47f25d97b1c352a3d742fd1c..540e385de335e352bd1ac735e74b4cde9d55758b 100644 (file)
     <flag name='dump-completed'/>
     <flag name='hda-output'/>
   </qemuCaps>
-  <job type='none' async='migration out' phase='perform3' flags='0x0'>
+  <job type='none' async='migration out' phase='perform3' flags='0x0' asyncStarted='0'>
     <disk dev='vdb' migrating='yes'/>
     <disk dev='hda' migrating='no'/>
   </job>
index 87c67f83000933672e302866b16cf4ad1e3bc06d..d0e997913f4a41a0d07259f78394d0aae8cab2af 100644 (file)
     <flag name='nbd-tls'/>
     <flag name='blockdev-del'/>
   </qemuCaps>
-  <job type='none' async='migration out' phase='perform3' flags='0x0'>
+  <job type='none' async='migration out' phase='perform3' flags='0x0' asyncStarted='0'>
     <disk dev='vdb' migrating='yes'>
       <migrationSource type='network' format='raw'>
         <source protocol='nbd' name='drive-virtio-disk1' tlsFromConfig='0'>
index 73ac09fb920929e4146b4a038d42bfb7b8e075b7..758a6f03b7a491858797a84808de2ea1252f0f21 100644 (file)
     <flag name='dump-completed'/>
     <flag name='hda-output'/>
   </qemuCaps>
-  <job type='none' async='migration out' phase='perform3' flags='0x802'>
+  <job type='none' async='migration out' phase='perform3' flags='0x802' asyncStarted='0'>
     <disk dev='vda' migrating='no'/>
     <migParams>
       <param name='compress-level' value='1'/>