"destroy",
"suspend",
"modify",
+ "migration operation",
"none", /* async job is never stored in job.active */
"async nested",
);
    QEMU_JOB_DESTROY,       /* Destroys the domain (cannot be masked out) */
    QEMU_JOB_SUSPEND,       /* Suspends (stops vCPUs) the domain */
    QEMU_JOB_MODIFY,        /* May change state */
+   QEMU_JOB_MIGRATION_OP,  /* Operation influencing outgoing migration */
    /* The following two items must always be the last items before JOB_LAST */
    QEMU_JOB_ASYNC,         /* Asynchronous job */
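
The new QEMU_JOB_MIGRATION_OP is an ordinary synchronous job, so whether it may run while an async migration job owns the domain is decided purely by the async job's mask. For reference, a sketch of the mask helpers this relies on; the exact definitions are assumptions based on qemu_domain.h of this era, not quoted from the patch:

/* Assumed shape of the mask helpers: QEMU_JOB_NONE is 0, so every real job
 * type maps to its own bit, and the default mask admits only jobs that must
 * never be blocked by a long-running async job. */
# define JOB_MASK(job)     (1 << (job - 1))
# define DEFAULT_JOB_MASK  (JOB_MASK(QEMU_JOB_QUERY) | JOB_MASK(QEMU_JOB_DESTROY))
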
    QEMU_JOB_SIGNAL_CANCEL  = 1 << 0, /* Request job cancellation */
    QEMU_JOB_SIGNAL_SUSPEND = 1 << 1, /* Request VM suspend to finish live migration offline */
    QEMU_JOB_SIGNAL_MIGRATE_DOWNTIME = 1 << 2, /* Request migration downtime change */
-   QEMU_JOB_SIGNAL_MIGRATE_SPEED = 1 << 3, /* Request migration speed change */
};
struct qemuDomainJobSignalsData {
    unsigned long long migrateDowntime; /* Data for QEMU_JOB_SIGNAL_MIGRATE_DOWNTIME */
-   unsigned long migrateBandwidth;     /* Data for QEMU_JOB_SIGNAL_MIGRATE_SPEED */
};
struct qemuDomainJobObj {
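
For orientation, after the two hunks above the remaining signal machinery should read as follows (reconstructed from the diff, not quoted from the tree; the enum tag is assumed). Only downtime changes still travel through the signal path:

enum qemuDomainJobSignals {
    QEMU_JOB_SIGNAL_CANCEL  = 1 << 0, /* Request job cancellation */
    QEMU_JOB_SIGNAL_SUSPEND = 1 << 1, /* Request VM suspend to finish live migration offline */
    QEMU_JOB_SIGNAL_MIGRATE_DOWNTIME = 1 << 2, /* Request migration downtime change */
};

struct qemuDomainJobSignalsData {
    unsigned long long migrateDowntime; /* Data for QEMU_JOB_SIGNAL_MIGRATE_DOWNTIME */
};
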
    qemuDriverLock(driver);
    vm = virDomainFindByUUID(&driver->domains, dom->uuid);
+   qemuDriverUnlock(driver);
    if (!vm) {
        char uuidstr[VIR_UUID_STRING_BUFLEN];
        virUUIDFormat(dom->uuid, uuidstr);
        qemuReportError(VIR_ERR_NO_DOMAIN,
                        _("no domain with matching uuid '%s'"), uuidstr);
-       goto cleanup;
+       return -1;
    }
+   if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MIGRATION_OP) < 0)
+       goto cleanup;
+
    if (!virDomainObjIsActive(vm)) {
        qemuReportError(VIR_ERR_OPERATION_INVALID,
                        "%s", _("domain is not running"));
-       goto cleanup;
+       goto endjob;
    }
    priv = vm->privateData;
    if (priv->job.asyncJob != QEMU_ASYNC_JOB_MIGRATION_OUT) {
        qemuReportError(VIR_ERR_OPERATION_INVALID,
                        "%s", _("domain is not being migrated"));
-       goto cleanup;
+       goto endjob;
    }
-   VIR_DEBUG("Requesting migration speed change to %luMbs", bandwidth);
-   priv->job.signalsData.migrateBandwidth = bandwidth;
-   priv->job.signals |= QEMU_JOB_SIGNAL_MIGRATE_SPEED;
-   ret = 0;
+   VIR_DEBUG("Setting migration bandwidth to %luMbs", bandwidth);
+   ignore_value(qemuDomainObjEnterMonitor(driver, vm));
+   ret = qemuMonitorSetMigrationSpeed(priv->mon, bandwidth);
+   qemuDomainObjExitMonitor(driver, vm);
+
+endjob:
+   if (qemuDomainObjEndJob(driver, vm) == 0)
+       vm = NULL;
cleanup:
    if (vm)
        virDomainObjUnlock(vm);
-   qemuDriverUnlock(driver);
    return ret;
}
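
The rewrite above turns the speed change into a synchronous operation: instead of setting a signal bit and optimistically returning 0, the API thread takes a QEMU_JOB_MIGRATION_OP job, talks to the monitor itself, and reports the monitor's real result. A minimal sketch of the caller's side, using the public API this driver entry point implements (tune_migration_speed is a hypothetical helper, not part of the patch):

#include <libvirt/libvirt.h>

/* Hypothetical helper: adjust the bandwidth cap of an in-flight outgoing
 * migration from a second connection or thread. Returns 0 on success,
 * -1 on failure, exactly as reported by the monitor call above. */
static int
tune_migration_speed(virDomainPtr dom, unsigned long mbps)
{
    return virDomainMigrateSetMaxSpeed(dom, mbps, 0); /* no flags defined */
}
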
        }
        if (ret < 0)
            VIR_WARN("Unable to set migration downtime");
-   } else if (priv->job.signals & QEMU_JOB_SIGNAL_MIGRATE_SPEED) {
-       unsigned long bandwidth = priv->job.signalsData.migrateBandwidth;
-
-       priv->job.signals ^= QEMU_JOB_SIGNAL_MIGRATE_SPEED;
-       priv->job.signalsData.migrateBandwidth = 0;
-       VIR_DEBUG("Setting migration bandwidth to %luMbs", bandwidth);
-       ret = qemuDomainObjEnterMonitorWithDriver(driver, vm);
-       if (ret == 0) {
-           ret = qemuMonitorSetMigrationSpeed(priv->mon, bandwidth);
-           qemuDomainObjExitMonitorWithDriver(driver, vm);
-       }
-       if (ret < 0)
-           VIR_WARN("Unable to set migration speed");
    } else {
        ret = 0;
    }
    if (qemuDomainObjBeginAsyncJobWithDriver(driver, vm, job) < 0)
        return -1;
-   if (job == QEMU_ASYNC_JOB_MIGRATION_IN)
+   if (job == QEMU_ASYNC_JOB_MIGRATION_IN) {
        qemuDomainObjSetAsyncJobMask(vm, QEMU_JOB_NONE);
-   else
-       qemuDomainObjSetAsyncJobMask(vm, DEFAULT_JOB_MASK);
+   } else {
+       qemuDomainObjSetAsyncJobMask(vm, DEFAULT_JOB_MASK |
+                                    JOB_MASK(QEMU_JOB_MIGRATION_OP));
+   }
    priv->job.info.type = VIR_DOMAIN_JOB_UNBOUNDED;
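
This mask tweak is what makes the new driver entry point work: incoming migration keeps the mask empty so every other job has to wait, while outgoing migration now also admits QEMU_JOB_MIGRATION_OP, letting qemuDomainMigrateSetMaxSpeed grab a job mid-migration. A condensed, hypothetical sketch of the admission test applied when a job starts (the real qemuDomainObjBeginJob also handles condition waits and timeouts):

/* Hypothetical condensation of the check in qemuDomainObjBeginJob(): while
 * an async job owns the domain, a normal job may start only if its bit is
 * set in the async job's mask; otherwise the caller waits on the job
 * condition or times out. */
static bool
jobCompatible(qemuDomainObjPrivatePtr priv, enum qemuDomainJob job)
{
    return priv->job.asyncJob == QEMU_ASYNC_JOB_NONE ||
           (priv->job.mask & JOB_MASK(job)) != 0;
}
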
         */
        break;
+   case QEMU_JOB_MIGRATION_OP:
    case QEMU_JOB_ASYNC:
    case QEMU_JOB_ASYNC_NESTED:
        /* async job was already handled above */