if (vm->job->asyncJob == VIR_ASYNC_JOB_MIGRATION_IN ||
vm->job->asyncJob == VIR_ASYNC_JOB_MIGRATION_OUT) {
virReportError(VIR_ERR_OPERATION_INVALID,
- _("another migration job is already running for domain '%s'"),
+ _("another migration job is already running for domain '%1$s'"),
vm->def->name);
return false;
}
if (phase < QEMU_MIGRATION_PHASE_POSTCOPY_FAILED &&
phase < vm->job->phase) {
virReportError(VIR_ERR_INTERNAL_ERROR,
- _("migration protocol going backwards %s => %s"),
+ _("migration protocol going backwards %1$s => %2$s"),
qemuMigrationJobPhaseTypeToString(vm->job->phase),
qemuMigrationJobPhaseTypeToString(phase));
return -1;
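/* An aside on the check above (a sketch of the assumed semantics, not
 * spelled out in the hunk): qemuMigrationJobPhase values form an ordered
 * enum, so a numerically smaller phase means the migration protocol would
 * move backwards; phases from QEMU_MIGRATION_PHASE_POSTCOPY_FAILED onwards
 * are exempt because post-copy recovery legitimately re-enters earlier
 * phases. */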
const char *msg;
if (job == VIR_ASYNC_JOB_MIGRATION_IN)
- msg = _("domain '%s' is not processing incoming migration");
+ msg = _("domain '%1$s' is not processing incoming migration");
else
- msg = _("domain '%s' is not being migrated");
+ msg = _("domain '%1$s' is not being migrated");
virReportError(VIR_ERR_OPERATION_INVALID, msg, vm->def->name);
return false;
/* Hm, we already know we are in error here. We don't want to
* overwrite the previous error, though, so we just throw something
* to the logs and hope for the best */
- VIR_ERROR(_("Failed to resume guest %s after failure"), vm->def->name);
+ VIR_ERROR(_("Failed to resume guest %1$s after failure"), vm->def->name);
if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED) {
virObjectEvent *event;
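/* A minimal sketch of the alternative the comment above alludes to: when a
 * secondary failure must be reported without overwriting the primary error,
 * libvirt code commonly brackets the recovery attempt with the save/restore
 * helpers from virerror.h instead of only logging via VIR_ERROR(); the
 * resume logic itself is elided here:
 */
virErrorPtr orig_err;

virErrorPreserveLast(&orig_err);  /* stash the primary error */
/* ... try to resume the guest; any new error reports land in between ... */
virErrorRestore(&orig_err);       /* re-install the primary error */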
if (!(volName = strrchr(basePath, '/'))) {
virReportError(VIR_ERR_INVALID_ARG,
- _("malformed disk path: %s"),
+ _("malformed disk path: %1$s"),
disk->src->path);
goto cleanup;
}
case VIR_STORAGE_TYPE_NONE:
case VIR_STORAGE_TYPE_LAST:
virReportError(VIR_ERR_INTERNAL_ERROR,
- _("cannot precreate storage for disk type '%s'"),
+ _("cannot precreate storage for disk type '%1$s'"),
virStorageTypeToString(disk->src->type));
goto cleanup;
}
if (!(disk = virDomainDiskByTarget(vm->def, nbd->disks[i].target))) {
virReportError(VIR_ERR_INTERNAL_ERROR,
- _("unable to find disk by target: %s"),
+ _("unable to find disk by target: %1$s"),
nbd->disks[i].target);
goto cleanup;
}
return -1;
if (!uri->scheme) {
- virReportError(VIR_ERR_INVALID_ARG, _("No URI scheme specified: %s"), nbdURI);
+ virReportError(VIR_ERR_INVALID_ARG, _("No URI scheme specified: %1$s"), nbdURI);
return -1;
}
* we should rather error out instead of auto-allocating a port
* as that would be the exact opposite of what was requested. */
virReportError(VIR_ERR_INVALID_ARG,
- _("URI with tcp scheme did not provide a server part: %s"),
+ _("URI with tcp scheme did not provide a server part: %1$s"),
nbdURI);
return -1;
}
server.socket = (char *)uri->path;
} else {
virReportError(VIR_ERR_INVALID_ARG,
- _("Unsupported scheme in disks URI: %s"),
+ _("Unsupported scheme in disks URI: %1$s"),
uri->scheme);
return -1;
}
if (disk->src->readonly || virStorageSourceIsEmpty(disk->src)) {
virReportError(VIR_ERR_OPERATION_UNSUPPORTED,
- _("Cannot migrate empty or read-only disk %s"),
+ _("Cannot migrate empty or read-only disk %1$s"),
disk->dst);
goto cleanup;
}
{
if (job->errmsg) {
virReportError(VIR_ERR_OPERATION_FAILED,
- _("migration of disk %s failed: %s"),
+ _("migration of disk %1$s failed: %2$s"),
diskdst, job->errmsg);
} else {
virReportError(VIR_ERR_OPERATION_FAILED,
- _("migration of disk %s failed"), diskdst);
+ _("migration of disk %1$s failed"), diskdst);
}
}
if (!(job = qemuBlockJobDiskGetJob(disk))) {
virReportError(VIR_ERR_INTERNAL_ERROR,
- _("missing block job data for disk '%s'"), disk->dst);
+ _("missing block job data for disk '%1$s'"), disk->dst);
return -1;
}
if (mirror_speed > LLONG_MAX >> 20) {
virReportError(VIR_ERR_OVERFLOW,
- _("bandwidth must be less than %llu"),
+ _("bandwidth must be less than %1$llu"),
LLONG_MAX >> 20);
return -1;
}
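/* For context (assumed from the guard above): mirror_speed is accepted in
 * MiB/s while QEMU expects bytes/s, so any value whose conversion would
 * overflow a long long is rejected first; the conversion itself is simply:
 */
mirror_speed <<= 20;  /* MiB/s -> bytes/s */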
return -1;
} else {
virReportError(VIR_ERR_INVALID_ARG,
- _("Unsupported scheme in disks URI: %s"),
+ _("Unsupported scheme in disks URI: %1$s"),
uri->scheme);
return -1;
}
if (vm->job->abortJob) {
vm->job->current->status = VIR_DOMAIN_JOB_STATUS_CANCELED;
- virReportError(VIR_ERR_OPERATION_ABORTED, _("%s: %s"),
+ virReportError(VIR_ERR_OPERATION_ABORTED, _("%1$s: %2$s"),
virDomainAsyncJobTypeToString(vm->job->asyncJob),
_("canceled by client"));
return -1;
case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_SCSI_HOST:
case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_MDEV:
virReportError(VIR_ERR_OPERATION_UNSUPPORTED,
- _("cannot migrate a domain with <hostdev mode='subsystem' type='%s'>"),
+ _("cannot migrate a domain with <hostdev mode='subsystem' type='%1$s'>"),
virDomainHostdevSubsysTypeToString(hostdev->source.subsys.type));
return false;
virDomainNetType actualType = virDomainNetGetActualType(hostdev->parentnet);
virReportError(VIR_ERR_OPERATION_UNSUPPORTED,
- _("cannot migrate a domain with <interface type='%s'>"),
+ _("cannot migrate a domain with <interface type='%1$s'>"),
virDomainNetTypeToString(actualType));
} else {
virReportError(VIR_ERR_OPERATION_UNSUPPORTED,
- _("cannot migrate a domain with <hostdev mode='subsystem' type='%s'>"),
+ _("cannot migrate a domain with <hostdev mode='subsystem' type='%1$s'>"),
virDomainHostdevSubsysTypeToString(hostdev->source.subsys.type));
}
return false;
if (nsnapshots > 0) {
virReportError(VIR_ERR_OPERATION_INVALID,
- _("cannot migrate domain with %d snapshots"),
+ _("cannot migrate domain with %1$d snapshots"),
nsnapshots);
return false;
}
if (blockers && blockers[0]) {
g_autofree char *reasons = g_strjoinv("; ", blockers);
virReportError(VIR_ERR_OPERATION_INVALID,
- _("cannot migrate domain: %s"), reasons);
+ _("cannot migrate domain: %1$s"), reasons);
return false;
}
} else {
}
if (shmem->role != VIR_DOMAIN_SHMEM_ROLE_MASTER) {
virReportError(VIR_ERR_OPERATION_INVALID,
- _("shmem device '%s' cannot be migrated, "
- "only shmem with role='%s' can be migrated"),
+ _("shmem device '%1$s' cannot be migrated, only shmem with role='%2$s' can be migrated"),
shmem->name,
virDomainShmemRoleTypeToString(VIR_DOMAIN_SHMEM_ROLE_MASTER));
return false;
switch (jobData->status) {
case VIR_DOMAIN_JOB_STATUS_NONE:
virReportError(VIR_ERR_OPERATION_FAILED,
- _("job '%s' is not active"),
+ _("job '%1$s' is not active"),
qemuMigrationJobName(vm));
return -1;
case VIR_DOMAIN_JOB_STATUS_FAILED:
if (error) {
virReportError(VIR_ERR_OPERATION_FAILED,
- _("job '%s' failed: %s"),
+ _("job '%1$s' failed: %2$s"),
qemuMigrationJobName(vm), error);
} else {
virReportError(VIR_ERR_OPERATION_FAILED,
- _("job '%s' unexpectedly failed"),
+ _("job '%1$s' unexpectedly failed"),
qemuMigrationJobName(vm));
}
return -1;
case VIR_DOMAIN_JOB_STATUS_CANCELED:
virReportError(VIR_ERR_OPERATION_ABORTED,
- _("job '%s' canceled by client"),
+ _("job '%1$s' canceled by client"),
qemuMigrationJobName(vm));
return -1;
case VIR_DOMAIN_JOB_STATUS_POSTCOPY_PAUSED:
virReportError(VIR_ERR_OPERATION_FAILED,
- _("job '%s' failed in post-copy phase"),
+ _("job '%1$s' failed in post-copy phase"),
qemuMigrationJobName(vm));
return -1;
virDomainObjGetState(vm, &pauseReason) == VIR_DOMAIN_PAUSED &&
pauseReason == VIR_DOMAIN_PAUSED_IOERROR) {
virReportError(VIR_ERR_OPERATION_FAILED,
- _("job '%s' failed due to I/O error"),
+ _("job '%1$s' failed due to I/O error"),
qemuMigrationJobName(vm));
goto error;
}
if ((type = virDomainGraphicsTypeFromString(uri->scheme)) < 0) {
virReportError(VIR_ERR_INVALID_ARG,
- _("unknown graphics type %s"), uri->scheme);
+ _("unknown graphics type %1$s"), uri->scheme);
return -1;
}
if (STRCASEEQ(param->name, "tlsPort")) {
if (virStrToLong_i(param->value, NULL, 10, &tlsPort) < 0) {
virReportError(VIR_ERR_INVALID_ARG,
- _("invalid tlsPort number: %s"),
+ _("invalid tlsPort number: %1$s"),
param->value);
return -1;
}
if (virNetDevOpenvswitchSetMigrateData(cookie->network->net[i].portdata,
netptr->ifname) != 0) {
virReportError(VIR_ERR_INTERNAL_ERROR,
- _("Unable to run command to set OVS port data for "
- "interface %s"), netptr->ifname);
+ _("Unable to run command to set OVS port data for interface %1$s"),
+ netptr->ifname);
return -1;
}
break;
if (j == vm->def->ndisks) {
virReportError(VIR_ERR_INVALID_ARG,
- _("disk target %s not found"),
+ _("disk target %1$s not found"),
migrate_disks[i]);
return NULL;
}
if (vm->job->asyncOwner != 0 &&
vm->job->asyncOwner != virThreadSelfID()) {
virReportError(VIR_ERR_OPERATION_INVALID,
- _("migration of domain %s is being actively monitored by another thread"),
+ _("migration of domain %1$s is being actively monitored by another thread"),
vm->def->name);
return false;
}
if (!virDomainObjIsPostcopy(vm, vm->job)) {
virReportError(VIR_ERR_OPERATION_INVALID,
- _("migration of domain %s is not in post-copy phase"),
+ _("migration of domain %1$s is not in post-copy phase"),
vm->def->name);
return false;
}
if (vm->job->phase < QEMU_MIGRATION_PHASE_POSTCOPY_FAILED &&
!virDomainObjIsFailedPostcopy(vm, vm->job)) {
virReportError(VIR_ERR_OPERATION_INVALID,
- _("post-copy migration of domain %s has not failed"),
+ _("post-copy migration of domain %1$s has not failed"),
vm->def->name);
return false;
}
if (vm->job->phase > expectedPhase) {
virReportError(VIR_ERR_OPERATION_INVALID,
- _("resuming failed post-copy migration of domain %s already in progress"),
+ _("resuming failed post-copy migration of domain %1$s already in progress"),
vm->def->name);
return false;
}
if (!(nodedata = virHashLookup(blockNamedNodeData, disk->nodename))) {
virReportError(VIR_ERR_INTERNAL_ERROR,
- _("failed to find data for block node '%s'"),
+ _("failed to find data for block node '%1$s'"),
disk->nodename);
return -1;
}
vm = virDomainObjListFindByName(driver->domains, def->name);
if (!vm) {
virReportError(VIR_ERR_NO_DOMAIN,
- _("no domain with matching name '%s'"), def->name);
+ _("no domain with matching name '%1$s'"), def->name);
qemuMigrationDstErrorReport(driver, def->name);
return -1;
}
if (uri->scheme == NULL) {
virReportError(VIR_ERR_INVALID_ARG,
- _("missing scheme in migration URI: %s"),
+ _("missing scheme in migration URI: %1$s"),
uri_in);
goto cleanup;
}
STRNEQ(uri->scheme, "rdma") &&
STRNEQ(uri->scheme, "unix")) {
virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED,
- _("unsupported scheme %s in migration URI %s"),
+ _("unsupported scheme %1$s in migration URI %2$s"),
uri->scheme, uri_in);
goto cleanup;
}
listenAddress = uri->path;
} else {
if (uri->server == NULL) {
- virReportError(VIR_ERR_INVALID_ARG, _("missing host in migration"
- " URI: %s"), uri_in);
+ virReportError(VIR_ERR_INVALID_ARG,
+ _("missing host in migration URI: %1$s"),
+ uri_in);
goto cleanup;
}
/* Migration expects a blocking FD */
if (virSetBlocking(spec->dest.fd.qemu, true) < 0) {
- virReportSystemError(errno, _("Unable to set FD %d blocking"),
+ virReportSystemError(errno, _("Unable to set FD %1$d blocking"),
spec->dest.fd.qemu);
goto cleanup;
}
}
virReportError(VIR_ERR_INTERNAL_ERROR,
- _("unexpected migration schema: %d"), spec->destType);
+ _("unexpected migration schema: %1$d"), spec->destType);
return -1;
}
if (virLockManagerPluginUsesState(driver->lockManager) &&
!cookieout) {
virReportError(VIR_ERR_INTERNAL_ERROR,
- _("Migration with lock driver %s requires"
- " cookie support"),
+ _("Migration with lock driver %1$s requires cookie support"),
virLockManagerPluginGetName(driver->lockManager));
return -1;
}
* as this is a critical section so we are guaranteed
* vm->job->abortJob will not change */
vm->job->current->status = VIR_DOMAIN_JOB_STATUS_CANCELED;
- virReportError(VIR_ERR_OPERATION_ABORTED, _("%s: %s"),
+ virReportError(VIR_ERR_OPERATION_ABORTED, _("%1$s: %2$s"),
virDomainAsyncJobTypeToString(vm->job->asyncJob),
_("canceled by client"));
goto exit_monitor;
if (uribits->scheme == NULL) {
virReportError(VIR_ERR_INTERNAL_ERROR,
- _("missing scheme in migration URI: %s"),
+ _("missing scheme in migration URI: %1$s"),
uri);
return -1;
}
VIR_NETDEV_VPORT_PROFILE_OP_MIGRATE_IN_FINISH,
false) < 0) {
virReportError(VIR_ERR_OPERATION_FAILED,
- _("Port profile Associate failed for %s"),
+ _("Port profile Associate failed for %1$s"),
net->ifname);
goto err_exit;
}