numbers. In the latter case the management application may wish
to choose a specific port number outside the default range in order
to comply with local firewall policies.</li>
+ <li>The second URI uses the UNIX transport method. In this advanced case
+ libvirt should not guess a *migrateuri* and it should be specified using
+ a UNIX socket path URI: <code>unix:///path/to/socket</code>.</li>
</ol>
<h2><a id="config">Configuration file handling</a></h2>
Supported by QEMU driver
</p>
+
+ <h3><a id="scenariounixsocket">Migration using only UNIX sockets</a></h3>
+
+ <p>
+ In niche scenarios where the libvirt daemon does not have access to the
+ network (e.g. when running in a restricted container on a host that has
+ an accessible network), when a management application wants to have
+ complete control over the transfer, or when migrating between two
+ containers on the same host, all the communication can be done using
+ UNIX sockets. This includes connecting to a non-standard socket path for
+ the destination daemon, using UNIX sockets for the hypervisor's
+ communication or for the NBD data transfer. All of that can be used with
+ both peer2peer and direct migration options.
+ </p>
+
+ <p>
+ Example using <code>/tmp/migdir</code> as a directory representing the
+ same path visible from both libvirt daemons. That can be achieved by
+ bind-mounting the same directory to different containers running separate
+ daemons or forwarding connections to these sockets manually
+ (using <code>socat</code>, <code>netcat</code> or a custom piece of
+ software):
+ </p>
+ <pre>
+virsh migrate web1 [--p2p] --copy-storage-all 'qemu+unix:///system?socket=/tmp/migdir/test-sock-driver' 'unix:///tmp/migdir/test-sock-qemu' --disks-uri unix:///tmp/migdir/test-sock-nbd
+ </pre>
+
+ <p>
+ Supported by QEMU driver
+ </p>
+
</body>
</html>
const char *dom_xml = NULL;
const char *dname = NULL;
const char *uri_in = NULL;
- const char *listenAddress = cfg->migrationAddress;
+ const char *listenAddress = NULL;
int nbdPort = 0;
int nmigrate_disks;
g_autofree const char **migrate_disks = NULL;
return -1;
}
+ if (listenAddress) {
+ if (uri_in && STRPREFIX(uri_in, "unix:")) {
+ virReportError(VIR_ERR_INVALID_ARG, "%s",
+ _("Usage of listen-address is forbidden when "
+ "migration URI uses UNIX transport method"));
+ return -1;
+ }
+ } else {
+ listenAddress = cfg->migrationAddress;
+ }
+
if (flags & VIR_MIGRATE_TUNNELLED) {
/* this is a logical error; we never should have gotten here with
* VIR_MIGRATE_TUNNELLED set
goto cleanup;
}
+ if (listenAddress) {
+ if (uri && STRPREFIX(uri, "unix:")) {
+ virReportError(VIR_ERR_INVALID_ARG, "%s",
+ _("Usage of listen-address is forbidden when "
+ "migration URI uses UNIX transport method"));
+ return -1;
+ }
+ }
+
nmigrate_disks = virTypedParamsGetStringList(params, nparams,
VIR_MIGRATE_PARAM_MIGRATE_DISKS,
&migrate_disks);
if (tunnel) {
migrateFrom = g_strdup("stdio");
+ } else if (g_strcmp0(protocol, "unix") == 0) {
+ migrateFrom = g_strdup_printf("%s:%s", protocol, listenAddress);
} else {
bool encloseAddress = false;
bool hostIPv6Capable = false;
}
if (STRNEQ(uri->scheme, "tcp") &&
- STRNEQ(uri->scheme, "rdma")) {
+ STRNEQ(uri->scheme, "rdma") &&
+ STRNEQ(uri->scheme, "unix")) {
virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED,
_("unsupported scheme %s in migration URI %s"),
uri->scheme, uri_in);
goto cleanup;
}
- if (uri->server == NULL) {
- virReportError(VIR_ERR_INVALID_ARG, _("missing host in migration"
- " URI: %s"), uri_in);
- goto cleanup;
- }
-
- if (uri->port == 0) {
- if (virPortAllocatorAcquire(driver->migrationPorts, &port) < 0)
+ if (STREQ(uri->scheme, "unix")) {
+ autoPort = false;
+ listenAddress = uri->path;
+ } else {
+ if (uri->server == NULL) {
+ virReportError(VIR_ERR_INVALID_ARG, _("missing host in migration"
+ " URI: %s"), uri_in);
goto cleanup;
+ }
- /* Send well-formed URI only if uri_in was well-formed */
- if (well_formed_uri) {
- uri->port = port;
- if (!(*uri_out = virURIFormat(uri)))
+ if (uri->port == 0) {
+ if (virPortAllocatorAcquire(driver->migrationPorts, &port) < 0)
goto cleanup;
+
+ /* Send well-formed URI only if uri_in was well-formed */
+ if (well_formed_uri) {
+ uri->port = port;
+ if (!(*uri_out = virURIFormat(uri)))
+ goto cleanup;
+ } else {
+ *uri_out = g_strdup_printf("%s:%d", uri_in, port);
+ }
} else {
- *uri_out = g_strdup_printf("%s:%d", uri_in, port);
+ port = uri->port;
+ autoPort = false;
}
- } else {
- port = uri->port;
- autoPort = false;
}
}
enum qemuMigrationDestinationType {
    MIGRATION_DEST_HOST,
    MIGRATION_DEST_CONNECT_HOST,
+    /* QEMU connects to the destination UNIX socket itself
+     * (handed to qemuMonitorMigrateToSocket). */
+    MIGRATION_DEST_SOCKET,
+    /* libvirt connects to the destination UNIX socket and passes the
+     * resulting FD to QEMU; transformed into MIGRATION_DEST_FD before
+     * the migration is started. */
+    MIGRATION_DEST_CONNECT_SOCKET,
    MIGRATION_DEST_FD,
};
int port;
} host;
+    struct {
+        /* Filesystem path of the destination UNIX socket */
+        const char *path;
+    } socket;
+
struct {
int qemu;
int local;
if (qemuSecuritySetSocketLabel(driver->securityManager, vm->def) < 0)
goto cleanup;
- port = g_strdup_printf("%d", spec->dest.host.port);
- if (virNetSocketNewConnectTCP(spec->dest.host.name,
- port,
- AF_UNSPEC,
- &sock) == 0) {
- fd_qemu = virNetSocketDupFD(sock, true);
- virObjectUnref(sock);
+
+ switch (spec->destType) {
+ case MIGRATION_DEST_CONNECT_HOST:
+ port = g_strdup_printf("%d", spec->dest.host.port);
+ if (virNetSocketNewConnectTCP(spec->dest.host.name,
+ port,
+ AF_UNSPEC,
+ &sock) == 0) {
+ fd_qemu = virNetSocketDupFD(sock, true);
+ virObjectUnref(sock);
+ }
+ break;
+ case MIGRATION_DEST_CONNECT_SOCKET:
+ if (virNetSocketNewConnectUNIX(spec->dest.socket.path,
+ false, NULL,
+ &sock) == 0) {
+ fd_qemu = virNetSocketDupFD(sock, true);
+ virObjectUnref(sock);
+ }
+ break;
+ case MIGRATION_DEST_HOST:
+ case MIGRATION_DEST_SOCKET:
+ case MIGRATION_DEST_FD:
+ break;
}
spec->destType = MIGRATION_DEST_FD;
if (migrate_flags & (QEMU_MONITOR_MIGRATE_NON_SHARED_DISK |
QEMU_MONITOR_MIGRATE_NON_SHARED_INC)) {
if (mig->nbd) {
+ const char *host = "";
+
+ if (spec->destType == MIGRATION_DEST_HOST ||
+ spec->destType == MIGRATION_DEST_CONNECT_HOST) {
+ host = spec->dest.host.name;
+ }
+
/* Currently libvirt does not support setting up of the NBD
* non-shared storage migration with TLS. As we need to honour the
* VIR_MIGRATE_TLS flag, we need to reject such migration until
/* This will update migrate_flags on success */
if (qemuMigrationSrcNBDStorageCopy(driver, vm, mig,
- spec->dest.host.name,
+ host,
migrate_speed,
&migrate_flags,
nmigrate_disks,
goto exit_monitor;
/* connect to the destination qemu if needed */
- if (spec->destType == MIGRATION_DEST_CONNECT_HOST &&
+ if ((spec->destType == MIGRATION_DEST_CONNECT_HOST ||
+ spec->destType == MIGRATION_DEST_CONNECT_SOCKET) &&
qemuMigrationSrcConnect(driver, vm, spec) < 0) {
goto exit_monitor;
}
spec->dest.host.port);
break;
+ case MIGRATION_DEST_SOCKET:
+ qemuSecurityDomainSetPathLabel(driver, vm, spec->dest.socket.path, false);
+ rc = qemuMonitorMigrateToSocket(priv->mon, migrate_flags,
+ spec->dest.socket.path);
+ break;
+
case MIGRATION_DEST_CONNECT_HOST:
+ case MIGRATION_DEST_CONNECT_SOCKET:
/* handled above and transformed into MIGRATION_DEST_FD */
break;
}
}
- /* RDMA and multi-fd migration requires QEMU to connect to the destination
- * itself.
- */
- if (STREQ(uribits->scheme, "rdma") || (flags & VIR_MIGRATE_PARALLEL))
- spec.destType = MIGRATION_DEST_HOST;
- else
- spec.destType = MIGRATION_DEST_CONNECT_HOST;
- spec.dest.host.protocol = uribits->scheme;
- spec.dest.host.name = uribits->server;
- spec.dest.host.port = uribits->port;
+ if (STREQ(uribits->scheme, "unix")) {
+ if ((flags & VIR_MIGRATE_TLS) &&
+ !qemuMigrationParamsTLSHostnameIsSet(migParams)) {
+ virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
+ _("Explicit destination hostname is required "
+ "for TLS migration over UNIX socket"));
+ return -1;
+ }
+
+ if (flags & VIR_MIGRATE_PARALLEL)
+ spec.destType = MIGRATION_DEST_SOCKET;
+ else
+ spec.destType = MIGRATION_DEST_CONNECT_SOCKET;
+
+ spec.dest.socket.path = uribits->path;
+ } else {
+ /* RDMA and multi-fd migration requires QEMU to connect to the destination
+ * itself.
+ */
+ if (STREQ(uribits->scheme, "rdma") || (flags & VIR_MIGRATE_PARALLEL))
+ spec.destType = MIGRATION_DEST_HOST;
+ else
+ spec.destType = MIGRATION_DEST_CONNECT_HOST;
+
+ spec.dest.host.protocol = uribits->scheme;
+ spec.dest.host.name = uribits->server;
+ spec.dest.host.port = uribits->port;
+ }
+
spec.fwdType = MIGRATION_FWD_DIRECT;
ret = qemuMigrationSrcRun(driver, vm, persist_xml, cookiein, cookieinlen, cookieout,