qemu: Allow migration over UNIX socket

This allows:

 a) migration without access to the network

 b) complete control of the migration stream

 c) easy migration between containerised libvirt daemons on the same host

Resolves: https://bugzilla.redhat.com/1638889

Signed-off-by: Martin Kletzander <mkletzan@redhat.com>
Author: Martin Kletzander <mkletzan@redhat.com>
Date:   2020-09-02 12:06:12 +02:00
Parent: ee6c936fbb
Commit: f51cbe92c0

8 changed files with 201 additions and 43 deletions

docs/manpages/virsh.rst

@@ -3270,6 +3270,14 @@ There are a few scenarios where specifying *migrateuri* may help:
   might be specified to choose a specific port number outside the default range in
   order to comply with local firewall policies.
 
+* The *desturi* uses the UNIX transport method. In this advanced case libvirt
+  cannot guess a *migrateuri*, so it has to be specified explicitly as a
+  UNIX socket path URI:
+
+  .. code-block::
+
+      unix:///path/to/socket
+
 See `https://libvirt.org/migration.html#uris <https://libvirt.org/migration.html#uris>`_ for more details on
 migration URIs.
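The same thing expressed against the public API: a minimal sketch, assuming a management application using the typed-parameter migration calls; the connection URI and socket paths are illustrative, not part of the patch.

#include <libvirt/libvirt.h>

/* Sketch: peer-to-peer migration where both the destination daemon and the
 * QEMU migration stream are reached over UNIX sockets. */
static int
migrate_p2p_unix(virDomainPtr dom)
{
    virTypedParameterPtr params = NULL;
    int nparams = 0;
    int maxparams = 0;
    int ret = -1;

    /* explicit migrateuri, since libvirt cannot guess it for UNIX transport */
    if (virTypedParamsAddString(&params, &nparams, &maxparams,
                                VIR_MIGRATE_PARAM_URI,
                                "unix:///tmp/migdir/test-sock-qemu") < 0)
        goto cleanup;

    /* desturi pointing at a daemon listening on a non-standard socket path */
    if (virDomainMigrateToURI3(dom,
                               "qemu+unix:///system?socket=/tmp/migdir/test-sock-driver",
                               params, nparams,
                               VIR_MIGRATE_PEER2PEER | VIR_MIGRATE_LIVE) == 0)
        ret = 0;

 cleanup:
    virTypedParamsFree(params, nparams);
    return ret;
}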
@@ -3296,8 +3304,9 @@ specific parameters separated by '&'. Currently recognized parameters are
 Optional *listen-address* sets the listen address that the hypervisor on the
 destination side should bind to for incoming migration. Both IPv4 and IPv6
 addresses are accepted as well as hostnames (the resolving is done on
-destination). Some hypervisors do not support this feature and will return an
-error if this parameter is used.
+destination). Some hypervisors do not support specifying the listen address and
+will return an error if this parameter is used. This parameter cannot be used if
+*desturi* uses the UNIX transport method.
 
 Optional *disks-port* sets the port that the hypervisor on the destination side
 should bind to for incoming disks traffic. Currently it is supported only by QEMU.
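For completeness, the API counterpart of *listen-address* is the VIR_MIGRATE_PARAM_LISTEN_ADDRESS typed parameter; a minimal sketch (the address is illustrative):

#include <libvirt/libvirt.h>

/* Sketch: ask the destination to bind its incoming migration socket to a
 * specific address; rejected for UNIX-transport desturis by the check added
 * to qemuDomainMigratePrepare3Params() below. */
static int
add_listen_address(virTypedParameterPtr *params, int *nparams, int *maxparams)
{
    return virTypedParamsAddString(params, nparams, maxparams,
                                   VIR_MIGRATE_PARAM_LISTEN_ADDRESS,
                                   "192.0.2.10");
}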

docs/migration.html.in

@@ -201,6 +201,9 @@
         numbers. In the latter case the management application may wish
         to choose a specific port number outside the default range in order
         to comply with local firewall policies.</li>
+      <li>The second URI uses the UNIX transport method. In this advanced case
+        libvirt cannot guess a <i>migrateuri</i>, so it has to be specified as a
+        UNIX socket path URI: <code>unix:///path/to/socket</code>.</li>
     </ol>
 
     <h2><a id="config">Configuration file handling</a></h2>
@@ -628,5 +631,35 @@ virsh migrate --p2p --tunnelled web1 qemu+ssh://desthost/system qemu+ssh://10.0.
       Supported by QEMU driver
     </p>
 
+    <h3><a id="scenariounixsocket">Migration using only UNIX sockets</a></h3>
+
+    <p>
+      In niche scenarios where the libvirt daemon does not have access to the
+      network (e.g. when running in a restricted container on a host that does
+      have network access), when a management application wants complete
+      control over the transfer, or when migrating between two containers on
+      the same host, all of the communication can be done over UNIX sockets.
+      This includes connecting to a non-standard socket path on the destination
+      daemon, and using UNIX sockets for the hypervisor's migration stream or
+      for the NBD data transfer. All of these can be combined with both the
+      peer2peer and direct migration options.
+    </p>
+
+    <p>
+      The following example uses <code>/tmp/migdir</code> as a directory
+      representing the same path visible from both libvirt daemons. That can
+      be achieved by bind-mounting the same directory into different containers
+      running separate daemons or by forwarding connections to these sockets
+      manually (using <code>socat</code>, <code>netcat</code> or a custom piece
+      of software):
+    </p>
+
+    <pre>
+virsh migrate web1 [--p2p] --copy-storage-all 'qemu+unix:///system?socket=/tmp/migdir/test-sock-driver' 'unix:///tmp/migdir/test-sock-qemu' --disks-uri unix:///tmp/migdir/test-sock-nbd
+    </pre>
+
+    <p>
+      Supported by QEMU driver
+    </p>
+
   </body>
 </html>
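The direct (non-peer2peer) variant of the example above, sketched against the public API; this assumes VIR_MIGRATE_PARAM_DISKS_URI as the typed-parameter counterpart of --disks-uri, and all socket paths are illustrative:

#include <libvirt/libvirt.h>

/* Sketch: direct migration where the daemon connection, the migration
 * stream and the NBD storage stream each use a UNIX socket under the
 * shared /tmp/migdir directory. */
static virDomainPtr
migrate_direct_unix(virDomainPtr dom)
{
    virConnectPtr dconn = NULL;
    virTypedParameterPtr params = NULL;
    int nparams = 0;
    int maxparams = 0;
    virDomainPtr ddom = NULL;

    /* destination daemon reachable only through a UNIX socket */
    if (!(dconn = virConnectOpen(
              "qemu+unix:///system?socket=/tmp/migdir/test-sock-driver")))
        return NULL;

    if (virTypedParamsAddString(&params, &nparams, &maxparams,
                                VIR_MIGRATE_PARAM_URI,
                                "unix:///tmp/migdir/test-sock-qemu") < 0 ||
        virTypedParamsAddString(&params, &nparams, &maxparams,
                                VIR_MIGRATE_PARAM_DISKS_URI,
                                "unix:///tmp/migdir/test-sock-nbd") < 0)
        goto cleanup;

    /* VIR_MIGRATE_NON_SHARED_DISK is the equivalent of --copy-storage-all */
    ddom = virDomainMigrate3(dom, dconn, params, nparams,
                             VIR_MIGRATE_LIVE | VIR_MIGRATE_NON_SHARED_DISK);

 cleanup:
    virTypedParamsFree(params, nparams);
    virConnectClose(dconn);
    return ddom;
}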

src/qemu/qemu_driver.c

@@ -11480,7 +11480,7 @@ qemuDomainMigratePrepare3Params(virConnectPtr dconn,
     const char *dom_xml = NULL;
     const char *dname = NULL;
     const char *uri_in = NULL;
-    const char *listenAddress = cfg->migrationAddress;
+    const char *listenAddress = NULL;
     int nbdPort = 0;
     int nmigrate_disks;
     g_autofree const char **migrate_disks = NULL;
@@ -11530,6 +11530,17 @@
         return -1;
     }
 
+    if (listenAddress) {
+        if (uri_in && STRPREFIX(uri_in, "unix:")) {
+            virReportError(VIR_ERR_INVALID_ARG, "%s",
+                           _("Usage of listen-address is forbidden when "
+                             "migration URI uses UNIX transport method"));
+            return -1;
+        }
+    } else {
+        listenAddress = cfg->migrationAddress;
+    }
+
     if (flags & VIR_MIGRATE_TUNNELLED) {
         /* this is a logical error; we never should have gotten here with
          * VIR_MIGRATE_TUNNELLED set
@@ -11771,6 +11782,15 @@ qemuDomainMigratePerform3Params(virDomainPtr dom,
         goto cleanup;
     }
 
+    if (listenAddress) {
+        if (uri && STRPREFIX(uri, "unix:")) {
+            virReportError(VIR_ERR_INVALID_ARG, "%s",
+                           _("Usage of listen-address is forbidden when "
+                             "migration URI uses UNIX transport method"));
+            return -1;
+        }
+    }
+
     nmigrate_disks = virTypedParamsGetStringList(params, nparams,
                                                  VIR_MIGRATE_PARAM_MIGRATE_DISKS,
                                                  &migrate_disks);

src/qemu/qemu_migration.c

@@ -2411,6 +2411,8 @@ qemuMigrationDstPrepare(virDomainObjPtr vm,
     if (tunnel) {
         migrateFrom = g_strdup("stdio");
+    } else if (g_strcmp0(protocol, "unix") == 0) {
+        migrateFrom = g_strdup_printf("%s:%s", protocol, listenAddress);
     } else {
         bool encloseAddress = false;
         bool hostIPv6Capable = false;
@@ -2995,34 +2997,40 @@ qemuMigrationDstPrepareDirect(virQEMUDriverPtr driver,
     }
 
     if (STRNEQ(uri->scheme, "tcp") &&
-        STRNEQ(uri->scheme, "rdma")) {
+        STRNEQ(uri->scheme, "rdma") &&
+        STRNEQ(uri->scheme, "unix")) {
         virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED,
                        _("unsupported scheme %s in migration URI %s"),
                        uri->scheme, uri_in);
         goto cleanup;
     }
 
-    if (uri->server == NULL) {
-        virReportError(VIR_ERR_INVALID_ARG, _("missing host in migration"
-                                              " URI: %s"), uri_in);
-        goto cleanup;
-    }
-
-    if (uri->port == 0) {
-        if (virPortAllocatorAcquire(driver->migrationPorts, &port) < 0)
-            goto cleanup;
-
-        /* Send well-formed URI only if uri_in was well-formed */
-        if (well_formed_uri) {
-            uri->port = port;
-            if (!(*uri_out = virURIFormat(uri)))
-                goto cleanup;
-        } else {
-            *uri_out = g_strdup_printf("%s:%d", uri_in, port);
-        }
-    } else {
-        port = uri->port;
-        autoPort = false;
-    }
+    if (STREQ(uri->scheme, "unix")) {
+        autoPort = false;
+        listenAddress = uri->path;
+    } else {
+        if (uri->server == NULL) {
+            virReportError(VIR_ERR_INVALID_ARG, _("missing host in migration"
+                                                  " URI: %s"), uri_in);
+            goto cleanup;
+        }
+
+        if (uri->port == 0) {
+            if (virPortAllocatorAcquire(driver->migrationPorts, &port) < 0)
+                goto cleanup;
+
+            /* Send well-formed URI only if uri_in was well-formed */
+            if (well_formed_uri) {
+                uri->port = port;
+                if (!(*uri_out = virURIFormat(uri)))
+                    goto cleanup;
+            } else {
+                *uri_out = g_strdup_printf("%s:%d", uri_in, port);
+            }
+        } else {
+            port = uri->port;
+            autoPort = false;
+        }
+    }
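The unix branch works because of how such URIs decompose; a minimal sketch (illustrative path, assuming libvirt's internal viruri.h helpers):

#include "viruri.h"

/* virURIParse("unix:///tmp/migdir/test-sock-qemu") is expected to yield
 * scheme = "unix", server = NULL, port = 0 and
 * path = "/tmp/migdir/test-sock-qemu", which is why the socket path can be
 * taken straight from uri->path and the host/port handling is skipped. */
virURIPtr uri = virURIParse("unix:///tmp/migdir/test-sock-qemu");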
@@ -3237,6 +3245,8 @@ qemuMigrationSrcConfirm(virQEMUDriverPtr driver,
 enum qemuMigrationDestinationType {
     MIGRATION_DEST_HOST,
     MIGRATION_DEST_CONNECT_HOST,
+    MIGRATION_DEST_SOCKET,
+    MIGRATION_DEST_CONNECT_SOCKET,
     MIGRATION_DEST_FD,
 };
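For orientation, the two new destination types mirror the existing host pair: the plain variant has QEMU open the connection itself, while the CONNECT_ variant has libvirt connect and hand QEMU a file descriptor. An annotated copy of the enum (the comments are explanatory, not part of the patch):

enum qemuMigrationDestinationType {
    MIGRATION_DEST_HOST,            /* QEMU connects to host:port itself */
    MIGRATION_DEST_CONNECT_HOST,    /* libvirt connects, passes QEMU an FD */
    MIGRATION_DEST_SOCKET,          /* QEMU connects to the UNIX socket itself */
    MIGRATION_DEST_CONNECT_SOCKET,  /* libvirt connects to the UNIX socket */
    MIGRATION_DEST_FD,              /* migration over an already-open FD */
};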
@@ -3256,6 +3266,10 @@ struct _qemuMigrationSpec {
         int port;
     } host;
 
+    struct {
+        const char *path;
+    } socket;
+
     struct {
         int qemu;
         int local;
@@ -3470,13 +3484,30 @@ qemuMigrationSrcConnect(virQEMUDriverPtr driver,
     if (qemuSecuritySetSocketLabel(driver->securityManager, vm->def) < 0)
         goto cleanup;
 
-    port = g_strdup_printf("%d", spec->dest.host.port);
-    if (virNetSocketNewConnectTCP(spec->dest.host.name,
-                                  port,
-                                  AF_UNSPEC,
-                                  &sock) == 0) {
-        fd_qemu = virNetSocketDupFD(sock, true);
-        virObjectUnref(sock);
-    }
+    switch (spec->destType) {
+    case MIGRATION_DEST_CONNECT_HOST:
+        port = g_strdup_printf("%d", spec->dest.host.port);
+        if (virNetSocketNewConnectTCP(spec->dest.host.name,
+                                      port,
+                                      AF_UNSPEC,
+                                      &sock) == 0) {
+            fd_qemu = virNetSocketDupFD(sock, true);
+            virObjectUnref(sock);
+        }
+        break;
+    case MIGRATION_DEST_CONNECT_SOCKET:
+        if (virNetSocketNewConnectUNIX(spec->dest.socket.path,
+                                       false, NULL,
+                                       &sock) == 0) {
+            fd_qemu = virNetSocketDupFD(sock, true);
+            virObjectUnref(sock);
+        }
+        break;
+    case MIGRATION_DEST_HOST:
+    case MIGRATION_DEST_SOCKET:
+    case MIGRATION_DEST_FD:
+        break;
+    }
 
     spec->destType = MIGRATION_DEST_FD;
@@ -3684,6 +3715,13 @@ qemuMigrationSrcRun(virQEMUDriverPtr driver,
     if (migrate_flags & (QEMU_MONITOR_MIGRATE_NON_SHARED_DISK |
                          QEMU_MONITOR_MIGRATE_NON_SHARED_INC)) {
         if (mig->nbd) {
+            const char *host = "";
+
+            if (spec->destType == MIGRATION_DEST_HOST ||
+                spec->destType == MIGRATION_DEST_CONNECT_HOST) {
+                host = spec->dest.host.name;
+            }
+
             /* Currently libvirt does not support setting up of the NBD
              * non-shared storage migration with TLS. As we need to honour the
              * VIR_MIGRATE_TLS flag, we need to reject such migration until
@@ -3697,7 +3735,7 @@
             /* This will update migrate_flags on success */
             if (qemuMigrationSrcNBDStorageCopy(driver, vm, mig,
-                                               spec->dest.host.name,
+                                               host,
                                                migrate_speed,
                                                &migrate_flags,
                                                nmigrate_disks,
@@ -3745,7 +3783,8 @@
         goto exit_monitor;
 
     /* connect to the destination qemu if needed */
-    if (spec->destType == MIGRATION_DEST_CONNECT_HOST &&
+    if ((spec->destType == MIGRATION_DEST_CONNECT_HOST ||
+         spec->destType == MIGRATION_DEST_CONNECT_SOCKET) &&
         qemuMigrationSrcConnect(driver, vm, spec) < 0) {
         goto exit_monitor;
     }
@@ -3767,7 +3806,14 @@
                                        spec->dest.host.port);
         break;
 
+    case MIGRATION_DEST_SOCKET:
+        qemuSecurityDomainSetPathLabel(driver, vm, spec->dest.socket.path, false);
+
+        rc = qemuMonitorMigrateToSocket(priv->mon, migrate_flags,
+                                        spec->dest.socket.path);
+        break;
+
     case MIGRATION_DEST_CONNECT_HOST:
+    case MIGRATION_DEST_CONNECT_SOCKET:
         /* handled above and transformed into MIGRATION_DEST_FD */
         break;
@@ -3983,16 +4029,35 @@ qemuMigrationSrcPerformNative(virQEMUDriverPtr driver,
         }
     }
 
-    /* RDMA and multi-fd migration requires QEMU to connect to the destination
-     * itself.
-     */
-    if (STREQ(uribits->scheme, "rdma") || (flags & VIR_MIGRATE_PARALLEL))
-        spec.destType = MIGRATION_DEST_HOST;
-    else
-        spec.destType = MIGRATION_DEST_CONNECT_HOST;
-    spec.dest.host.protocol = uribits->scheme;
-    spec.dest.host.name = uribits->server;
-    spec.dest.host.port = uribits->port;
+    if (STREQ(uribits->scheme, "unix")) {
+        if ((flags & VIR_MIGRATE_TLS) &&
+            !qemuMigrationParamsTLSHostnameIsSet(migParams)) {
+            virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
+                           _("Explicit destination hostname is required "
+                             "for TLS migration over UNIX socket"));
+            return -1;
+        }
+
+        if (flags & VIR_MIGRATE_PARALLEL)
+            spec.destType = MIGRATION_DEST_SOCKET;
+        else
+            spec.destType = MIGRATION_DEST_CONNECT_SOCKET;
+
+        spec.dest.socket.path = uribits->path;
+    } else {
+        /* RDMA and multi-fd migration requires QEMU to connect to the
+         * destination itself.
+         */
+        if (STREQ(uribits->scheme, "rdma") || (flags & VIR_MIGRATE_PARALLEL))
+            spec.destType = MIGRATION_DEST_HOST;
+        else
+            spec.destType = MIGRATION_DEST_CONNECT_HOST;
+
+        spec.dest.host.protocol = uribits->scheme;
+        spec.dest.host.name = uribits->server;
+        spec.dest.host.port = uribits->port;
+    }
 
     spec.fwdType = MIGRATION_FWD_DIRECT;
 
     ret = qemuMigrationSrcRun(driver, vm, persist_xml, cookiein, cookieinlen, cookieout,
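A management application opting into TLS here has to supply the hostname itself; a minimal sketch, assuming VIR_MIGRATE_PARAM_TLS_DESTINATION is the typed parameter that feeds QEMU's tls-hostname (the hostname is illustrative):

#include <libvirt/libvirt.h>

/* Sketch: with no host in a unix:// migrateuri there is nothing to verify
 * the server certificate against, so the caller names the destination
 * explicitly. */
static int
add_tls_destination(virTypedParameterPtr *params, int *nparams, int *maxparams)
{
    return virTypedParamsAddString(params, nparams, maxparams,
                                   VIR_MIGRATE_PARAM_TLS_DESTINATION,
                                   "dst-host.example.org");
}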

src/qemu/qemu_migration_params.c

@@ -1008,6 +1008,15 @@ qemuMigrationParamsDisableTLS(virDomainObjPtr vm,
 }
 
 
+bool
+qemuMigrationParamsTLSHostnameIsSet(qemuMigrationParamsPtr migParams)
+{
+    int param = QEMU_MIGRATION_PARAM_TLS_HOSTNAME;
+
+    return (migParams->params[param].set &&
+            STRNEQ(migParams->params[param].value.s, ""));
+}
+
+
 /* qemuMigrationParamsResetTLS
  * @driver: pointer to qemu driver
  * @vm: domain object

src/qemu/qemu_migration_params.h

@@ -113,6 +113,9 @@ int
 qemuMigrationParamsDisableTLS(virDomainObjPtr vm,
                               qemuMigrationParamsPtr migParams);
 
+bool
+qemuMigrationParamsTLSHostnameIsSet(qemuMigrationParamsPtr migParams);
+
 int
 qemuMigrationParamsFetch(virQEMUDriverPtr driver,
                          virDomainObjPtr vm,

src/qemu/qemu_monitor.c

@@ -2555,6 +2555,21 @@ qemuMonitorMigrateToHost(qemuMonitorPtr mon,
 }
 
 
+int
+qemuMonitorMigrateToSocket(qemuMonitorPtr mon,
+                           unsigned int flags,
+                           const char *socketPath)
+{
+    g_autofree char *uri = g_strdup_printf("unix:%s", socketPath);
+
+    VIR_DEBUG("socketPath=%s flags=0x%x", socketPath, flags);
+
+    QEMU_CHECK_MONITOR(mon);
+
+    return qemuMonitorJSONMigrate(mon, flags, uri);
+}
+
+
 int
 qemuMonitorMigrateCancel(qemuMonitorPtr mon)
 {
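A usage sketch for the new helper, mirroring the call site in qemuMigrationSrcRun() above (the socket path is illustrative):

/* Ask QEMU to connect to the given UNIX socket itself and stream the
 * migration data into it; per the "unix:%s" format above, this expands to
 * the QMP "migrate" command with a "unix:/..." URI. */
if (qemuMonitorMigrateToSocket(priv->mon, QEMU_MONITOR_MIGRATE_BACKGROUND,
                               "/tmp/migdir/test-sock-qemu") < 0)
    goto exit_monitor;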

src/qemu/qemu_monitor.h

@@ -853,6 +853,10 @@ int qemuMonitorMigrateToHost(qemuMonitorPtr mon,
                              const char *hostname,
                              int port);
 
+int qemuMonitorMigrateToSocket(qemuMonitorPtr mon,
+                               unsigned int flags,
+                               const char *socketPath);
+
 int qemuMonitorMigrateCancel(qemuMonitorPtr mon);
 
 int qemuMonitorGetDumpGuestMemoryCapability(qemuMonitorPtr mon,