mirror of https://gitlab.com/libvirt/libvirt.git
qemu: Fix seamless spice migration
Calling qemuDomainMigrateGraphicsRelocate notifies spice clients to connect to the destination qemu so that they can seamlessly switch streams once migration is done. Unfortunately, current qemu is not able to accept any connections while an incoming migration connection is open. Thus, we need to delay opening the migration connection until the spice client is already connected to the destination qemu.
parent 8f0b03910c
commit d9d518b1c8
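The core of the fix is an ordering constraint: announce the graphics relocation to the spice clients first, and only then open the migration connection from the source. The minimal standalone sketch below illustrates just that ordering; it is not libvirt code, and both helper functions are hypothetical placeholders standing in for qemuDomainMigrateGraphicsRelocate and the new qemuMigrationConnect introduced by this commit.

/* Hypothetical sketch of the ordering enforced by this commit; these
 * helpers are placeholders, not libvirt APIs. */
#include <stdio.h>

/* stands in for qemuDomainMigrateGraphicsRelocate: tell spice clients
 * to connect to the destination qemu */
static int relocate_spice_clients(void)
{
    printf("spice clients notified to connect to destination\n");
    return 0;
}

/* stands in for qemuMigrationConnect: open the TCP connection carrying
 * migration data; the destination qemu stops accepting new client
 * connections once this is open */
static int open_migration_connection(void)
{
    printf("migration connection opened\n");
    return 0;
}

int main(void)
{
    /* relocate first, connect second: the reverse order would leave
     * spice clients unable to reach the destination qemu */
    if (relocate_spice_clients() < 0)
        return 1;
    if (open_migration_connection() < 0)
        return 1;
    return 0;
}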
@@ -1381,6 +1381,7 @@ cleanup:
 
 enum qemuMigrationDestinationType {
     MIGRATION_DEST_HOST,
+    MIGRATION_DEST_CONNECT_HOST,
     MIGRATION_DEST_UNIX,
     MIGRATION_DEST_FD,
 };
@@ -1518,6 +1519,44 @@ cleanup:
     return rv;
 }
 
+static int
+qemuMigrationConnect(struct qemud_driver *driver,
+                     virDomainObjPtr vm,
+                     qemuMigrationSpecPtr spec)
+{
+    virNetSocketPtr sock;
+    const char *host;
+    char *port = NULL;
+    int ret = -1;
+
+    host = spec->dest.host.name;
+    if (virAsprintf(&port, "%d", spec->dest.host.port) < 0) {
+        virReportOOMError();
+        return -1;
+    }
+
+    spec->destType = MIGRATION_DEST_FD;
+    spec->dest.fd.qemu = -1;
+
+    if (virSecurityManagerSetSocketLabel(driver->securityManager, vm->def) < 0)
+        goto cleanup;
+    if (virNetSocketNewConnectTCP(host, port, &sock) == 0) {
+        spec->dest.fd.qemu = virNetSocketDupFD(sock, true);
+        virNetSocketFree(sock);
+    }
+    if (virSecurityManagerClearSocketLabel(driver->securityManager, vm->def) < 0 ||
+        spec->dest.fd.qemu == -1)
+        goto cleanup;
+
+    ret = 0;
+
+cleanup:
+    VIR_FREE(port);
+    if (ret < 0)
+        VIR_FORCE_CLOSE(spec->dest.fd.qemu);
+    return ret;
+}
+
 static int
 qemuMigrationRun(struct qemud_driver *driver,
                  virDomainObjPtr vm,
@@ -1583,6 +1622,11 @@ qemuMigrationRun(struct qemud_driver *driver,
     if (flags & VIR_MIGRATE_NON_SHARED_INC)
         migrate_flags |= QEMU_MONITOR_MIGRATE_NON_SHARED_INC;
 
+    /* connect to the destination qemu if needed */
+    if (spec->destType == MIGRATION_DEST_CONNECT_HOST &&
+        qemuMigrationConnect(driver, vm, spec) < 0)
+        goto cleanup;
+
     switch (spec->destType) {
     case MIGRATION_DEST_HOST:
         ret = qemuMonitorMigrateToHost(priv->mon, migrate_flags,
@@ -1590,6 +1634,10 @@ qemuMigrationRun(struct qemud_driver *driver,
                                        spec->dest.host.port);
         break;
 
+    case MIGRATION_DEST_CONNECT_HOST:
+        /* handled above and transformed into MIGRATION_DEST_FD */
+        break;
+
     case MIGRATION_DEST_UNIX:
         if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_QEMU_UNIX)) {
             ret = qemuMonitorMigrateToUnix(priv->mon, migrate_flags,
@@ -1712,7 +1760,6 @@ static int doNativeMigrate(struct qemud_driver *driver,
     xmlURIPtr uribits = NULL;
     int ret = -1;
     qemuMigrationSpec spec;
-    char *tmp = NULL;
 
     VIR_DEBUG("driver=%p, vm=%p, uri=%s, cookiein=%s, cookieinlen=%d, "
               "cookieout=%p, cookieoutlen=%p, flags=%lx, resource=%lu",
@@ -1720,6 +1767,7 @@ static int doNativeMigrate(struct qemud_driver *driver,
               cookieout, cookieoutlen, flags, resource);
 
     if (STRPREFIX(uri, "tcp:") && !STRPREFIX(uri, "tcp://")) {
+        char *tmp;
         /* HACK: source host generates bogus URIs, so fix them up */
         if (virAsprintf(&tmp, "tcp://%s", uri + strlen("tcp:")) < 0) {
             virReportOOMError();
@@ -1736,41 +1784,20 @@ static int doNativeMigrate(struct qemud_driver *driver,
         return -1;
     }
 
-    spec.fwdType = MIGRATION_FWD_DIRECT;
-
-    if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_QEMU_FD)) {
-        virNetSocketPtr sock;
-
-        spec.destType = MIGRATION_DEST_FD;
-        spec.dest.fd.qemu = -1;
-
-        if (virAsprintf(&tmp, "%d", uribits->port) < 0) {
-            virReportOOMError();
-            goto cleanup;
-        }
-        if (virSecurityManagerSetSocketLabel(driver->securityManager, vm->def) < 0)
-            goto cleanup;
-        if (virNetSocketNewConnectTCP(uribits->server, tmp, &sock) == 0) {
-            spec.dest.fd.qemu = virNetSocketDupFD(sock, true);
-            virNetSocketFree(sock);
-        }
-        if (virSecurityManagerClearSocketLabel(driver->securityManager, vm->def) < 0 ||
-            spec.dest.fd.qemu == -1)
-            goto cleanup;
-    } else {
+    if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_QEMU_FD))
+        spec.destType = MIGRATION_DEST_CONNECT_HOST;
+    else
         spec.destType = MIGRATION_DEST_HOST;
-        spec.dest.host.name = uribits->server;
-        spec.dest.host.port = uribits->port;
-    }
+    spec.dest.host.name = uribits->server;
+    spec.dest.host.port = uribits->port;
+    spec.fwdType = MIGRATION_FWD_DIRECT;
 
     ret = qemuMigrationRun(driver, vm, cookiein, cookieinlen, cookieout,
                            cookieoutlen, flags, resource, &spec, dconn);
 
-cleanup:
     if (spec.destType == MIGRATION_DEST_FD)
         VIR_FORCE_CLOSE(spec.dest.fd.qemu);
 
-    VIR_FREE(tmp);
     xmlFreeURI(uribits);
 
     return ret;