Tunnelled migration.

Implementation of tunnelled migration, using a Unix Domain Socket
on the qemu backend.  Note that this requires very new versions of
qemu (0.10.7 at least) in order to get the appropriate bugfixes.

Signed-off-by: Chris Lalancette <clalance@redhat.com>
This commit is contained in:
Chris Lalancette 2009-09-30 12:51:54 +02:00
parent 3bd4c7ba16
commit 6e16575a37
26 changed files with 1035 additions and 64 deletions

View File

@ -1,4 +1,5 @@
^src/libvirt\.c$
^src/qemu/qemu_driver\.c$
^src/util/util\.c$
^src/xen/xend_internal\.c$
^daemon/libvirtd.c$

View File

@ -55,6 +55,7 @@
#include "datatypes.h"
#include "memory.h"
#include "util.h"
#include "stream.h"
#define VIR_FROM_THIS VIR_FROM_REMOTE
#define REMOTE_DEBUG(fmt, ...) DEBUG(fmt, __VA_ARGS__)
@ -1492,6 +1493,49 @@ remoteDispatchDomainMigrateFinish2 (struct qemud_server *server ATTRIBUTE_UNUSED
return 0;
}
/* Daemon-side dispatcher for REMOTE_PROC_DOMAIN_MIGRATE_PREPARE_TUNNEL.
 *
 * Unwraps the optional uri_in/dname arguments, creates a client stream
 * bound to this RPC call, invokes the driver's PrepareTunnel entry point,
 * and finally registers the stream with the client so incoming stream
 * packets are routed to it.  On any failure the stream is torn down and
 * -1 is returned with rerr populated.
 */
static int
remoteDispatchDomainMigratePrepareTunnel(struct qemud_server *server ATTRIBUTE_UNUSED,
                                         struct qemud_client *client,
                                         virConnectPtr conn,
                                         remote_message_header *hdr,
                                         remote_error *rerr,
                                         remote_domain_migrate_prepare_tunnel_args *args,
                                         void *ret ATTRIBUTE_UNUSED)
{
    struct qemud_client_stream *stream;
    char *uri_in = NULL;
    char *dname = NULL;

    CHECK_CONN (client);

    /* uri_in and dname are optional strings on the wire; unwrap to
     * plain (possibly NULL) C strings. */
    if (args->uri_in != NULL)
        uri_in = *args->uri_in;
    if (args->dname != NULL)
        dname = *args->dname;

    if (!(stream = remoteCreateClientStream(conn, hdr))) {
        remoteDispatchOOMError(rerr);
        return -1;
    }

    if (virDomainMigratePrepareTunnel(conn, stream->st, uri_in,
                                      args->flags, dname, args->resource,
                                      args->dom_xml) == -1) {
        remoteFreeClientStream(client, stream);
        remoteDispatchConnError(rerr, conn);
        return -1;
    }

    if (remoteAddClientStream(client, stream, 0) < 0) {
        remoteDispatchConnError(rerr, conn);
        virStreamAbort(stream->st);
        remoteFreeClientStream(client, stream);
        return -1;
    }

    return 0;
}
static int
remoteDispatchListDefinedDomains (struct qemud_server *server ATTRIBUTE_UNUSED,
struct qemud_client *client ATTRIBUTE_UNUSED,

View File

@ -125,3 +125,4 @@
remote_secret_get_value_args val_remote_secret_get_value_args;
remote_secret_undefine_args val_remote_secret_undefine_args;
remote_secret_lookup_by_usage_args val_remote_secret_lookup_by_usage_args;
remote_domain_migrate_prepare_tunnel_args val_remote_domain_migrate_prepare_tunnel_args;

View File

@ -298,6 +298,14 @@ static int remoteDispatchDomainMigratePrepare2(
remote_error *err,
remote_domain_migrate_prepare2_args *args,
remote_domain_migrate_prepare2_ret *ret);
static int remoteDispatchDomainMigratePrepareTunnel(
struct qemud_server *server,
struct qemud_client *client,
virConnectPtr conn,
remote_message_header *hdr,
remote_error *err,
remote_domain_migrate_prepare_tunnel_args *args,
void *ret);
static int remoteDispatchDomainPinVcpu(
struct qemud_server *server,
struct qemud_client *client,

View File

@ -742,3 +742,8 @@
.args_filter = (xdrproc_t) xdr_remote_secret_lookup_by_usage_args,
.ret_filter = (xdrproc_t) xdr_remote_secret_lookup_by_usage_ret,
},
{ /* DomainMigratePrepareTunnel => 148 */
.fn = (dispatch_fn) remoteDispatchDomainMigratePrepareTunnel,
.args_filter = (xdrproc_t) xdr_remote_domain_migrate_prepare_tunnel_args,
.ret_filter = (xdrproc_t) xdr_void,
},

View File

@ -38,6 +38,7 @@ ignored_functions = {
"virDomainMigratePerform": "private function for migration",
"virDomainMigratePrepare": "private function for migration",
"virDomainMigratePrepare2": "private function for migration",
"virDomainMigratePrepareTunnel": "private function for tunnelled migration",
"virDrvSupportsFeature": "private function for remote access",
"DllMain": "specific function for Win32",
}

View File

@ -337,6 +337,7 @@ typedef virDomainInterfaceStatsStruct *virDomainInterfaceStatsPtr;
/* Domain migration flags.  These are bit flags and may be OR'd together
 * in the flags argument of virDomainMigrate(). */
typedef enum {
    VIR_MIGRATE_LIVE = 1, /* live migration */
    VIR_MIGRATE_TUNNELLED = 2, /* tunnelled migration: data carried over the libvirt RPC connection */
} virDomainMigrateFlags;
/* Domain migration. */

View File

@ -347,6 +347,16 @@ typedef int
(*virDrvNodeDeviceReset)
(virNodeDevicePtr dev);
typedef int
(*virDrvDomainMigratePrepareTunnel)
(virConnectPtr conn,
virStreamPtr st,
const char *uri_in,
unsigned long flags,
const char *dname,
unsigned long resource,
const char *dom_xml);
/**
* _virDriver:
*
@ -427,6 +437,7 @@ struct _virDriver {
virDrvNodeDeviceDettach nodeDeviceDettach;
virDrvNodeDeviceReAttach nodeDeviceReAttach;
virDrvNodeDeviceReset nodeDeviceReset;
virDrvDomainMigratePrepareTunnel domainMigratePrepareTunnel;
};
typedef int

View File

@ -3274,6 +3274,7 @@ static virDriver esxDriver = {
NULL, /* nodeDeviceDettach */
NULL, /* nodeDeviceReAttach */
NULL, /* nodeDeviceReset */
NULL, /* domainMigratePrepareTunnel */
};

View File

@ -3044,6 +3044,70 @@ virDomainMigrateVersion2 (virDomainPtr domain,
return ddomain;
}
/*
 * Tunnelled migration has the following flow:
 *
 *  virDomainMigrate(src, uri)
 *   - virDomainMigratePerform(src, uri)
 *      - dst = virConnectOpen(uri)
 *      - virDomainMigratePrepareTunnel(dst)
 *      - while (1)
 *          - virStreamSend(dst, data)
 *      - virDomainMigrateFinish(dst)
 *      - virConnectClose(dst)
 *
 * i.e. unlike the normal flows, the *source* driver opens the connection
 * to the destination and drives the whole exchange; the caller never
 * supplies a dconn.  Returns the destination domain object looked up by
 * name, or NULL on failure.
 */
static virDomainPtr
virDomainMigrateTunnelled(virDomainPtr domain,
                          unsigned long flags,
                          const char *dname,
                          const char *uri,
                          unsigned long bandwidth)
{
    virConnectPtr dconn;
    virDomainPtr ddomain = NULL;

    if (uri == NULL) {
        /* if you are doing a secure migration, you *must* also pass a uri */
        virLibConnError(domain->conn, VIR_ERR_INVALID_ARG,
                        _("requested TUNNELLED migration, but no URI passed"));
        return NULL;
    }

    if (domain->conn->flags & VIR_CONNECT_RO) {
        virLibDomainError(domain, VIR_ERR_OPERATION_DENIED, __FUNCTION__);
        return NULL;
    }

    /* FIXME: do we even need this check?  In theory, V1 of the protocol
     * should be able to do tunnelled migration as well
     */
    if (!VIR_DRV_SUPPORTS_FEATURE(domain->conn->driver, domain->conn,
                                  VIR_DRV_FEATURE_MIGRATION_V2)) {
        virLibConnError(domain->conn, VIR_ERR_NO_SUPPORT, __FUNCTION__);
        return NULL;
    }

    /* Perform the migration.  The driver isn't supposed to return
     * until the migration is complete.  Note dconn is NULL: the driver
     * opens its own connection to uri for the tunnel.
     */
    if (domain->conn->driver->domainMigratePerform
        (domain, NULL, 0, uri, flags, dname, bandwidth) == -1)
        return NULL;

    dconn = virConnectOpen(uri);
    if (dconn == NULL)
        /* FIXME: this is pretty crappy; as far as we know, the migration has
         * now succeeded, but we can't connect back to the other side
         */
        return NULL;

    /* Look up the (possibly renamed) domain on the destination. */
    ddomain = virDomainLookupByName(dconn, dname ? dname : domain->name);

    virConnectClose(dconn);

    return ddomain;
}
/**
* virDomainMigrate:
* @domain: a domain object
@ -3058,6 +3122,8 @@ virDomainMigrateVersion2 (virDomainPtr domain,
*
* Flags may be one of more of the following:
* VIR_MIGRATE_LIVE Attempt a live migration.
* VIR_MIGRATE_TUNNELLED Attempt to do a migration tunnelled through the
* libvirt RPC mechanism
*
* If a hypervisor supports renaming domains during migration,
* then you may set the dname parameter to the new name (otherwise
@ -3116,32 +3182,48 @@ virDomainMigrate (virDomainPtr domain,
goto error;
}
if (flags & VIR_MIGRATE_TUNNELLED) {
/* tunnelled migration is more or less a completely different migration
* protocol. dconn has to be NULL, uri has to be set, and the flow
* of logic is completely different. Hence, here we split off from
* the main migration flow and use a separate function.
*/
if (dconn != NULL) {
virLibConnError(domain->conn, VIR_ERR_INVALID_ARG,
_("requested TUNNELLED migration, but non-NULL dconn"));
goto error;
}
ddomain = virDomainMigrateTunnelled(domain, flags, dname, uri, bandwidth);
}
else {
/* Now checkout the destination */
if (!VIR_IS_CONNECT (dconn)) {
virLibConnError (domain->conn, VIR_ERR_INVALID_CONN, __FUNCTION__);
if (!VIR_IS_CONNECT(dconn)) {
virLibConnError(domain->conn, VIR_ERR_INVALID_CONN, __FUNCTION__);
goto error;
}
if (dconn->flags & VIR_CONNECT_RO) {
/* NB, deliberately report error against source object, not dest */
virLibDomainError (domain, VIR_ERR_OPERATION_DENIED, __FUNCTION__);
virLibDomainError(domain, VIR_ERR_OPERATION_DENIED, __FUNCTION__);
goto error;
}
/* Check that migration is supported by both drivers. */
if (VIR_DRV_SUPPORTS_FEATURE (domain->conn->driver, domain->conn,
if (VIR_DRV_SUPPORTS_FEATURE(domain->conn->driver, domain->conn,
VIR_DRV_FEATURE_MIGRATION_V1) &&
VIR_DRV_SUPPORTS_FEATURE (dconn->driver, dconn,
VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
VIR_DRV_FEATURE_MIGRATION_V1))
ddomain = virDomainMigrateVersion1 (domain, dconn, flags, dname, uri, bandwidth);
else if (VIR_DRV_SUPPORTS_FEATURE (domain->conn->driver, domain->conn,
ddomain = virDomainMigrateVersion1(domain, dconn, flags, dname, uri, bandwidth);
else if (VIR_DRV_SUPPORTS_FEATURE(domain->conn->driver, domain->conn,
VIR_DRV_FEATURE_MIGRATION_V2) &&
VIR_DRV_SUPPORTS_FEATURE (dconn->driver, dconn,
VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
VIR_DRV_FEATURE_MIGRATION_V2))
ddomain = virDomainMigrateVersion2 (domain, dconn, flags, dname, uri, bandwidth);
ddomain = virDomainMigrateVersion2(domain, dconn, flags, dname, uri, bandwidth);
else {
virLibConnError (domain->conn, VIR_ERR_NO_SUPPORT, __FUNCTION__);
virLibConnError(domain->conn, VIR_ERR_NO_SUPPORT, __FUNCTION__);
goto error;
}
}
if (ddomain == NULL)
goto error;
@ -3398,6 +3480,59 @@ error:
}
/*
 * Not for public use.  This function is part of the internal
 * implementation of migration in the remote case.
 *
 * @conn: destination connection the tunnel terminates on
 * @st: stream (owned by the caller) that will carry the migration data;
 *      must belong to @conn
 * @uri_in: optional destination URI hint (may be NULL)
 * @flags: virDomainMigrateFlags bits
 * @dname: optional rename of the domain on the destination (may be NULL)
 * @bandwidth: migration bandwidth limit
 * @dom_xml: XML description of the domain being migrated
 *
 * Returns the driver's result (>= 0) on success, -1 on error.
 */
int
virDomainMigratePrepareTunnel(virConnectPtr conn,
                              virStreamPtr st,
                              const char *uri_in,
                              unsigned long flags,
                              const char *dname,
                              unsigned long bandwidth,
                              const char *dom_xml)
{
    /* uri_in and dom_xml may legitimately be NULL at this point; wrap
     * them in NULLSTR() so we never hand NULL to a %s conversion
     * (undefined behavior), matching the existing treatment of dname. */
    VIR_DEBUG("conn=%p, stream=%p, uri_in=%s, flags=%lu, dname=%s, "
              "bandwidth=%lu, dom_xml=%s", conn, st, NULLSTR(uri_in), flags,
              NULLSTR(dname), bandwidth, NULLSTR(dom_xml));

    virResetLastError();

    if (!VIR_IS_CONNECT(conn)) {
        virLibConnError(NULL, VIR_ERR_INVALID_CONN, __FUNCTION__);
        return -1;
    }

    if (conn->flags & VIR_CONNECT_RO) {
        virLibConnError(conn, VIR_ERR_OPERATION_DENIED, __FUNCTION__);
        goto error;
    }

    /* Guard against a NULL stream before dereferencing st->conn; the
     * stream must also belong to the same connection as the tunnel. */
    if (st == NULL || conn != st->conn) {
        virLibConnError(conn, VIR_ERR_INVALID_ARG, __FUNCTION__);
        goto error;
    }

    if (conn->driver->domainMigratePrepareTunnel) {
        int rv = conn->driver->domainMigratePrepareTunnel(conn, st, uri_in,
                                                          flags, dname,
                                                          bandwidth, dom_xml);
        if (rv < 0)
            goto error;
        return rv;
    }

    virLibConnError(conn, VIR_ERR_NO_SUPPORT, __FUNCTION__);

error:
    /* Copy to connection error object for back compatibility */
    virSetConnError(conn);
    return -1;
}
/**
* virNodeGetInfo:
* @conn: pointer to the hypervisor connection

View File

@ -70,6 +70,12 @@ virDomainPtr virDomainMigrateFinish2 (virConnectPtr dconn,
const char *uri,
unsigned long flags,
int retcode);
int virDomainMigratePrepareTunnel(virConnectPtr conn,
virStreamPtr st,
const char *uri_in,
unsigned long flags,
const char *dname,
unsigned long resource,
const char *dom_xml);
#endif

View File

@ -239,6 +239,7 @@ virDomainMigratePerform;
virDomainMigrateFinish;
virDomainMigratePrepare2;
virDomainMigrateFinish2;
virDomainMigratePrepareTunnel;
virRegisterDriver;
virRegisterInterfaceDriver;
virRegisterNetworkDriver;

View File

@ -2147,6 +2147,7 @@ static virDriver lxcDriver = {
NULL, /* nodeDeviceDettach */
NULL, /* nodeDeviceReAttach */
NULL, /* nodeDeviceReset */
NULL, /* domainMigratePrepareTunnel */
};
static virStateDriver lxcStateDriver = {

View File

@ -787,6 +787,7 @@ static virDriver oneDriver = {
NULL, /* nodeDeviceDettach; */
NULL, /* nodeDeviceReAttach; */
NULL, /* nodeDeviceReset; */
NULL, /* domainMigratePrepareTunnel */
};
static virStateDriver oneStateDriver = {

View File

@ -1432,6 +1432,7 @@ static virDriver openvzDriver = {
NULL, /* nodeDeviceDettach */
NULL, /* nodeDeviceReAttach */
NULL, /* nodeDeviceReset */
NULL, /* domainMigratePrepareTunnel */
};
int openvzRegister(void) {

View File

@ -1377,6 +1377,7 @@ virDriver phypDriver = {
NULL, /* nodeDeviceDettach */
NULL, /* nodeDeviceReAttach */
NULL, /* nodeDeviceReset */
NULL, /* domainMigratePrepareTunnel */
};
int

View File

@ -71,6 +71,7 @@
#include "hostusb.h"
#include "security/security_driver.h"
#include "cgroup.h"
#include "libvirt_internal.h"
#define VIR_FROM_THIS VIR_FROM_QEMU
@ -5796,6 +5797,432 @@ static void qemuDomainEventQueue(struct qemud_driver *driver,
/* Migration support. */
/* Tunnelled migration stream support */

/* Per-stream private state for the tunnelled-migration stream driver.
 * Lives in virStreamPtr->privateData; protected by the qemu driver lock. */
struct qemuStreamMigFile {
    int fd;                    /* unix socket carrying the migration data */
    int watch;                 /* event-loop handle id; 0 == no callback registered */

    unsigned int cbRemoved;    /* callback was removed while it was being dispatched;
                                  defer the free callback until dispatch ends */
    unsigned int dispatching;  /* non-zero while cb is being invoked (lock dropped) */
    virStreamEventCallback cb; /* user event callback, NULL when unregistered */
    void *opaque;              /* user data passed to cb */
    virFreeCallback ff;        /* destructor for opaque */
};
/* Unregister the event callback of a tunnelled-migration stream.
 *
 * Removes the event-loop watch and clears the callback state.  If the
 * callback is currently being dispatched (qemuStreamMigEvent dropped the
 * driver lock to invoke it), only mark cbRemoved so the free callback is
 * run by the dispatcher once it finishes; otherwise run it here.
 * Returns 0 on success, -1 with an error reported otherwise.
 */
static int qemuStreamMigRemoveCallback(virStreamPtr stream)
{
    struct qemud_driver *driver = stream->conn->privateData;
    struct qemuStreamMigFile *qemust = stream->privateData;
    int ret = -1;

    if (!qemust) {
        qemudReportError(stream->conn, NULL, NULL, VIR_ERR_INTERNAL_ERROR,
                         "%s", _("stream is not open"));
        return -1;
    }

    qemuDriverLock(driver);
    if (qemust->watch == 0) {
        qemudReportError(stream->conn, NULL, NULL, VIR_ERR_INTERNAL_ERROR,
                         "%s", _("stream does not have a callback registered"));
        goto cleanup;
    }

    virEventRemoveHandle(qemust->watch);
    if (qemust->dispatching)
        qemust->cbRemoved = 1;      /* dispatcher will invoke ff for us */
    else if (qemust->ff)
        (qemust->ff)(qemust->opaque);

    qemust->watch = 0;
    qemust->ff = NULL;
    qemust->cb = NULL;
    qemust->opaque = NULL;

    ret = 0;

cleanup:
    qemuDriverUnlock(driver);
    return ret;
}
/* Change the event mask of an already-registered stream callback.
 *
 * Simply forwards the new events bitmask to the event loop for the
 * existing watch.  Fails if the stream is not open or no callback has
 * been registered.  Returns 0 on success, -1 on error.
 */
static int qemuStreamMigUpdateCallback(virStreamPtr stream, int events)
{
    struct qemud_driver *driver = stream->conn->privateData;
    struct qemuStreamMigFile *privst = stream->privateData;
    int rc = -1;

    if (privst == NULL) {
        qemudReportError(stream->conn, NULL, NULL, VIR_ERR_INTERNAL_ERROR,
                         "%s", _("stream is not open"));
        return -1;
    }

    qemuDriverLock(driver);
    if (privst->watch == 0) {
        qemudReportError(stream->conn, NULL, NULL, VIR_ERR_INTERNAL_ERROR,
                         "%s", _("stream does not have a callback registered"));
    } else {
        virEventUpdateHandle(privst->watch, events);
        rc = 0;
    }
    qemuDriverUnlock(driver);

    return rc;
}
/* Event-loop handler for the tunnelled-migration stream's fd watch.
 *
 * Snapshots the user callback under the driver lock, marks the stream
 * as dispatching, then drops the lock while invoking the callback so the
 * callback itself may call back into the stream API without deadlocking.
 * If the callback was removed during dispatch (cbRemoved set by
 * qemuStreamMigRemoveCallback), the deferred free callback runs here.
 *
 * NOTE(review): qemust is dereferenced again after the lock is re-taken;
 * this assumes the stream's privateData cannot be freed (e.g. by
 * qemuStreamMigClose) while the callback runs — TODO confirm.
 */
static void qemuStreamMigEvent(int watch ATTRIBUTE_UNUSED,
                               int fd ATTRIBUTE_UNUSED,
                               int events,
                               void *opaque)
{
    virStreamPtr stream = opaque;
    struct qemud_driver *driver = stream->conn->privateData;
    struct qemuStreamMigFile *qemust = stream->privateData;
    virStreamEventCallback cb;
    void *cbopaque;
    virFreeCallback ff;

    qemuDriverLock(driver);
    if (!qemust || !qemust->cb) {
        qemuDriverUnlock(driver);
        return;
    }

    /* snapshot callback state before dropping the lock */
    cb = qemust->cb;
    cbopaque = qemust->opaque;
    ff = qemust->ff;
    qemust->dispatching = 1;
    qemuDriverUnlock(driver);

    cb(stream, events, cbopaque);

    qemuDriverLock(driver);
    qemust->dispatching = 0;
    if (qemust->cbRemoved && ff)
        /* callback was unregistered while we were dispatching */
        (ff)(cbopaque);
    qemuDriverUnlock(driver);
}
/* Register an event callback on a tunnelled-migration stream.
 *
 * Adds an event-loop watch on the stream's fd that fires
 * qemuStreamMigEvent, records the user callback, and takes a reference
 * on the stream.  Only one callback may be registered at a time.
 * Returns 0 on success, -1 with an error reported otherwise.
 *
 * NOTE(review): "no callback" is encoded as watch == 0, but the
 * failure check below is only `< 0` — this assumes virEventAddHandle
 * never returns 0 for a valid watch; verify against the event impl.
 */
static int
qemuStreamMigAddCallback(virStreamPtr st,
                         int events,
                         virStreamEventCallback cb,
                         void *opaque,
                         virFreeCallback ff)
{
    struct qemud_driver *driver = st->conn->privateData;
    struct qemuStreamMigFile *qemust = st->privateData;
    int ret = -1;

    if (!qemust) {
        qemudReportError(st->conn, NULL, NULL, VIR_ERR_INTERNAL_ERROR,
                         "%s", _("stream is not open"));
        return -1;
    }

    qemuDriverLock(driver);
    if (qemust->watch != 0) {
        qemudReportError(st->conn, NULL, NULL, VIR_ERR_INTERNAL_ERROR,
                         "%s", _("stream already has a callback registered"));
        goto cleanup;
    }

    if ((qemust->watch = virEventAddHandle(qemust->fd,
                                           events,
                                           qemuStreamMigEvent,
                                           st,
                                           NULL)) < 0) {
        qemudReportError(st->conn, NULL, NULL, VIR_ERR_INTERNAL_ERROR,
                         "%s", _("cannot register file watch on stream"));
        goto cleanup;
    }

    qemust->cbRemoved = 0;
    qemust->cb = cb;
    qemust->opaque = opaque;
    qemust->ff = ff;
    virStreamRef(st);       /* hold the stream alive while the watch exists */

    ret = 0;

cleanup:
    qemuDriverUnlock(driver);
    return ret;
}
/* Release a qemuStreamMigFile: close its socket (if open) and free it.
 * Like free(), accepts NULL and does nothing — this makes it safe to
 * call unconditionally from cleanup paths. */
static void qemuStreamMigFree(struct qemuStreamMigFile *qemust)
{
    if (qemust == NULL)
        return;
    if (qemust->fd != -1)
        close(qemust->fd);
    VIR_FREE(qemust);
}
/* Connect to the unix socket qemu is listening on for incoming migration
 * data, retrying for ~3 seconds while the socket shows up.
 *
 * Returns a newly allocated qemuStreamMigFile wrapping the connected fd
 * (non-blocking if the stream requested it), or NULL on error.
 *
 * Bug fixed: previously, if every connect() attempt failed with
 * ENOENT/ECONNREFUSED until the retry budget was exhausted, the loop
 * fell through and the function returned a struct wrapping an
 * UNconnected socket.  We now check the final connect() result after
 * the loop and fail instead.
 */
static struct qemuStreamMigFile *qemuStreamMigOpen(virStreamPtr st,
                                                   const char *unixfile)
{
    struct qemuStreamMigFile *qemust = NULL;
    struct sockaddr_un sa_qemu;
    int i = 0;
    int timeout = 3;            /* seconds; polled in 200ms steps */
    int ret;

    if (VIR_ALLOC(qemust) < 0)
        return NULL;

    qemust->fd = socket(AF_UNIX, SOCK_STREAM, 0);
    if (qemust->fd < 0)
        goto cleanup;

    memset(&sa_qemu, 0, sizeof(sa_qemu));
    sa_qemu.sun_family = AF_UNIX;
    if (virStrcpy(sa_qemu.sun_path, unixfile, sizeof(sa_qemu.sun_path)) == NULL)
        goto cleanup;

    do {
        ret = connect(qemust->fd, (struct sockaddr *)&sa_qemu, sizeof(sa_qemu));
        if (ret == 0)
            break;

        if (errno == ENOENT || errno == ECONNREFUSED) {
            /* ENOENT       : Socket may not have shown up yet
             * ECONNREFUSED : Leftover socket hasn't been removed yet */
            continue;
        }

        goto cleanup;
    } while ((++i <= timeout * 5) && (usleep(200 * 1000) <= 0));

    if (ret != 0) {
        /* retries exhausted without ever connecting */
        goto cleanup;
    }

    if ((st->flags & VIR_STREAM_NONBLOCK) && virSetNonBlock(qemust->fd) < 0)
        goto cleanup;

    return qemust;

cleanup:
    qemuStreamMigFree(qemust);
    return NULL;
}
/* Close a tunnelled-migration stream: releases the private state
 * (closing the underlying socket) and detaches it from the stream.
 * Serves as both streamFinish and streamAbort; always returns 0. */
static int
qemuStreamMigClose(virStreamPtr st)
{
    struct qemud_driver *driver = st->conn->privateData;
    struct qemuStreamMigFile *privst = st->privateData;

    if (privst == NULL)
        return 0;

    qemuDriverLock(driver);
    st->privateData = NULL;
    qemuStreamMigFree(privst);
    qemuDriverUnlock(driver);

    return 0;
}
/* Write migration data arriving on the stream into the unix socket
 * feeding qemu's -incoming.
 *
 * Returns the number of bytes written, -2 if the socket is non-blocking
 * and the write would block (caller should retry later), or -1 on a
 * real error (reported).  EINTR is retried internally.
 */
static int qemuStreamMigWrite(virStreamPtr st, const char *bytes, size_t nbytes)
{
    struct qemud_driver *driver = st->conn->privateData;
    struct qemuStreamMigFile *privst = st->privateData;
    int rc;

    if (privst == NULL) {
        qemudReportError(st->conn, NULL, NULL, VIR_ERR_INTERNAL_ERROR,
                         "%s", _("stream is not open"));
        return -1;
    }

    qemuDriverLock(driver);

    do {
        rc = write(privst->fd, bytes, nbytes);
    } while (rc < 0 && errno == EINTR);

    if (rc < 0) {
        if (errno == EAGAIN || errno == EWOULDBLOCK) {
            rc = -2;        /* would block: not an error, caller retries */
        } else {
            virReportSystemError(st->conn, errno, "%s",
                                 _("cannot write to stream"));
            rc = -1;
        }
    }

    qemuDriverUnlock(driver);
    return rc;
}
/* Stream driver vtable for tunnelled migration.  Write-only: there is
 * deliberately no streamRecv — data flows source -> destination only.
 * Finish and Abort are the same operation (close the socket). */
static virStreamDriver qemuStreamMigDrv = {
    .streamSend = qemuStreamMigWrite,
    .streamFinish = qemuStreamMigClose,
    .streamAbort = qemuStreamMigClose,
    .streamAddCallback = qemuStreamMigAddCallback,
    .streamUpdateCallback = qemuStreamMigUpdateCallback,
    .streamRemoveCallback = qemuStreamMigRemoveCallback
};
/* Prepare is the first step, and it runs on the destination host.
 *
 * This version starts an empty VM listening on a local unix socket, and
 * sets up the corresponding virStream to feed the incoming data into
 * that socket.
 *
 * Returns 0 on success (st's driver/privateData are then owned by the
 * stream machinery), -1 on error.
 *
 * Bug fixed: the cleanup path unconditionally called unlink(unixfile),
 * but every early exit before virAsprintf() reaches cleanup with
 * unixfile == NULL, and passing NULL to unlink() is undefined behavior.
 * The unlink is now guarded.
 */
static int
qemudDomainMigratePrepareTunnel(virConnectPtr dconn,
                                virStreamPtr st,
                                const char *uri_in,
                                unsigned long flags,
                                const char *dname,
                                unsigned long resource ATTRIBUTE_UNUSED,
                                const char *dom_xml)
{
    struct qemud_driver *driver = dconn->privateData;
    virDomainDefPtr def = NULL;
    virDomainObjPtr vm = NULL;
    char *migrateFrom;
    virDomainEventPtr event = NULL;
    int ret = -1;
    int internalret;
    char *unixfile = NULL;
    unsigned int qemuCmdFlags;
    struct qemuStreamMigFile *qemust = NULL;

    qemuDriverLock(driver);

    /* Validate arguments before touching any state. */
    if (!dom_xml) {
        qemudReportError(dconn, NULL, NULL, VIR_ERR_INTERNAL_ERROR,
                         "%s", _("no domain XML passed"));
        goto cleanup;
    }
    if (!uri_in) {
        qemudReportError(dconn, NULL, NULL, VIR_ERR_INTERNAL_ERROR,
                         "%s", _("no URI passed"));
        goto cleanup;
    }
    if (!(flags & VIR_MIGRATE_TUNNELLED)) {
        qemudReportError(dconn, NULL, NULL, VIR_ERR_INTERNAL_ERROR,
                         "%s", _("PrepareTunnel called but no TUNNELLED flag set"));
        goto cleanup;
    }
    if (st == NULL) {
        qemudReportError(dconn, NULL, NULL, VIR_ERR_INTERNAL_ERROR,
                         "%s", _("tunnelled migration requested but NULL stream passed"));
        goto cleanup;
    }

    /* Parse the domain XML. */
    if (!(def = virDomainDefParseString(dconn, driver->caps, dom_xml,
                                        VIR_DOMAIN_XML_INACTIVE))) {
        qemudReportError(dconn, NULL, NULL, VIR_ERR_OPERATION_FAILED,
                         "%s", _("failed to parse XML"));
        goto cleanup;
    }

    /* Target domain name, maybe renamed. */
    dname = dname ? dname : def->name;

    /* Ensure the name and UUID don't already exist in an active VM */
    vm = virDomainFindByUUID(&driver->domains, def->uuid);
    if (!vm) vm = virDomainFindByName(&driver->domains, dname);
    if (vm) {
        if (virDomainIsActive(vm)) {
            qemudReportError(dconn, NULL, NULL, VIR_ERR_OPERATION_FAILED,
                             _("domain with the same name or UUID already exists as '%s'"),
                             vm->def->name);
            goto cleanup;
        }
        virDomainObjUnlock(vm);
    }

    if (!(vm = virDomainAssignDef(dconn,
                                  &driver->domains,
                                  def))) {
        qemudReportError(dconn, NULL, NULL, VIR_ERR_OPERATION_FAILED,
                         "%s", _("failed to assign new VM"));
        goto cleanup;
    }
    def = NULL;     /* now owned by vm */

    /* Domain starts inactive, even if the domain XML had an id field. */
    vm->def->id = -1;

    if (virAsprintf(&unixfile, "%s/qemu.tunnelmigrate.dest.%s",
                    driver->stateDir, vm->def->name) < 0) {
        virReportOOMError (dconn);
        goto cleanup;
    }
    unlink(unixfile);   /* remove any leftover socket from a prior run */

    /* check that this qemu version supports the interactive exec */
    if (qemudExtractVersionInfo(vm->def->emulator, NULL, &qemuCmdFlags) < 0) {
        qemudReportError(dconn, NULL, NULL, VIR_ERR_INTERNAL_ERROR,
                         _("Cannot determine QEMU argv syntax %s"),
                         vm->def->emulator);
        goto cleanup;
    }
    if (qemuCmdFlags & QEMUD_CMD_FLAG_MIGRATE_QEMU_UNIX)
        internalret = virAsprintf(&migrateFrom, "unix:%s", unixfile);
    else if (qemuCmdFlags & QEMUD_CMD_FLAG_MIGRATE_QEMU_EXEC)
        internalret = virAsprintf(&migrateFrom, "exec:nc -U -l %s", unixfile);
    else {
        qemudReportError(dconn, NULL, NULL, VIR_ERR_OPERATION_FAILED,
                         "%s", _("Destination qemu is too old to support tunnelled migration"));
        goto cleanup;
    }
    if (internalret < 0) {
        virReportOOMError(dconn);
        goto cleanup;
    }

    /* Start the QEMU daemon, with the same command-line arguments plus
     * -incoming unix:/path/to/file or exec:nc -U /path/to/file
     */
    internalret = qemudStartVMDaemon(dconn, driver, vm, migrateFrom, -1);
    VIR_FREE(migrateFrom);
    if (internalret < 0) {
        /* Note that we don't set an error here because qemudStartVMDaemon
         * should have already done that.
         */
        if (!vm->persistent) {
            virDomainRemoveInactive(&driver->domains, vm);
            vm = NULL;
        }
        goto cleanup;
    }

    qemust = qemuStreamMigOpen(st, unixfile);
    if (qemust == NULL) {
        qemudShutdownVMDaemon(NULL, driver, vm);
        virReportSystemError(dconn, errno,
                             _("cannot open unix socket '%s' for tunnelled migration"),
                             unixfile);
        goto cleanup;
    }

    /* Hand the stream over to our tunnelled-migration stream driver. */
    st->driver = &qemuStreamMigDrv;
    st->privateData = qemust;

    event = virDomainEventNewFromObj(vm,
                                     VIR_DOMAIN_EVENT_STARTED,
                                     VIR_DOMAIN_EVENT_STARTED_MIGRATED);
    ret = 0;

cleanup:
    virDomainDefFree(def);
    if (unixfile) {
        /* unixfile may be NULL on early exits; unlink(NULL) is UB */
        unlink(unixfile);
        VIR_FREE(unixfile);
    }
    if (vm)
        virDomainObjUnlock(vm);
    if (event)
        qemuDomainEventQueue(driver, event);
    qemuDriverUnlock(driver);
    return ret;
}
/* Prepare is the first step, and it runs on the destination host.
*
* This starts an empty VM listening on a TCP port.
@ -5806,7 +6233,7 @@ qemudDomainMigratePrepare2 (virConnectPtr dconn,
int *cookielen ATTRIBUTE_UNUSED,
const char *uri_in,
char **uri_out,
unsigned long flags ATTRIBUTE_UNUSED,
unsigned long flags,
const char *dname,
unsigned long resource ATTRIBUTE_UNUSED,
const char *dom_xml)
@ -5826,6 +6253,15 @@ qemudDomainMigratePrepare2 (virConnectPtr dconn,
*uri_out = NULL;
qemuDriverLock(driver);
if (flags & VIR_MIGRATE_TUNNELLED) {
/* this is a logical error; we never should have gotten here with
* VIR_MIGRATE_TUNNELLED set
*/
qemudReportError(dconn, NULL, NULL, VIR_ERR_INTERNAL_ERROR,
"%s", _("Tunnelled migration requested but invalid RPC method called"));
goto cleanup;
}
if (!dom_xml) {
qemudReportError (dconn, NULL, NULL, VIR_ERR_INTERNAL_ERROR,
"%s", _("no domain XML passed"));
@ -5967,6 +6403,209 @@ cleanup:
qemuDomainEventQueue(driver, event);
qemuDriverUnlock(driver);
return ret;
}
/* Source-side driver of a tunnelled migration.
 *
 * Flow: open a connection to the destination libvirtd, create a stream,
 * call the destination's domainMigratePrepareTunnel (which starts the
 * receiving qemu), point the local qemu's "migrate" at a private unix
 * socket, then pump everything qemu writes to that socket into the
 * stream.  Finishes with domainMigrateFinish2 regardless of outcome so
 * the destination can clean up or commit.
 *
 * Cleanup is a strictly ordered goto chain; each label undoes one
 * acquisition: close_client_sock -> qemu_cancel_migration ->
 * close_qemu_sock -> free_unix_path -> finish_migrate -> close_stream
 * -> close_dconn.  Returns 0 on success, -1 on failure.
 */
static int doTunnelMigrate(virDomainPtr dom,
                           virDomainObjPtr vm,
                           const char *uri,
                           unsigned long flags,
                           const char *dname,
                           unsigned long resource)
{
    struct qemud_driver *driver = dom->conn->privateData;
    int client_sock, qemu_sock;
    struct sockaddr_un sa_qemu, sa_client;
    socklen_t addrlen;
    virConnectPtr dconn;
    virDomainPtr ddomain;
    int retval = -1;
    ssize_t bytes;
    char buffer[65536];
    virStreamPtr st;
    char *dom_xml = NULL;
    char *unixfile = NULL;
    int internalret;
    unsigned int qemuCmdFlags;
    int status;
    unsigned long long transferred, remaining, total;

    /* the order of operations is important here; we make sure the
     * destination side is completely setup before we touch the source
     */

    dconn = virConnectOpen(uri);
    if (dconn == NULL) {
        qemudReportError(dom->conn, dom, NULL, VIR_ERR_OPERATION_FAILED,
                         _("Failed to connect to remote libvirt URI %s"), uri);
        return -1;
    }
    if (!VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
                                  VIR_DRV_FEATURE_MIGRATION_V2)) {
        qemudReportError(dom->conn, dom, NULL, VIR_ERR_OPERATION_FAILED, "%s",
                         _("Destination libvirt does not support required migration protocol 2"));
        goto close_dconn;
    }

    st = virStreamNew(dconn, 0);
    if (st == NULL)
        /* virStreamNew only fails on OOM, and it reports the error itself */
        goto close_dconn;

    /* Secure XML so device secrets etc. survive the trip intact. */
    dom_xml = virDomainDefFormat(dom->conn, vm->def, VIR_DOMAIN_XML_SECURE);
    if (!dom_xml) {
        qemudReportError(dom->conn, dom, NULL, VIR_ERR_OPERATION_FAILED,
                         "%s", _("failed to get domain xml"));
        goto close_stream;
    }

    /* This starts the receiving qemu on the destination and binds st
     * to it; from here on st is live and must be finished/aborted. */
    internalret = dconn->driver->domainMigratePrepareTunnel(dconn, st, uri,
                                                            flags, dname,
                                                            resource, dom_xml);
    VIR_FREE(dom_xml);
    if (internalret < 0)
        /* domainMigratePrepareTunnel sets the error for us */
        goto close_stream;

    if (virAsprintf(&unixfile, "%s/qemu.tunnelmigrate.src.%s",
                    driver->stateDir, vm->def->name) < 0) {
        virReportOOMError(dom->conn);
        goto finish_migrate;
    }

    /* Listening socket the local qemu will write its migration data to. */
    qemu_sock = socket(AF_UNIX, SOCK_STREAM, 0);
    if (qemu_sock < 0) {
        virReportSystemError(dom->conn, errno, "%s",
                             _("cannot open tunnelled migration socket"));
        goto free_unix_path;
    }
    memset(&sa_qemu, 0, sizeof(sa_qemu));
    sa_qemu.sun_family = AF_UNIX;
    if (virStrcpy(sa_qemu.sun_path, unixfile,
                  sizeof(sa_qemu.sun_path)) == NULL) {
        qemudReportError(dom->conn, NULL, NULL, VIR_ERR_INTERNAL_ERROR,
                         _("Unix socket '%s' too big for destination"),
                         unixfile);
        goto close_qemu_sock;
    }
    unlink(unixfile);
    if (bind(qemu_sock, (struct sockaddr *)&sa_qemu, sizeof(sa_qemu)) < 0) {
        virReportSystemError(dom->conn, errno,
                             _("Cannot bind to unix socket '%s' for tunnelled migration"),
                             unixfile);
        goto close_qemu_sock;
    }
    if (listen(qemu_sock, 1) < 0) {
        virReportSystemError(dom->conn, errno,
                             _("Cannot listen on unix socket '%s' for tunnelled migration"),
                             unixfile);
        goto close_qemu_sock;
    }

    /* check that this qemu version supports the unix migration */
    if (qemudExtractVersionInfo(vm->def->emulator, NULL, &qemuCmdFlags) < 0) {
        qemudReportError(dom->conn, NULL, NULL, VIR_ERR_INTERNAL_ERROR,
                         _("Cannot extract Qemu version from '%s'"),
                         vm->def->emulator);
        goto close_qemu_sock;
    }

    if (qemuCmdFlags & QEMUD_CMD_FLAG_MIGRATE_QEMU_UNIX)
        internalret = qemuMonitorMigrateToUnix(vm, 1, unixfile);
    else if (qemuCmdFlags & QEMUD_CMD_FLAG_MIGRATE_QEMU_EXEC) {
        const char *args[] = { "nc", "-U", unixfile, NULL };
        internalret = qemuMonitorMigrateToCommand(vm, 1, args, "/dev/null");
    }
    else {
        qemudReportError(dom->conn, NULL, NULL, VIR_ERR_OPERATION_FAILED,
                         "%s", _("Source qemu is too old to support tunnelled migration"));
        goto close_qemu_sock;
    }
    if (internalret < 0) {
        qemudReportError(dom->conn, dom, NULL, VIR_ERR_OPERATION_FAILED,
                         "%s", _("tunnelled migration monitor command failed"));
        goto close_qemu_sock;
    }

    /* it is also possible that the migrate didn't fail initially, but
     * rather failed later on.  Check the output of "info migrate"
     */
    if (qemuMonitorGetMigrationStatus(vm, &status,
                                      &transferred,
                                      &remaining,
                                      &total) < 0) {
        goto qemu_cancel_migration;
    }

    if (status == QEMU_MONITOR_MIGRATION_STATUS_ERROR) {
        qemudReportError(dom->conn, dom, NULL, VIR_ERR_OPERATION_FAILED,
                         "%s",_("migrate failed"));
        goto qemu_cancel_migration;
    }

    /* Wait for qemu to connect to our listening socket. */
    addrlen = sizeof(sa_client);
    while ((client_sock = accept(qemu_sock, (struct sockaddr *)&sa_client, &addrlen)) < 0) {
        if (errno == EAGAIN || errno == EINTR)
            continue;
        virReportSystemError(dom->conn, errno, "%s",
                             _("tunnelled migration failed to accept from qemu"));
        goto qemu_cancel_migration;
    }

    /* Pump qemu's migration data into the stream until EOF. */
    for (;;) {
        bytes = saferead(client_sock, buffer, sizeof(buffer));
        if (bytes < 0) {
            /* NOTE(review): reports against dconn here while sibling
             * errors use dom->conn — presumably unintentional; confirm. */
            virReportSystemError(dconn, errno, "%s",
                                 _("tunnelled migration failed to read from qemu"));
            goto close_client_sock;
        }
        else if (bytes == 0)
            /* EOF; get out of here */
            break;

        if (virStreamSend(st, buffer, bytes) < 0) {
            qemudReportError(dom->conn, dom, NULL, VIR_ERR_OPERATION_FAILED,
                             _("Failed to write migration data to remote libvirtd"));
            virStreamAbort(st);
            goto close_client_sock;
        }
    }

    if (virStreamFinish(st) < 0)
        /* virStreamFinish set the error for us */
        goto close_client_sock;

    retval = 0;

close_client_sock:
    close(client_sock);

qemu_cancel_migration:
    if (retval != 0)
        qemuMonitorMigrateCancel(vm);

close_qemu_sock:
    close(qemu_sock);

free_unix_path:
    unlink(unixfile);
    VIR_FREE(unixfile);

finish_migrate:
    /* Always tell the destination the outcome so it can commit or
     * tear down the receiving domain. */
    dname = dname ? dname : dom->name;
    ddomain = dconn->driver->domainMigrateFinish2
        (dconn, dname, NULL, 0, uri, flags, retval);
    if (ddomain)
        virUnrefDomain(ddomain);

close_stream:
    /* don't call virStreamFree(), because that resets any pending errors */
    virUnrefStream(st);

close_dconn:
    /* don't call virConnectClose(), because that resets any pending errors */
    virUnrefConnect(dconn);

    return retval;
}
/* Perform is the second step, and it runs on the source host. */
@ -6022,6 +6661,7 @@ qemudDomainMigratePerform (virDomainPtr dom,
qemuMonitorSetMigrationSpeed(vm, resource) < 0)
goto cleanup;
if (!(flags & VIR_MIGRATE_TUNNELLED)) {
/* Issue the migrate command. */
if (STRPREFIX(uri, "tcp:") && !STRPREFIX(uri, "tcp://")) {
/* HACK: source host generates bogus URIs, so fix them up */
@ -6059,6 +6699,12 @@ qemudDomainMigratePerform (virDomainPtr dom,
"%s", _("migrate did not successfully complete"));
goto cleanup;
}
}
else {
if (doTunnelMigrate(dom, vm, uri, flags, dname, resource) < 0)
/* doTunnelMigrate already set the error, so just get out */
goto cleanup;
}
/* Clean up the source domain. */
qemudShutdownVMDaemon (dom->conn, driver, vm);
@ -6357,6 +7003,7 @@ static virDriver qemuDriver = {
qemudNodeDeviceDettach, /* nodeDeviceDettach */
qemudNodeDeviceReAttach, /* nodeDeviceReAttach */
qemudNodeDeviceReset, /* nodeDeviceReset */
qemudDomainMigratePrepareTunnel, /* domainMigratePrepareTunnel */
};

View File

@ -6698,7 +6698,6 @@ done:
}
#if 0
static struct private_stream_data *
remoteStreamOpen(virStreamPtr st,
int output ATTRIBUTE_UNUSED,
@ -7049,9 +7048,51 @@ static virStreamDriver remoteStreamDrv = {
.streamUpdateCallback = remoteStreamEventUpdateCallback,
.streamRemoveCallback = remoteStreamEventRemoveCallback,
};
#endif
/* Client-side implementation of domainMigratePrepareTunnel.
 *
 * Opens an RPC stream keyed to REMOTE_PROC_DOMAIN_MIGRATE_PREPARE_TUNNEL,
 * attaches it to st, then issues the PrepareTunnel call itself.  If the
 * call fails, the half-set-up stream is released again.  On success the
 * stream is left attached; subsequent virStreamSend calls carry the
 * migration data.  Returns 0 on success, -1 on error.
 */
static int
remoteDomainMigratePrepareTunnel(virConnectPtr conn,
                                 virStreamPtr st,
                                 const char *uri_in,
                                 unsigned long flags,
                                 const char *dname,
                                 unsigned long resource,
                                 const char *dom_xml)
{
    struct private_data *priv = conn->privateData;
    struct private_stream_data *privst = NULL;
    int rv = -1;
    remote_domain_migrate_prepare_tunnel_args args;

    remoteDriverLock(priv);

    if (!(privst = remoteStreamOpen(st, 1, REMOTE_PROC_DOMAIN_MIGRATE_PREPARE_TUNNEL, priv->counter)))
        goto done;

    st->driver = &remoteStreamDrv;
    st->privateData = privst;

    /* optional strings travel as pointer-to-pointer on the wire */
    args.uri_in = uri_in == NULL ? NULL : (char **) &uri_in;
    args.flags = flags;
    args.dname = dname == NULL ? NULL : (char **) &dname;
    args.resource = resource;
    args.dom_xml = (char *) dom_xml;

    if (call(conn, priv, 0, REMOTE_PROC_DOMAIN_MIGRATE_PREPARE_TUNNEL,
             (xdrproc_t) xdr_remote_domain_migrate_prepare_tunnel_args, (char *) &args,
             (xdrproc_t) xdr_void, NULL) == -1) {
        remoteStreamRelease(st);
        goto done;
    }

    rv = 0;

done:
    remoteDriverUnlock(priv);
    return rv;
}
/*----------------------------------------------------------------------*/
@ -8410,6 +8451,7 @@ static virDriver remote_driver = {
remoteNodeDeviceDettach, /* nodeDeviceDettach */
remoteNodeDeviceReAttach, /* nodeDeviceReAttach */
remoteNodeDeviceReset, /* nodeDeviceReset */
remoteDomainMigratePrepareTunnel, /* domainMigratePrepareTunnel */
};
static virNetworkDriver network_driver = {

View File

@ -2697,6 +2697,23 @@ xdr_remote_secret_lookup_by_usage_ret (XDR *xdrs, remote_secret_lookup_by_usage_
return TRUE;
}
/* XDR (de)serializer for remote_domain_migrate_prepare_tunnel_args.
 * Field order must match the struct in remote_protocol.x exactly.
 * NOTE(review): this file appears to be rpcgen-generated from the .x
 * source — make changes there, not here. */
bool_t
xdr_remote_domain_migrate_prepare_tunnel_args (XDR *xdrs, remote_domain_migrate_prepare_tunnel_args *objp)
{
    if (!xdr_remote_string (xdrs, &objp->uri_in))
        return FALSE;
    if (!xdr_uint64_t (xdrs, &objp->flags))
        return FALSE;
    if (!xdr_remote_string (xdrs, &objp->dname))
        return FALSE;
    if (!xdr_uint64_t (xdrs, &objp->resource))
        return FALSE;
    if (!xdr_remote_nonnull_string (xdrs, &objp->dom_xml))
        return FALSE;
    return TRUE;
}
bool_t
xdr_remote_procedure (XDR *xdrs, remote_procedure *objp)
{

View File

@ -1528,6 +1528,16 @@ struct remote_secret_lookup_by_usage_ret {
remote_nonnull_secret secret;
};
typedef struct remote_secret_lookup_by_usage_ret remote_secret_lookup_by_usage_ret;
/* Wire arguments for REMOTE_PROC_DOMAIN_MIGRATE_PREPARE_TUNNEL.
 * Generated from the XDR protocol definition: member order defines the
 * on-wire encoding, so members must not be reordered. */
struct remote_domain_migrate_prepare_tunnel_args {
remote_string uri_in; /* optional destination URI; caller may pass NULL */
uint64_t flags; /* VIR_MIGRATE_* bitmask (e.g. LIVE, TUNNELLED) */
remote_string dname; /* optional; presumably the domain's name on the destination — verify against virDomainMigrate callers */
uint64_t resource; /* migration resource/bandwidth limit — units not shown here; TODO confirm */
remote_nonnull_string dom_xml; /* domain XML description; must be non-NULL */
};
typedef struct remote_domain_migrate_prepare_tunnel_args remote_domain_migrate_prepare_tunnel_args;
#define REMOTE_PROGRAM 0x20008086
#define REMOTE_PROTOCOL_VERSION 1
@ -1679,6 +1689,7 @@ enum remote_procedure {
REMOTE_PROC_SECRET_GET_VALUE = 145,
REMOTE_PROC_SECRET_UNDEFINE = 146,
REMOTE_PROC_SECRET_LOOKUP_BY_USAGE = 147,
REMOTE_PROC_DOMAIN_MIGRATE_PREPARE_TUNNEL = 148,
};
typedef enum remote_procedure remote_procedure;
@ -1959,6 +1970,7 @@ extern bool_t xdr_remote_secret_get_value_ret (XDR *, remote_secret_get_value_r
extern bool_t xdr_remote_secret_undefine_args (XDR *, remote_secret_undefine_args*);
extern bool_t xdr_remote_secret_lookup_by_usage_args (XDR *, remote_secret_lookup_by_usage_args*);
extern bool_t xdr_remote_secret_lookup_by_usage_ret (XDR *, remote_secret_lookup_by_usage_ret*);
extern bool_t xdr_remote_domain_migrate_prepare_tunnel_args (XDR *, remote_domain_migrate_prepare_tunnel_args*);
extern bool_t xdr_remote_procedure (XDR *, remote_procedure*);
extern bool_t xdr_remote_message_type (XDR *, remote_message_type*);
extern bool_t xdr_remote_message_status (XDR *, remote_message_status*);
@ -2213,6 +2225,7 @@ extern bool_t xdr_remote_secret_get_value_ret ();
extern bool_t xdr_remote_secret_undefine_args ();
extern bool_t xdr_remote_secret_lookup_by_usage_args ();
extern bool_t xdr_remote_secret_lookup_by_usage_ret ();
extern bool_t xdr_remote_domain_migrate_prepare_tunnel_args ();
extern bool_t xdr_remote_procedure ();
extern bool_t xdr_remote_message_type ();
extern bool_t xdr_remote_message_status ();

View File

@ -1355,6 +1355,14 @@ struct remote_secret_lookup_by_usage_ret {
remote_nonnull_secret secret;
};
/* Arguments for the tunnelled-migration prepare step
 * (REMOTE_PROC_DOMAIN_MIGRATE_PREPARE_TUNNEL).  Field order defines the
 * on-wire XDR encoding; do not reorder.  uri_in and dname are optional
 * (remote_string allows NULL); dom_xml is required. */
struct remote_domain_migrate_prepare_tunnel_args {
remote_string uri_in;
unsigned hyper flags;
remote_string dname;
unsigned hyper resource;
remote_nonnull_string dom_xml;
};
/*----- Protocol. -----*/
/* Define the program number, protocol version and procedure numbers here. */
@ -1523,7 +1531,9 @@ enum remote_procedure {
REMOTE_PROC_SECRET_SET_VALUE = 144,
REMOTE_PROC_SECRET_GET_VALUE = 145,
REMOTE_PROC_SECRET_UNDEFINE = 146,
REMOTE_PROC_SECRET_LOOKUP_BY_USAGE = 147
REMOTE_PROC_SECRET_LOOKUP_BY_USAGE = 147,
REMOTE_PROC_DOMAIN_MIGRATE_PREPARE_TUNNEL = 148
};

View File

@ -4267,6 +4267,7 @@ static virDriver testDriver = {
NULL, /* nodeDeviceDettach */
NULL, /* nodeDeviceReAttach */
NULL, /* nodeDeviceReset */
NULL, /* domainMigratePrepareTunnel */
};
static virNetworkDriver testNetworkDriver = {

View File

@ -1861,6 +1861,7 @@ static virDriver umlDriver = {
NULL, /* nodeDeviceDettach */
NULL, /* nodeDeviceReAttach */
NULL, /* nodeDeviceReset */
NULL, /* domainMigratePrepareTunnel */
};

View File

@ -6467,7 +6467,7 @@ virDriver NAME(Driver) = {
NULL, /* nodeDeviceDettach */
NULL, /* nodeDeviceReAttach */
NULL, /* nodeDeviceReset */
NULL, /* domainMigratePrepareTunnel */
};
virNetworkDriver NAME(NetworkDriver) = {

View File

@ -1722,6 +1722,7 @@ static virDriver xenUnifiedDriver = {
xenUnifiedNodeDeviceDettach, /* nodeDeviceDettach */
xenUnifiedNodeDeviceReAttach, /* nodeDeviceReAttach */
xenUnifiedNodeDeviceReset, /* nodeDeviceReset */
NULL, /* domainMigratePrepareTunnel */
};
/**

View File

@ -2462,6 +2462,7 @@ static const vshCmdInfo info_migrate[] = {
static const vshCmdOptDef opts_migrate[] = {
{"live", VSH_OT_BOOL, 0, gettext_noop("live migration")},
{"tunnelled", VSH_OT_BOOL, 0, gettext_noop("tunnelled migration")},
{"domain", VSH_OT_DATA, VSH_OFLAG_REQ, gettext_noop("domain name, id or uuid")},
{"desturi", VSH_OT_DATA, VSH_OFLAG_REQ, gettext_noop("connection URI of the destination host")},
{"migrateuri", VSH_OT_DATA, 0, gettext_noop("migration URI, usually can be omitted")},
@ -2499,12 +2500,31 @@ cmdMigrate (vshControl *ctl, const vshCmd *cmd)
if (vshCommandOptBool (cmd, "live"))
flags |= VIR_MIGRATE_LIVE;
/* Temporarily connect to the destination host. */
dconn = virConnectOpenAuth (desturi, virConnectAuthPtrDefault, 0);
if (vshCommandOptBool (cmd, "tunnelled"))
flags |= VIR_MIGRATE_TUNNELLED;
if (!(flags & VIR_MIGRATE_TUNNELLED)) {
/* For regular live migration, temporarily connect to the destination
* host. For tunnelled migration, that will be done by the remote
* libvirtd.
*/
dconn = virConnectOpenAuth(desturi, virConnectAuthPtrDefault, 0);
if (!dconn) goto done;
}
else {
/* when doing tunnelled migration, use migrateuri if it's available,
* but if not, fall back to desturi. This allows both of these
* to work:
*
* virsh migrate guest qemu+tls://dest/system
* virsh migrate guest qemu+tls://dest/system qemu+tls://dest-alt/system
*/
if (migrateuri == NULL)
migrateuri = desturi;
}
/* Migrate. */
ddom = virDomainMigrate (dom, dconn, flags, dname, migrateuri, 0);
ddom = virDomainMigrate(dom, dconn, flags, dname, migrateuri, 0);
if (!ddom) goto done;
ret = TRUE;