2012-10-01 15:18:20 +00:00
|
|
|
|
2011-01-31 10:47:03 +00:00
|
|
|
/*
 * qemu_migration.c: QEMU migration handling
 *
 * Copyright (C) 2006-2013 Red Hat, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library. If not, see
 * <http://www.gnu.org/licenses/>.
 *
 */
|
|
|
|
|
|
|
|
#include <config.h>
|
|
|
|
|
2013-03-22 13:52:25 +00:00
|
|
|
#include <netdb.h>
|
|
|
|
#include <sys/socket.h>
|
2011-01-31 10:47:03 +00:00
|
|
|
#include <sys/time.h>
|
2013-01-08 21:02:05 +00:00
|
|
|
#ifdef WITH_GNUTLS
|
2013-01-07 14:54:18 +00:00
|
|
|
# include <gnutls/gnutls.h>
|
|
|
|
# include <gnutls/x509.h>
|
|
|
|
#endif
|
2011-08-29 23:31:42 +00:00
|
|
|
#include <fcntl.h>
|
2012-04-23 14:17:55 +00:00
|
|
|
#include <poll.h>
|
2011-01-31 10:47:03 +00:00
|
|
|
|
|
|
|
#include "qemu_migration.h"
|
|
|
|
#include "qemu_monitor.h"
|
|
|
|
#include "qemu_domain.h"
|
|
|
|
#include "qemu_process.h"
|
|
|
|
#include "qemu_capabilities.h"
|
2013-01-31 13:48:06 +00:00
|
|
|
#include "qemu_command.h"
|
2011-03-10 00:35:13 +00:00
|
|
|
#include "qemu_cgroup.h"
|
2013-06-25 07:44:14 +00:00
|
|
|
#include "qemu_hotplug.h"
|
2011-01-31 10:47:03 +00:00
|
|
|
|
Move qemu_audit.h helpers into shared code
The LXC and UML drivers can both make use of auditing. Move
the qemu_audit.{c,h} files to src/conf/domain_audit.{c,h}
* src/conf/domain_audit.c: Rename from src/qemu/qemu_audit.c
* src/conf/domain_audit.h: Rename from src/qemu/qemu_audit.h
* src/Makefile.am: Remove qemu_audit.{c,h}, add domain_audit.{c,h}
* src/qemu/qemu_audit.h, src/qemu/qemu_cgroup.c,
src/qemu/qemu_command.c, src/qemu/qemu_driver.c,
src/qemu/qemu_hotplug.c, src/qemu/qemu_migration.c,
src/qemu/qemu_process.c: Update for changed audit API names
2011-07-04 10:56:13 +00:00
|
|
|
#include "domain_audit.h"
|
2012-12-12 17:59:27 +00:00
|
|
|
#include "virlog.h"
|
2012-12-13 18:21:53 +00:00
|
|
|
#include "virerror.h"
|
2012-12-12 18:06:53 +00:00
|
|
|
#include "viralloc.h"
|
2011-07-19 18:32:58 +00:00
|
|
|
#include "virfile.h"
|
2011-01-31 10:47:03 +00:00
|
|
|
#include "datatypes.h"
|
|
|
|
#include "fdstream.h"
|
2012-12-13 18:01:25 +00:00
|
|
|
#include "viruuid.h"
|
2011-11-29 12:33:23 +00:00
|
|
|
#include "virtime.h"
|
2011-05-18 16:34:21 +00:00
|
|
|
#include "locking/domain_lock.h"
|
2011-08-12 08:54:05 +00:00
|
|
|
#include "rpc/virnetsocket.h"
|
2012-12-13 15:25:48 +00:00
|
|
|
#include "virstoragefile.h"
|
2012-02-24 18:48:55 +00:00
|
|
|
#include "viruri.h"
|
2012-12-12 17:00:34 +00:00
|
|
|
#include "virhook.h"
|
2013-04-03 10:36:23 +00:00
|
|
|
#include "virstring.h"
|
2013-06-25 13:49:21 +00:00
|
|
|
#include "virtypedparam.h"
|
2011-01-31 10:47:03 +00:00
|
|
|
|
|
|
|
#define VIR_FROM_THIS VIR_FROM_QEMU
|
|
|
|
|
2011-07-19 00:27:30 +00:00
|
|
|
/* String names for the migration job phases; the order must match the
 * QEMU_MIGRATION_PHASE_* enum values (up to QEMU_MIGRATION_PHASE_LAST). */
VIR_ENUM_IMPL(qemuMigrationJobPhase, QEMU_MIGRATION_PHASE_LAST,
              "none",
              "perform2",
              "begin3",
              "perform3",
              "perform3_done",
              "confirm3_cancelled",
              "confirm3",
              "prepare",
              "finish2",
              "finish3",
);
|
|
|
|
|
2011-02-17 13:17:59 +00:00
|
|
|
/* Bit positions identifying the optional payload sections that a
 * migration cookie may carry.  Used both as bit indexes (see
 * qemuMigrationCookieFeatures below) and as indexes into the
 * qemuMigrationCookieFlag string table. */
enum qemuMigrationCookieFlags {
    QEMU_MIGRATION_COOKIE_FLAG_GRAPHICS,
    QEMU_MIGRATION_COOKIE_FLAG_LOCKSTATE,
    QEMU_MIGRATION_COOKIE_FLAG_PERSISTENT,
    QEMU_MIGRATION_COOKIE_FLAG_NETWORK,
    QEMU_MIGRATION_COOKIE_FLAG_NBD,

    QEMU_MIGRATION_COOKIE_FLAG_LAST
};
|
|
|
|
|
|
|
|
/* XML <feature name='...'/> strings for the cookie sections; the order
 * must match enum qemuMigrationCookieFlags. */
VIR_ENUM_DECL(qemuMigrationCookieFlag);
VIR_ENUM_IMPL(qemuMigrationCookieFlag,
              QEMU_MIGRATION_COOKIE_FLAG_LAST,
              "graphics",
              "lockstate",
              "persistent",
              "network",
              "nbd");
|
2011-05-18 15:33:17 +00:00
|
|
|
|
|
|
|
/* Bitmask values derived from the flag bit positions above; these are
 * what gets stored in qemuMigrationCookie.flags / flagsMandatory. */
enum qemuMigrationCookieFeatures {
    QEMU_MIGRATION_COOKIE_GRAPHICS = (1 << QEMU_MIGRATION_COOKIE_FLAG_GRAPHICS),
    QEMU_MIGRATION_COOKIE_LOCKSTATE = (1 << QEMU_MIGRATION_COOKIE_FLAG_LOCKSTATE),
    QEMU_MIGRATION_COOKIE_PERSISTENT = (1 << QEMU_MIGRATION_COOKIE_FLAG_PERSISTENT),
    QEMU_MIGRATION_COOKIE_NETWORK = (1 << QEMU_MIGRATION_COOKIE_FLAG_NETWORK),
    QEMU_MIGRATION_COOKIE_NBD = (1 << QEMU_MIGRATION_COOKIE_FLAG_NBD),
};
|
|
|
|
|
|
|
|
typedef struct _qemuMigrationCookieGraphics qemuMigrationCookieGraphics;
typedef qemuMigrationCookieGraphics *qemuMigrationCookieGraphicsPtr;
/* Graphics (VNC/SPICE) connection details transferred in the cookie. */
struct _qemuMigrationCookieGraphics {
    int type;         /* graphics type; compared against VIR_DOMAIN_GRAPHICS_TYPE_* */
    int port;         /* plain display port */
    int tlsPort;      /* TLS display port; set to -1 when SPICE TLS is disabled */
    char *listen;     /* listen address of the display server */
    char *tlsSubject; /* x509 subject of the server certificate, when TLS is on */
};
|
|
|
|
|
2012-10-01 15:18:20 +00:00
|
|
|
typedef struct _qemuMigrationCookieNetData qemuMigrationCookieNetData;
typedef qemuMigrationCookieNetData *qemuMigrationCookieNetDataPtr;
/* Per-interface state saved for migration. */
struct _qemuMigrationCookieNetData {
    int vporttype; /* enum virNetDevVPortProfile */

    /*
     * Array of pointers to saved data. Each VIF will have its own
     * data to transfer.
     */
    char *portdata;
};
|
|
|
|
|
|
|
|
typedef struct _qemuMigrationCookieNetwork qemuMigrationCookieNetwork;
typedef qemuMigrationCookieNetwork *qemuMigrationCookieNetworkPtr;
/* Network section of the migration cookie: per-VIF port data. */
struct _qemuMigrationCookieNetwork {
    /* How many virtual NICs are we saving data for? */
    int nnets;

    qemuMigrationCookieNetDataPtr net; /* array of nnets entries */
};
|
|
|
|
|
2013-01-29 12:38:50 +00:00
|
|
|
typedef struct _qemuMigrationCookieNBD qemuMigrationCookieNBD;
typedef qemuMigrationCookieNBD *qemuMigrationCookieNBDPtr;
/* NBD section of the migration cookie (disk mirroring support). */
struct _qemuMigrationCookieNBD {
    int port; /* on which port does NBD server listen for incoming data */
};
|
|
|
|
|
2011-01-24 18:06:16 +00:00
|
|
|
typedef struct _qemuMigrationCookie qemuMigrationCookie;
typedef qemuMigrationCookie *qemuMigrationCookiePtr;
/* The migration cookie: data exchanged between source and destination
 * hosts during the migration handshake. */
struct _qemuMigrationCookie {
    unsigned int flags;          /* QEMU_MIGRATION_COOKIE_* sections present */
    unsigned int flagsMandatory; /* sections the receiving side must understand */

    /* Host properties */
    unsigned char localHostuuid[VIR_UUID_BUFLEN];
    unsigned char remoteHostuuid[VIR_UUID_BUFLEN];
    char *localHostname;
    char *remoteHostname;

    /* Guest properties */
    unsigned char uuid[VIR_UUID_BUFLEN];
    char *name;

    /* If (flags & QEMU_MIGRATION_COOKIE_LOCKSTATE) */
    char *lockState;
    char *lockDriver;

    /* If (flags & QEMU_MIGRATION_COOKIE_GRAPHICS) */
    qemuMigrationCookieGraphicsPtr graphics;

    /* If (flags & QEMU_MIGRATION_COOKIE_PERSISTENT) */
    virDomainDefPtr persistent;

    /* If (flags & QEMU_MIGRATION_COOKIE_NETWORK) */
    qemuMigrationCookieNetworkPtr network;

    /* If (flags & QEMU_MIGRATION_COOKIE_NBD) */
    qemuMigrationCookieNBDPtr nbd;
};
|
|
|
|
|
2011-02-17 13:17:59 +00:00
|
|
|
/* Release a qemuMigrationCookieGraphics and the strings it owns.
 * Safe to call with NULL. */
static void qemuMigrationCookieGraphicsFree(qemuMigrationCookieGraphicsPtr grap)
{
    if (!grap)
        return;
    VIR_FREE(grap->listen);
    VIR_FREE(grap->tlsSubject);
    VIR_FREE(grap);
}
|
|
|
|
|
2011-01-24 18:06:16 +00:00
|
|
|
|
2012-10-01 15:18:20 +00:00
|
|
|
/* Release a qemuMigrationCookieNetwork, including the per-VIF port
 * data strings and the net array itself.  Safe to call with NULL. */
static void
qemuMigrationCookieNetworkFree(qemuMigrationCookieNetworkPtr network)
{
    size_t i;

    if (!network)
        return;

    if (network->net) {
        for (i = 0; i < network->nnets; i++)
            VIR_FREE(network->net[i].portdata);
    }
    VIR_FREE(network->net);
    VIR_FREE(network);
}
|
|
|
|
|
|
|
|
|
2011-01-24 18:06:16 +00:00
|
|
|
/* Free a migration cookie and every optional section it carries.
 * Note: mig->persistent is intentionally NOT freed here — it aliases
 * the domain's newDef (set by qemuMigrationCookieAddPersistent) and is
 * owned by the domain object.  Safe to call with NULL. */
static void qemuMigrationCookieFree(qemuMigrationCookiePtr mig)
{
    if (!mig)
        return;

    if (mig->flags & QEMU_MIGRATION_COOKIE_GRAPHICS)
        qemuMigrationCookieGraphicsFree(mig->graphics);

    if (mig->flags & QEMU_MIGRATION_COOKIE_NETWORK)
        qemuMigrationCookieNetworkFree(mig->network);

    VIR_FREE(mig->localHostname);
    VIR_FREE(mig->remoteHostname);
    VIR_FREE(mig->name);
    VIR_FREE(mig->lockState);
    VIR_FREE(mig->lockDriver);
    VIR_FREE(mig->nbd);
    VIR_FREE(mig);
}
|
|
|
|
|
|
|
|
|
2013-01-08 21:02:05 +00:00
|
|
|
#ifdef WITH_GNUTLS
/* Read the server certificate under @certdir and return its x509
 * subject (DN) as a newly allocated string, or NULL on error (a
 * libvirt error is reported).  The caller owns the returned string.
 *
 * Fix: the gnutls_x509_crt_t was previously never deinitialized,
 * leaking the certificate object on every call (success or failure
 * after gnutls_x509_crt_init succeeded). */
static char *
qemuDomainExtractTLSSubject(const char *certdir)
{
    char *certfile = NULL;
    char *subject = NULL;
    char *pemdata = NULL;
    gnutls_datum_t pemdatum;
    gnutls_x509_crt_t cert;
    int ret;
    size_t subjectlen;

    if (virAsprintf(&certfile, "%s/server-cert.pem", certdir) < 0)
        goto error;

    /* 8k is ample for a single PEM certificate */
    if (virFileReadAll(certfile, 8192, &pemdata) < 0) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       _("unable to read server cert %s"), certfile);
        goto error;
    }

    ret = gnutls_x509_crt_init(&cert);
    if (ret < 0) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       _("cannot initialize cert object: %s"),
                       gnutls_strerror(ret));
        goto error;
    }

    pemdatum.data = (unsigned char *)pemdata;
    pemdatum.size = strlen(pemdata);

    ret = gnutls_x509_crt_import(cert, &pemdatum, GNUTLS_X509_FMT_PEM);
    if (ret < 0) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       _("cannot load cert data from %s: %s"),
                       certfile, gnutls_strerror(ret));
        goto cleanup_cert;
    }

    subjectlen = 1024;
    if (VIR_ALLOC_N(subject, subjectlen+1) < 0)
        goto cleanup_cert;

    /* get_dn updates subjectlen to the actual DN length */
    gnutls_x509_crt_get_dn(cert, subject, &subjectlen);
    subject[subjectlen] = '\0';

    gnutls_x509_crt_deinit(cert);
    VIR_FREE(certfile);
    VIR_FREE(pemdata);

    return subject;

cleanup_cert:
    gnutls_x509_crt_deinit(cert);
error:
    VIR_FREE(certfile);
    VIR_FREE(pemdata);
    return NULL;
}
#endif
|
2011-02-17 13:17:59 +00:00
|
|
|
|
|
|
|
/* Build the graphics section of the cookie for the given graphics
 * device @def: record port, TLS port (SPICE only), listen address, and
 * the server certificate subject when TLS is enabled, so the
 * destination can relocate connected clients.
 * Returns a new qemuMigrationCookieGraphics, or NULL on error. */
static qemuMigrationCookieGraphicsPtr
qemuMigrationCookieGraphicsAlloc(virQEMUDriverPtr driver,
                                 virDomainGraphicsDefPtr def)
{
    qemuMigrationCookieGraphicsPtr mig = NULL;
    const char *listenAddr;
    virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);

    if (VIR_ALLOC(mig) < 0)
        goto error;

    mig->type = def->type;
    if (mig->type == VIR_DOMAIN_GRAPHICS_TYPE_VNC) {
        mig->port = def->data.vnc.port;
        /* fall back to the driver-wide listen address when the device
         * has no explicit one */
        listenAddr = virDomainGraphicsListenGetAddress(def, 0);
        if (!listenAddr)
            listenAddr = cfg->vncListen;

#ifdef WITH_GNUTLS
        if (cfg->vncTLS &&
            !(mig->tlsSubject = qemuDomainExtractTLSSubject(cfg->vncTLSx509certdir)))
            goto error;
#endif
    } else {
        /* non-VNC is handled as SPICE here */
        mig->port = def->data.spice.port;
        if (cfg->spiceTLS)
            mig->tlsPort = def->data.spice.tlsPort;
        else
            mig->tlsPort = -1;
        listenAddr = virDomainGraphicsListenGetAddress(def, 0);
        if (!listenAddr)
            listenAddr = cfg->spiceListen;

#ifdef WITH_GNUTLS
        if (cfg->spiceTLS &&
            !(mig->tlsSubject = qemuDomainExtractTLSSubject(cfg->spiceTLSx509certdir)))
            goto error;
#endif
    }
    if (VIR_STRDUP(mig->listen, listenAddr) < 0)
        goto error;

    virObjectUnref(cfg);
    return mig;

error:
    qemuMigrationCookieGraphicsFree(mig);
    virObjectUnref(cfg);
    return NULL;
}
|
|
|
|
|
|
|
|
|
2012-10-01 15:18:20 +00:00
|
|
|
/* Collect per-interface migration data for every NIC of @def.
 * Currently only Open vSwitch virtual ports have data to transfer;
 * other vport types are recorded by type only.
 * Returns a new qemuMigrationCookieNetwork, or NULL on error. */
static qemuMigrationCookieNetworkPtr
qemuMigrationCookieNetworkAlloc(virQEMUDriverPtr driver ATTRIBUTE_UNUSED,
                                virDomainDefPtr def)
{
    qemuMigrationCookieNetworkPtr mig;
    size_t i;

    if (VIR_ALLOC(mig) < 0)
        goto error;

    mig->nnets = def->nnets;

    if (VIR_ALLOC_N(mig->net, def->nnets) <0)
        goto error;

    for (i = 0; i < def->nnets; i++) {
        virDomainNetDefPtr netptr;
        virNetDevVPortProfilePtr vport;

        netptr = def->nets[i];
        vport = virDomainNetGetActualVirtPortProfile(netptr);

        if (vport) {
            mig->net[i].vporttype = vport->virtPortType;

            switch (vport->virtPortType) {
            case VIR_NETDEV_VPORT_PROFILE_NONE:
            case VIR_NETDEV_VPORT_PROFILE_8021QBG:
            case VIR_NETDEV_VPORT_PROFILE_8021QBH:
                /* nothing extra to transfer for these types */
                break;
            case VIR_NETDEV_VPORT_PROFILE_OPENVSWITCH:
                /* ask OVS for the port state to replay on the target */
                if (virNetDevOpenvswitchGetMigrateData(&mig->net[i].portdata,
                                                       netptr->ifname) != 0) {
                    virReportSystemError(VIR_ERR_INTERNAL_ERROR,
                                         _("Unable to run command to get OVS port data for "
                                           "interface %s"), netptr->ifname);
                    goto error;
                }
                break;
            default:
                break;
            }
        }
    }
    return mig;

error:
    qemuMigrationCookieNetworkFree(mig);
    return NULL;
}
|
|
|
|
|
2011-01-24 18:06:16 +00:00
|
|
|
/* Allocate a fresh migration cookie for @dom, pre-filled with the
 * guest name (preferring the pre-migration original name) and UUID,
 * plus the local hostname and host UUID.  Optional sections are added
 * later via the qemuMigrationCookieAdd* helpers.
 * Returns the new cookie, or NULL on error. */
static qemuMigrationCookiePtr
qemuMigrationCookieNew(virDomainObjPtr dom)
{
    qemuDomainObjPrivatePtr priv = dom->privateData;
    qemuMigrationCookiePtr mig = NULL;
    const char *name;

    if (VIR_ALLOC(mig) < 0)
        goto error;

    /* use the name the domain had before migration renamed it, if any */
    if (priv->origname)
        name = priv->origname;
    else
        name = dom->def->name;
    if (VIR_STRDUP(mig->name, name) < 0)
        goto error;
    memcpy(mig->uuid, dom->def->uuid, VIR_UUID_BUFLEN);

    if (!(mig->localHostname = virGetHostname()))
        goto error;
    if (virGetHostUUID(mig->localHostuuid) < 0) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("Unable to obtain host UUID"));
        goto error;
    }

    return mig;

error:
    qemuMigrationCookieFree(mig);
    return NULL;
}
|
|
|
|
|
|
|
|
|
2011-02-17 13:17:59 +00:00
|
|
|
static int
|
|
|
|
qemuMigrationCookieAddGraphics(qemuMigrationCookiePtr mig,
|
2012-11-28 16:43:10 +00:00
|
|
|
virQEMUDriverPtr driver,
|
2011-02-17 13:17:59 +00:00
|
|
|
virDomainObjPtr dom)
|
|
|
|
{
|
2013-07-01 12:16:51 +00:00
|
|
|
size_t i = 0;
|
|
|
|
|
2011-02-17 13:17:59 +00:00
|
|
|
if (mig->flags & QEMU_MIGRATION_COOKIE_GRAPHICS) {
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
|
|
|
|
_("Migration graphics data already present"));
|
2011-02-17 13:17:59 +00:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2013-07-01 12:16:51 +00:00
|
|
|
for (i = 0; i < dom->def->ngraphics; i++) {
|
|
|
|
if (dom->def->graphics[i]->type == VIR_DOMAIN_GRAPHICS_TYPE_SPICE) {
|
|
|
|
if (!(mig->graphics =
|
|
|
|
qemuMigrationCookieGraphicsAlloc(driver, dom->def->graphics[i])))
|
|
|
|
return -1;
|
|
|
|
mig->flags |= QEMU_MIGRATION_COOKIE_GRAPHICS;
|
|
|
|
break;
|
|
|
|
}
|
2011-05-17 08:54:22 +00:00
|
|
|
}
|
2011-02-17 13:17:59 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-05-18 16:34:21 +00:00
|
|
|
/* Attach lock-manager state to @mig: the saved lock state for a paused
 * domain, or state freshly inquired from the lock manager for a
 * running one, plus the lock driver name.  Marked mandatory so the
 * destination must understand it.  Returns 0 on success, -1 on error. */
static int
qemuMigrationCookieAddLockstate(qemuMigrationCookiePtr mig,
                                virQEMUDriverPtr driver,
                                virDomainObjPtr dom)
{
    qemuDomainObjPrivatePtr priv = dom->privateData;

    if (mig->flags & QEMU_MIGRATION_COOKIE_LOCKSTATE) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("Migration lockstate data already present"));
        return -1;
    }

    if (virDomainObjGetState(dom, NULL) == VIR_DOMAIN_PAUSED) {
        /* paused: the state was stashed when the CPUs stopped */
        if (VIR_STRDUP(mig->lockState, priv->lockState) < 0)
            return -1;
    } else {
        if (virDomainLockProcessInquire(driver->lockManager, dom, &mig->lockState) < 0)
            return -1;
    }

    if (VIR_STRDUP(mig->lockDriver, virLockManagerPluginGetName(driver->lockManager)) < 0) {
        VIR_FREE(mig->lockState);
        return -1;
    }

    mig->flags |= QEMU_MIGRATION_COOKIE_LOCKSTATE;
    mig->flagsMandatory |= QEMU_MIGRATION_COOKIE_LOCKSTATE;

    return 0;
}
|
|
|
|
|
|
|
|
|
2011-09-15 13:13:11 +00:00
|
|
|
static int
|
|
|
|
qemuMigrationCookieAddPersistent(qemuMigrationCookiePtr mig,
|
|
|
|
virDomainObjPtr dom)
|
|
|
|
{
|
|
|
|
if (mig->flags & QEMU_MIGRATION_COOKIE_PERSISTENT) {
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
|
|
|
|
_("Migration persistent data already present"));
|
2011-09-15 13:13:11 +00:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!dom->newDef)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
mig->persistent = dom->newDef;
|
|
|
|
mig->flags |= QEMU_MIGRATION_COOKIE_PERSISTENT;
|
|
|
|
mig->flagsMandatory |= QEMU_MIGRATION_COOKIE_PERSISTENT;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2012-10-01 15:18:20 +00:00
|
|
|
/* Attach the network section to @mig when the domain has any NICs;
 * a domain without NICs leaves the cookie unchanged.
 * Returns 0 on success, -1 on error. */
static int
qemuMigrationCookieAddNetwork(qemuMigrationCookiePtr mig,
                              virQEMUDriverPtr driver,
                              virDomainObjPtr dom)
{
    if (mig->flags & QEMU_MIGRATION_COOKIE_NETWORK) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("Network migration data already present"));
        return -1;
    }

    if (dom->def->nnets > 0) {
        mig->network = qemuMigrationCookieNetworkAlloc(driver, dom->def);
        if (!mig->network)
            return -1;
        mig->flags |= QEMU_MIGRATION_COOKIE_NETWORK;
    }

    return 0;
}
|
|
|
|
|
2011-05-18 16:34:21 +00:00
|
|
|
|
2013-01-29 12:38:50 +00:00
|
|
|
/* Attach (or refresh) the NBD section of @mig with the port the
 * domain's NBD server listens on.  Returns 0 on success, -1 on OOM. */
static int
qemuMigrationCookieAddNBD(qemuMigrationCookiePtr mig,
                          virQEMUDriverPtr driver ATTRIBUTE_UNUSED,
                          virDomainObjPtr vm)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;

    /* It is not a bug if there already is a NBD data */
    if (!mig->nbd &&
        VIR_ALLOC(mig->nbd) < 0)
        return -1;

    mig->nbd->port = priv->nbdPort;
    mig->flags |= QEMU_MIGRATION_COOKIE_NBD;

    return 0;
}
|
|
|
|
|
|
|
|
|
2011-02-17 13:17:59 +00:00
|
|
|
/* Format the <graphics> element of the cookie XML into @buf.
 * Emits tlsPort only for SPICE, and a nested <cert> child only when a
 * TLS subject is present (otherwise the element self-closes). */
static void qemuMigrationCookieGraphicsXMLFormat(virBufferPtr buf,
                                                 qemuMigrationCookieGraphicsPtr grap)
{
    virBufferAsprintf(buf, " <graphics type='%s' port='%d' listen='%s'",
                      virDomainGraphicsTypeToString(grap->type),
                      grap->port, grap->listen);
    if (grap->type == VIR_DOMAIN_GRAPHICS_TYPE_SPICE)
        virBufferAsprintf(buf, " tlsPort='%d'", grap->tlsPort);
    if (grap->tlsSubject) {
        virBufferAddLit(buf, ">\n");
        virBufferEscapeString(buf, " <cert info='subject' value='%s'/>\n", grap->tlsSubject);
        virBufferAddLit(buf, " </graphics>\n");
    } else {
        virBufferAddLit(buf, "/>\n");
    }
}
|
|
|
|
|
|
|
|
|
2012-10-01 15:18:20 +00:00
|
|
|
/* Format the <network> element of the cookie XML into @buf, with one
 * <interface> child per NIC that has a virtual port type set.  The
 * wrapping <network> element is emitted lazily so nothing is written
 * when no interface carries data. */
static void
qemuMigrationCookieNetworkXMLFormat(virBufferPtr buf,
                                    qemuMigrationCookieNetworkPtr optr)
{
    size_t i;
    bool empty = true;

    for (i = 0; i < optr->nnets; i++) {
        /* If optr->net[i].vporttype is not set, there is nothing to transfer */
        if (optr->net[i].vporttype != VIR_NETDEV_VPORT_PROFILE_NONE) {
            if (empty) {
                virBufferAddLit(buf, " <network>\n");
                empty = false;
            }
            virBufferAsprintf(buf, " <interface index='%zu' vporttype='%s'",
                              i, virNetDevVPortTypeToString(optr->net[i].vporttype));
            if (optr->net[i].portdata) {
                virBufferAddLit(buf, ">\n");
                virBufferEscapeString(buf, " <portdata>%s</portdata>\n",
                                      optr->net[i].portdata);
                virBufferAddLit(buf, " </interface>\n");
            } else {
                virBufferAddLit(buf, "/>\n");
            }
        }
    }
    if (!empty)
        virBufferAddLit(buf, " </network>\n");
}
|
|
|
|
|
|
|
|
|
2011-09-17 12:57:30 +00:00
|
|
|
static int
|
2012-11-28 16:43:10 +00:00
|
|
|
qemuMigrationCookieXMLFormat(virQEMUDriverPtr driver,
|
2012-05-04 19:00:13 +00:00
|
|
|
virBufferPtr buf,
|
2011-09-17 12:57:30 +00:00
|
|
|
qemuMigrationCookiePtr mig)
|
2011-01-24 18:06:16 +00:00
|
|
|
{
|
|
|
|
char uuidstr[VIR_UUID_STRING_BUFLEN];
|
|
|
|
char hostuuidstr[VIR_UUID_STRING_BUFLEN];
|
Convert 'int i' to 'size_t i' in src/qemu files
Convert the type of loop iterators named 'i', 'j', k',
'ii', 'jj', 'kk', to be 'size_t' instead of 'int' or
'unsigned int', also santizing 'ii', 'jj', 'kk' to use
the normal 'i', 'j', 'k' naming
Signed-off-by: Daniel P. Berrange <berrange@redhat.com>
2013-07-08 14:09:33 +00:00
|
|
|
size_t i;
|
2011-01-24 18:06:16 +00:00
|
|
|
|
|
|
|
virUUIDFormat(mig->uuid, uuidstr);
|
2011-05-23 15:42:15 +00:00
|
|
|
virUUIDFormat(mig->localHostuuid, hostuuidstr);
|
2011-01-24 18:06:16 +00:00
|
|
|
|
2013-05-07 10:28:50 +00:00
|
|
|
virBufferAddLit(buf, "<qemu-migration>\n");
|
2011-01-24 18:06:16 +00:00
|
|
|
virBufferEscapeString(buf, " <name>%s</name>\n", mig->name);
|
|
|
|
virBufferAsprintf(buf, " <uuid>%s</uuid>\n", uuidstr);
|
2011-05-23 15:42:15 +00:00
|
|
|
virBufferEscapeString(buf, " <hostname>%s</hostname>\n", mig->localHostname);
|
2011-01-24 18:06:16 +00:00
|
|
|
virBufferAsprintf(buf, " <hostuuid>%s</hostuuid>\n", hostuuidstr);
|
2011-02-17 13:17:59 +00:00
|
|
|
|
2013-05-21 07:21:20 +00:00
|
|
|
for (i = 0; i < QEMU_MIGRATION_COOKIE_FLAG_LAST; i++) {
|
2011-05-18 15:33:17 +00:00
|
|
|
if (mig->flagsMandatory & (1 << i))
|
|
|
|
virBufferAsprintf(buf, " <feature name='%s'/>\n",
|
|
|
|
qemuMigrationCookieFlagTypeToString(i));
|
|
|
|
}
|
|
|
|
|
2011-05-17 08:54:22 +00:00
|
|
|
if ((mig->flags & QEMU_MIGRATION_COOKIE_GRAPHICS) &&
|
|
|
|
mig->graphics)
|
2011-02-17 13:17:59 +00:00
|
|
|
qemuMigrationCookieGraphicsXMLFormat(buf, mig->graphics);
|
|
|
|
|
2011-05-18 16:34:21 +00:00
|
|
|
if ((mig->flags & QEMU_MIGRATION_COOKIE_LOCKSTATE) &&
|
|
|
|
mig->lockState) {
|
|
|
|
virBufferAsprintf(buf, " <lockstate driver='%s'>\n",
|
|
|
|
mig->lockDriver);
|
|
|
|
virBufferAsprintf(buf, " <leases>%s</leases>\n",
|
|
|
|
mig->lockState);
|
|
|
|
virBufferAddLit(buf, " </lockstate>\n");
|
|
|
|
}
|
|
|
|
|
2011-09-15 13:13:11 +00:00
|
|
|
if ((mig->flags & QEMU_MIGRATION_COOKIE_PERSISTENT) &&
|
|
|
|
mig->persistent) {
|
2011-09-17 12:57:30 +00:00
|
|
|
virBufferAdjustIndent(buf, 2);
|
2012-05-04 19:00:13 +00:00
|
|
|
if (qemuDomainDefFormatBuf(driver,
|
|
|
|
mig->persistent,
|
|
|
|
VIR_DOMAIN_XML_INACTIVE |
|
2012-10-08 09:58:05 +00:00
|
|
|
VIR_DOMAIN_XML_SECURE |
|
|
|
|
VIR_DOMAIN_XML_MIGRATABLE,
|
2012-05-04 19:00:13 +00:00
|
|
|
buf) < 0)
|
2011-09-17 12:57:30 +00:00
|
|
|
return -1;
|
|
|
|
virBufferAdjustIndent(buf, -2);
|
2011-09-15 13:13:11 +00:00
|
|
|
}
|
|
|
|
|
2012-10-01 15:18:20 +00:00
|
|
|
if ((mig->flags & QEMU_MIGRATION_COOKIE_NETWORK) && mig->network)
|
|
|
|
qemuMigrationCookieNetworkXMLFormat(buf, mig->network);
|
|
|
|
|
2013-01-29 12:38:50 +00:00
|
|
|
if ((mig->flags & QEMU_MIGRATION_COOKIE_NBD) && mig->nbd) {
|
|
|
|
virBufferAddLit(buf, " <nbd");
|
|
|
|
if (mig->nbd->port)
|
|
|
|
virBufferAsprintf(buf, " port='%d'", mig->nbd->port);
|
|
|
|
virBufferAddLit(buf, "/>\n");
|
|
|
|
}
|
|
|
|
|
2011-01-24 18:06:16 +00:00
|
|
|
virBufferAddLit(buf, "</qemu-migration>\n");
|
2011-09-17 12:57:30 +00:00
|
|
|
return 0;
|
2011-01-24 18:06:16 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2012-11-28 16:43:10 +00:00
|
|
|
static char *qemuMigrationCookieXMLFormatStr(virQEMUDriverPtr driver,
|
2012-05-04 19:00:13 +00:00
|
|
|
qemuMigrationCookiePtr mig)
|
2011-01-24 18:06:16 +00:00
|
|
|
{
|
|
|
|
virBuffer buf = VIR_BUFFER_INITIALIZER;
|
|
|
|
|
2012-05-04 19:00:13 +00:00
|
|
|
if (qemuMigrationCookieXMLFormat(driver, &buf, mig) < 0) {
|
2011-09-17 12:57:30 +00:00
|
|
|
virBufferFreeAndReset(&buf);
|
|
|
|
return NULL;
|
|
|
|
}
|
2011-01-24 18:06:16 +00:00
|
|
|
|
|
|
|
if (virBufferError(&buf)) {
|
|
|
|
virReportOOMError();
|
2011-09-17 12:57:30 +00:00
|
|
|
virBufferFreeAndReset(&buf);
|
2011-01-24 18:06:16 +00:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
return virBufferContentAndReset(&buf);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-02-17 13:17:59 +00:00
|
|
|
static qemuMigrationCookieGraphicsPtr
|
|
|
|
qemuMigrationCookieGraphicsXMLParse(xmlXPathContextPtr ctxt)
|
|
|
|
{
|
|
|
|
qemuMigrationCookieGraphicsPtr grap;
|
|
|
|
char *tmp;
|
|
|
|
|
|
|
|
if (VIR_ALLOC(grap) < 0)
|
2013-07-04 10:14:12 +00:00
|
|
|
goto error;
|
2011-02-17 13:17:59 +00:00
|
|
|
|
|
|
|
if (!(tmp = virXPathString("string(./graphics/@type)", ctxt))) {
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
"%s", _("missing type attribute in migration data"));
|
2011-02-17 13:17:59 +00:00
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
if ((grap->type = virDomainGraphicsTypeFromString(tmp)) < 0) {
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
_("unknown graphics type %s"), tmp);
|
2011-02-17 13:17:59 +00:00
|
|
|
VIR_FREE(tmp);
|
|
|
|
goto error;
|
|
|
|
}
|
2011-10-13 22:19:44 +00:00
|
|
|
VIR_FREE(tmp);
|
2011-02-17 13:17:59 +00:00
|
|
|
if (virXPathInt("string(./graphics/@port)", ctxt, &grap->port) < 0) {
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
"%s", _("missing port attribute in migration data"));
|
2011-02-17 13:17:59 +00:00
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
if (grap->type == VIR_DOMAIN_GRAPHICS_TYPE_SPICE) {
|
|
|
|
if (virXPathInt("string(./graphics/@tlsPort)", ctxt, &grap->tlsPort) < 0) {
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
"%s", _("missing tlsPort attribute in migration data"));
|
2011-02-17 13:17:59 +00:00
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (!(grap->listen = virXPathString("string(./graphics/@listen)", ctxt))) {
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
"%s", _("missing listen attribute in migration data"));
|
2011-02-17 13:17:59 +00:00
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
/* Optional */
|
2011-05-18 09:57:07 +00:00
|
|
|
grap->tlsSubject = virXPathString("string(./graphics/cert[@info='subject']/@value)", ctxt);
|
2011-02-17 13:17:59 +00:00
|
|
|
|
|
|
|
return grap;
|
|
|
|
|
|
|
|
error:
|
|
|
|
qemuMigrationCookieGraphicsFree(grap);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2012-10-01 15:18:20 +00:00
|
|
|
static qemuMigrationCookieNetworkPtr
|
|
|
|
qemuMigrationCookieNetworkXMLParse(xmlXPathContextPtr ctxt)
|
|
|
|
{
|
|
|
|
qemuMigrationCookieNetworkPtr optr;
|
Convert 'int i' to 'size_t i' in src/qemu files
Convert the type of loop iterators named 'i', 'j', k',
'ii', 'jj', 'kk', to be 'size_t' instead of 'int' or
'unsigned int', also santizing 'ii', 'jj', 'kk' to use
the normal 'i', 'j', 'k' naming
Signed-off-by: Daniel P. Berrange <berrange@redhat.com>
2013-07-08 14:09:33 +00:00
|
|
|
size_t i;
|
2012-10-01 15:18:20 +00:00
|
|
|
int n;
|
|
|
|
xmlNodePtr *interfaces = NULL;
|
|
|
|
char *vporttype;
|
|
|
|
xmlNodePtr save_ctxt = ctxt->node;
|
|
|
|
|
|
|
|
if (VIR_ALLOC(optr) < 0)
|
2013-07-04 10:14:12 +00:00
|
|
|
goto error;
|
2012-10-01 15:18:20 +00:00
|
|
|
|
|
|
|
if ((n = virXPathNodeSet("./network/interface", ctxt, &interfaces)) < 0) {
|
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
"%s", _("missing interface information"));
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
|
|
|
optr->nnets = n;
|
2012-11-16 08:03:42 +00:00
|
|
|
if (VIR_ALLOC_N(optr->net, optr->nnets) < 0)
|
2013-07-04 10:14:12 +00:00
|
|
|
goto error;
|
2012-10-01 15:18:20 +00:00
|
|
|
|
|
|
|
for (i = 0; i < n; i++) {
|
|
|
|
/* portdata is optional, and may not exist */
|
|
|
|
ctxt->node = interfaces[i];
|
|
|
|
optr->net[i].portdata = virXPathString("string(./portdata[1])", ctxt);
|
|
|
|
|
|
|
|
if (!(vporttype = virXMLPropString(interfaces[i], "vporttype"))) {
|
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
"%s", _("missing vporttype attribute in migration data"));
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
optr->net[i].vporttype = virNetDevVPortTypeFromString(vporttype);
|
|
|
|
}
|
|
|
|
|
|
|
|
VIR_FREE(interfaces);
|
|
|
|
|
|
|
|
cleanup:
|
|
|
|
ctxt->node = save_ctxt;
|
|
|
|
return optr;
|
|
|
|
|
|
|
|
error:
|
|
|
|
VIR_FREE(interfaces);
|
|
|
|
qemuMigrationCookieNetworkFree(optr);
|
|
|
|
optr = NULL;
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-01-24 18:06:16 +00:00
|
|
|
static int
|
|
|
|
qemuMigrationCookieXMLParse(qemuMigrationCookiePtr mig,
|
2012-11-28 16:43:10 +00:00
|
|
|
virQEMUDriverPtr driver,
|
2011-09-15 13:13:11 +00:00
|
|
|
xmlDocPtr doc,
|
2011-01-24 18:06:16 +00:00
|
|
|
xmlXPathContextPtr ctxt,
|
2011-07-06 22:42:06 +00:00
|
|
|
unsigned int flags)
|
2011-01-24 18:06:16 +00:00
|
|
|
{
|
|
|
|
char uuidstr[VIR_UUID_STRING_BUFLEN];
|
2013-02-01 17:04:15 +00:00
|
|
|
char *tmp = NULL;
|
2011-05-18 15:33:17 +00:00
|
|
|
xmlNodePtr *nodes = NULL;
|
Convert 'int i' to 'size_t i' in src/qemu files
Convert the type of loop iterators named 'i', 'j', k',
'ii', 'jj', 'kk', to be 'size_t' instead of 'int' or
'unsigned int', also santizing 'ii', 'jj', 'kk' to use
the normal 'i', 'j', 'k' naming
Signed-off-by: Daniel P. Berrange <berrange@redhat.com>
2013-07-08 14:09:33 +00:00
|
|
|
size_t i;
|
|
|
|
int n;
|
2013-02-01 17:04:15 +00:00
|
|
|
virCapsPtr caps = NULL;
|
|
|
|
|
|
|
|
if (!(caps = virQEMUDriverGetCapabilities(driver, false)))
|
|
|
|
goto error;
|
2011-01-24 18:06:16 +00:00
|
|
|
|
|
|
|
/* We don't store the uuid, name, hostname, or hostuuid
|
|
|
|
* values. We just compare them to local data to do some
|
|
|
|
* sanity checking on migration operation
|
|
|
|
*/
|
|
|
|
|
|
|
|
/* Extract domain name */
|
|
|
|
if (!(tmp = virXPathString("string(./name[1])", ctxt))) {
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
"%s", _("missing name element in migration data"));
|
2011-01-24 18:06:16 +00:00
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
if (STRNEQ(tmp, mig->name)) {
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
_("Incoming cookie data had unexpected name %s vs %s"),
|
|
|
|
tmp, mig->name);
|
2011-01-24 18:06:16 +00:00
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
VIR_FREE(tmp);
|
|
|
|
|
|
|
|
/* Extract domain uuid */
|
|
|
|
tmp = virXPathString("string(./uuid[1])", ctxt);
|
|
|
|
if (!tmp) {
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
"%s", _("missing uuid element in migration data"));
|
2011-01-24 18:06:16 +00:00
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
virUUIDFormat(mig->uuid, uuidstr);
|
|
|
|
if (STRNEQ(tmp, uuidstr)) {
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
_("Incoming cookie data had unexpected UUID %s vs %s"),
|
|
|
|
tmp, uuidstr);
|
2011-01-24 18:06:16 +00:00
|
|
|
}
|
|
|
|
VIR_FREE(tmp);
|
|
|
|
|
|
|
|
/* Check & forbid "localhost" migration */
|
2011-05-23 15:42:15 +00:00
|
|
|
if (!(mig->remoteHostname = virXPathString("string(./hostname[1])", ctxt))) {
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
"%s", _("missing hostname element in migration data"));
|
2011-01-24 18:06:16 +00:00
|
|
|
goto error;
|
|
|
|
}
|
2011-05-23 15:42:15 +00:00
|
|
|
if (STREQ(mig->remoteHostname, mig->localHostname)) {
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
_("Attempt to migrate guest to the same host %s"),
|
|
|
|
mig->remoteHostname);
|
2011-01-24 18:06:16 +00:00
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!(tmp = virXPathString("string(./hostuuid[1])", ctxt))) {
|
2012-10-26 13:06:17 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
"%s", _("missing hostuuid element in migration data"));
|
|
|
|
goto error;
|
2011-01-24 18:06:16 +00:00
|
|
|
}
|
2012-10-26 13:06:17 +00:00
|
|
|
if (virUUIDParse(tmp, mig->remoteHostuuid) < 0) {
|
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
"%s", _("malformed hostuuid element in migration data"));
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
if (memcmp(mig->remoteHostuuid, mig->localHostuuid, VIR_UUID_BUFLEN) == 0) {
|
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
_("Attempt to migrate guest to the same host %s"),
|
|
|
|
tmp);
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
VIR_FREE(tmp);
|
2011-01-24 18:06:16 +00:00
|
|
|
|
2011-05-18 15:33:17 +00:00
|
|
|
/* Check to ensure all mandatory features from XML are also
|
|
|
|
* present in 'flags' */
|
2013-02-20 09:30:38 +00:00
|
|
|
if ((n = virXPathNodeSet("./feature", ctxt, &nodes)) < 0)
|
2011-05-18 15:33:17 +00:00
|
|
|
goto error;
|
|
|
|
|
2013-05-21 07:21:20 +00:00
|
|
|
for (i = 0; i < n; i++) {
|
2011-05-18 15:33:17 +00:00
|
|
|
int val;
|
|
|
|
char *str = virXMLPropString(nodes[i], "name");
|
|
|
|
if (!str) {
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
"%s", _("missing feature name"));
|
2011-05-18 15:33:17 +00:00
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
|
|
|
if ((val = qemuMigrationCookieFlagTypeFromString(str)) < 0) {
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
_("Unknown migration cookie feature %s"),
|
|
|
|
str);
|
2011-05-18 15:33:17 +00:00
|
|
|
VIR_FREE(str);
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
|
|
|
if ((flags & (1 << val)) == 0) {
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
_("Unsupported migration cookie feature %s"),
|
|
|
|
str);
|
2011-05-18 15:33:17 +00:00
|
|
|
VIR_FREE(str);
|
|
|
|
}
|
|
|
|
VIR_FREE(str);
|
|
|
|
}
|
|
|
|
VIR_FREE(nodes);
|
|
|
|
|
2011-02-17 13:17:59 +00:00
|
|
|
if ((flags & QEMU_MIGRATION_COOKIE_GRAPHICS) &&
|
|
|
|
virXPathBoolean("count(./graphics) > 0", ctxt) &&
|
|
|
|
(!(mig->graphics = qemuMigrationCookieGraphicsXMLParse(ctxt))))
|
|
|
|
goto error;
|
|
|
|
|
2011-05-18 16:34:21 +00:00
|
|
|
if ((flags & QEMU_MIGRATION_COOKIE_LOCKSTATE) &&
|
|
|
|
virXPathBoolean("count(./lockstate) > 0", ctxt)) {
|
|
|
|
mig->lockDriver = virXPathString("string(./lockstate[1]/@driver)", ctxt);
|
|
|
|
if (!mig->lockDriver) {
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
|
|
|
|
_("Missing lock driver name in migration cookie"));
|
2011-05-18 16:34:21 +00:00
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
mig->lockState = virXPathString("string(./lockstate[1]/leases[1])", ctxt);
|
|
|
|
if (mig->lockState && STREQ(mig->lockState, ""))
|
|
|
|
VIR_FREE(mig->lockState);
|
|
|
|
}
|
|
|
|
|
2011-09-15 13:13:11 +00:00
|
|
|
if ((flags & QEMU_MIGRATION_COOKIE_PERSISTENT) &&
|
|
|
|
virXPathBoolean("count(./domain) > 0", ctxt)) {
|
|
|
|
if ((n = virXPathNodeSet("./domain", ctxt, &nodes)) > 1) {
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
_("Too many domain elements in "
|
|
|
|
"migration cookie: %d"),
|
|
|
|
n);
|
2011-09-15 13:13:11 +00:00
|
|
|
goto error;
|
|
|
|
}
|
2013-03-28 13:55:55 +00:00
|
|
|
mig->persistent = virDomainDefParseNode(doc, nodes[0],
|
|
|
|
caps, driver->xmlopt,
|
2011-09-15 13:13:11 +00:00
|
|
|
-1, VIR_DOMAIN_XML_INACTIVE);
|
|
|
|
if (!mig->persistent) {
|
|
|
|
/* virDomainDefParseNode already reported
|
|
|
|
* an error for us */
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
VIR_FREE(nodes);
|
|
|
|
}
|
|
|
|
|
2012-10-01 15:18:20 +00:00
|
|
|
if ((flags & QEMU_MIGRATION_COOKIE_NETWORK) &&
|
|
|
|
virXPathBoolean("count(./network) > 0", ctxt) &&
|
|
|
|
(!(mig->network = qemuMigrationCookieNetworkXMLParse(ctxt))))
|
|
|
|
goto error;
|
|
|
|
|
2013-01-29 12:38:50 +00:00
|
|
|
if (flags & QEMU_MIGRATION_COOKIE_NBD &&
|
|
|
|
virXPathBoolean("boolean(./nbd)", ctxt)) {
|
|
|
|
char *port;
|
|
|
|
|
2013-07-04 10:14:12 +00:00
|
|
|
if (VIR_ALLOC(mig->nbd) < 0)
|
2013-01-29 12:38:50 +00:00
|
|
|
goto error;
|
|
|
|
|
|
|
|
port = virXPathString("string(./nbd/@port)", ctxt);
|
|
|
|
if (port && virStrToLong_i(port, NULL, 10, &mig->nbd->port) < 0) {
|
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
_("Malformed nbd port '%s'"),
|
|
|
|
port);
|
|
|
|
VIR_FREE(port);
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
VIR_FREE(port);
|
|
|
|
}
|
|
|
|
|
2013-02-01 17:04:15 +00:00
|
|
|
virObjectUnref(caps);
|
2011-01-24 18:06:16 +00:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
error:
|
|
|
|
VIR_FREE(tmp);
|
2011-05-18 15:33:17 +00:00
|
|
|
VIR_FREE(nodes);
|
2013-02-01 17:04:15 +00:00
|
|
|
virObjectUnref(caps);
|
2011-01-24 18:06:16 +00:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
qemuMigrationCookieXMLParseStr(qemuMigrationCookiePtr mig,
|
2012-11-28 16:43:10 +00:00
|
|
|
virQEMUDriverPtr driver,
|
2011-01-24 18:06:16 +00:00
|
|
|
const char *xml,
|
2011-07-06 22:42:06 +00:00
|
|
|
unsigned int flags)
|
2011-01-24 18:06:16 +00:00
|
|
|
{
|
|
|
|
xmlDocPtr doc = NULL;
|
|
|
|
xmlXPathContextPtr ctxt = NULL;
|
2011-05-16 15:10:35 +00:00
|
|
|
int ret = -1;
|
2011-01-24 18:06:16 +00:00
|
|
|
|
|
|
|
VIR_DEBUG("xml=%s", NULLSTR(xml));
|
|
|
|
|
2011-09-14 14:17:57 +00:00
|
|
|
if (!(doc = virXMLParseStringCtxt(xml, _("(qemu_migration_cookie)"), &ctxt)))
|
2011-01-24 18:06:16 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
2011-09-15 13:13:11 +00:00
|
|
|
ret = qemuMigrationCookieXMLParse(mig, driver, doc, ctxt, flags);
|
2011-01-24 18:06:16 +00:00
|
|
|
|
|
|
|
cleanup:
|
|
|
|
xmlXPathFreeContext(ctxt);
|
|
|
|
xmlFreeDoc(doc);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
qemuMigrationBakeCookie(qemuMigrationCookiePtr mig,
|
2012-11-28 16:43:10 +00:00
|
|
|
virQEMUDriverPtr driver,
|
2011-02-17 13:17:59 +00:00
|
|
|
virDomainObjPtr dom,
|
2011-01-24 18:06:16 +00:00
|
|
|
char **cookieout,
|
|
|
|
int *cookieoutlen,
|
2011-07-06 22:42:06 +00:00
|
|
|
unsigned int flags)
|
2011-01-24 18:06:16 +00:00
|
|
|
{
|
2011-06-04 10:14:05 +00:00
|
|
|
if (!cookieout || !cookieoutlen)
|
|
|
|
return 0;
|
2011-01-24 18:06:16 +00:00
|
|
|
|
|
|
|
*cookieoutlen = 0;
|
|
|
|
|
2011-02-17 13:17:59 +00:00
|
|
|
if (flags & QEMU_MIGRATION_COOKIE_GRAPHICS &&
|
|
|
|
qemuMigrationCookieAddGraphics(mig, driver, dom) < 0)
|
|
|
|
return -1;
|
|
|
|
|
2011-05-18 16:34:21 +00:00
|
|
|
if (flags & QEMU_MIGRATION_COOKIE_LOCKSTATE &&
|
|
|
|
qemuMigrationCookieAddLockstate(mig, driver, dom) < 0)
|
|
|
|
return -1;
|
|
|
|
|
2011-09-15 13:13:11 +00:00
|
|
|
if (flags & QEMU_MIGRATION_COOKIE_PERSISTENT &&
|
|
|
|
qemuMigrationCookieAddPersistent(mig, dom) < 0)
|
|
|
|
return -1;
|
|
|
|
|
2012-10-01 15:18:20 +00:00
|
|
|
if (flags & QEMU_MIGRATION_COOKIE_NETWORK &&
|
|
|
|
qemuMigrationCookieAddNetwork(mig, driver, dom) < 0) {
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2013-01-29 12:38:50 +00:00
|
|
|
if ((flags & QEMU_MIGRATION_COOKIE_NBD) &&
|
|
|
|
qemuMigrationCookieAddNBD(mig, driver, dom) < 0)
|
|
|
|
return -1;
|
|
|
|
|
2012-05-04 19:00:13 +00:00
|
|
|
if (!(*cookieout = qemuMigrationCookieXMLFormatStr(driver, mig)))
|
2011-01-24 18:06:16 +00:00
|
|
|
return -1;
|
|
|
|
|
|
|
|
*cookieoutlen = strlen(*cookieout) + 1;
|
|
|
|
|
|
|
|
VIR_DEBUG("cookielen=%d cookie=%s", *cookieoutlen, *cookieout);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static qemuMigrationCookiePtr
|
2012-11-28 16:43:10 +00:00
|
|
|
qemuMigrationEatCookie(virQEMUDriverPtr driver,
|
2011-05-18 16:34:21 +00:00
|
|
|
virDomainObjPtr dom,
|
2011-01-24 18:06:16 +00:00
|
|
|
const char *cookiein,
|
|
|
|
int cookieinlen,
|
2011-07-06 22:42:06 +00:00
|
|
|
unsigned int flags)
|
2011-01-24 18:06:16 +00:00
|
|
|
{
|
|
|
|
qemuMigrationCookiePtr mig = NULL;
|
|
|
|
|
|
|
|
/* Parse & validate incoming cookie (if any) */
|
|
|
|
if (cookiein && cookieinlen &&
|
|
|
|
cookiein[cookieinlen-1] != '\0') {
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
|
|
|
|
_("Migration cookie was not NULL terminated"));
|
2011-01-24 18:06:16 +00:00
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
|
|
|
VIR_DEBUG("cookielen=%d cookie='%s'", cookieinlen, NULLSTR(cookiein));
|
|
|
|
|
|
|
|
if (!(mig = qemuMigrationCookieNew(dom)))
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
if (cookiein && cookieinlen &&
|
|
|
|
qemuMigrationCookieXMLParseStr(mig,
|
2011-09-15 13:13:11 +00:00
|
|
|
driver,
|
2011-01-24 18:06:16 +00:00
|
|
|
cookiein,
|
|
|
|
flags) < 0)
|
|
|
|
goto error;
|
|
|
|
|
2011-05-18 16:34:21 +00:00
|
|
|
if (mig->flags & QEMU_MIGRATION_COOKIE_LOCKSTATE) {
|
|
|
|
if (!mig->lockDriver) {
|
|
|
|
if (virLockManagerPluginUsesState(driver->lockManager)) {
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
_("Missing %s lock state for migration cookie"),
|
|
|
|
virLockManagerPluginGetName(driver->lockManager));
|
2011-05-18 16:34:21 +00:00
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
} else if (STRNEQ(mig->lockDriver,
|
|
|
|
virLockManagerPluginGetName(driver->lockManager))) {
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
_("Source host lock driver %s different from target %s"),
|
|
|
|
mig->lockDriver,
|
|
|
|
virLockManagerPluginGetName(driver->lockManager));
|
2011-05-18 16:34:21 +00:00
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-01-24 18:06:16 +00:00
|
|
|
return mig;
|
|
|
|
|
|
|
|
error:
|
|
|
|
qemuMigrationCookieFree(mig);
|
|
|
|
return NULL;
|
|
|
|
}
|
2011-01-31 10:47:03 +00:00
|
|
|
|
2013-01-31 13:48:06 +00:00
|
|
|
/**
|
|
|
|
* qemuMigrationStartNBDServer:
|
|
|
|
* @driver: qemu driver
|
|
|
|
* @vm: domain
|
|
|
|
*
|
|
|
|
* Starts NBD server. This is a newer method to copy
|
|
|
|
* storage during migration than using 'blk' and 'inc'
|
|
|
|
* arguments in 'migrate' monitor command.
|
|
|
|
* Error is reported here.
|
|
|
|
*
|
|
|
|
* Returns 0 on success, -1 otherwise.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
qemuMigrationStartNBDServer(virQEMUDriverPtr driver,
|
2013-03-22 13:52:25 +00:00
|
|
|
virDomainObjPtr vm,
|
|
|
|
const char *listenAddr)
|
2013-01-31 13:48:06 +00:00
|
|
|
{
|
|
|
|
int ret = -1;
|
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
|
|
|
unsigned short port = 0;
|
|
|
|
char *diskAlias = NULL;
|
|
|
|
size_t i;
|
2013-05-23 13:51:05 +00:00
|
|
|
const char *host;
|
|
|
|
|
|
|
|
if (STREQ(listenAddr, "[::]"))
|
|
|
|
host = "::";
|
|
|
|
else
|
|
|
|
host = listenAddr;
|
2013-01-31 13:48:06 +00:00
|
|
|
|
|
|
|
for (i = 0; i < vm->def->ndisks; i++) {
|
|
|
|
virDomainDiskDefPtr disk = vm->def->disks[i];
|
|
|
|
|
|
|
|
/* skip shared, RO and source-less disks */
|
|
|
|
if (disk->shared || disk->readonly || !disk->src)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
VIR_FREE(diskAlias);
|
|
|
|
if (virAsprintf(&diskAlias, "%s%s",
|
2013-07-04 10:14:12 +00:00
|
|
|
QEMU_DRIVE_HOST_PREFIX, disk->info.alias) < 0)
|
2013-01-31 13:48:06 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
if (qemuDomainObjEnterMonitorAsync(driver, vm,
|
|
|
|
QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
if (!port &&
|
|
|
|
((virPortAllocatorAcquire(driver->remotePorts, &port) < 0) ||
|
2013-05-23 13:51:05 +00:00
|
|
|
(qemuMonitorNBDServerStart(priv->mon, host, port) < 0))) {
|
2013-01-31 13:48:06 +00:00
|
|
|
qemuDomainObjExitMonitor(driver, vm);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (qemuMonitorNBDServerAdd(priv->mon, diskAlias, true) < 0) {
|
|
|
|
qemuDomainObjExitMonitor(driver, vm);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
qemuDomainObjExitMonitor(driver, vm);
|
|
|
|
}
|
|
|
|
|
|
|
|
priv->nbdPort = port;
|
|
|
|
ret = 0;
|
|
|
|
|
|
|
|
cleanup:
|
|
|
|
VIR_FREE(diskAlias);
|
2013-07-04 19:16:57 +00:00
|
|
|
if (ret < 0)
|
2013-01-31 13:48:06 +00:00
|
|
|
virPortAllocatorRelease(driver->remotePorts, port);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2012-11-23 14:42:51 +00:00
|
|
|
/**
|
|
|
|
* qemuMigrationDriveMirror:
|
|
|
|
* @driver: qemu driver
|
|
|
|
* @vm: domain
|
|
|
|
* @mig: migration cookie
|
|
|
|
* @host: where are we migrating to
|
|
|
|
* @speed: how much should the copying be limited
|
|
|
|
* @migrate_flags: migrate monitor command flags
|
|
|
|
*
|
|
|
|
* Run drive-mirror to feed NBD server running on dst and wait
|
|
|
|
* till the process switches into another phase where writes go
|
|
|
|
* simultaneously to both source and destination. And this switch
|
|
|
|
* is what we are waiting for before proceeding with the next
|
|
|
|
* disk. On success, update @migrate_flags so we don't tell
|
|
|
|
* 'migrate' command to do the very same operation.
|
|
|
|
*
|
|
|
|
* Returns 0 on success (@migrate_flags updated),
|
|
|
|
* -1 otherwise.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
qemuMigrationDriveMirror(virQEMUDriverPtr driver,
|
|
|
|
virDomainObjPtr vm,
|
|
|
|
qemuMigrationCookiePtr mig,
|
|
|
|
const char *host,
|
|
|
|
unsigned long speed,
|
|
|
|
unsigned int *migrate_flags)
|
|
|
|
{
|
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
|
|
|
int ret = -1;
|
|
|
|
int mon_ret;
|
|
|
|
int port;
|
|
|
|
size_t i, lastGood = 0;
|
|
|
|
char *diskAlias = NULL;
|
|
|
|
char *nbd_dest = NULL;
|
2013-05-31 11:24:06 +00:00
|
|
|
char *hoststr = NULL;
|
2012-11-23 14:42:51 +00:00
|
|
|
unsigned int mirror_flags = VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT;
|
|
|
|
virErrorPtr err = NULL;
|
|
|
|
|
|
|
|
if (!(*migrate_flags & (QEMU_MONITOR_MIGRATE_NON_SHARED_DISK |
|
|
|
|
QEMU_MONITOR_MIGRATE_NON_SHARED_INC)))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (!mig->nbd) {
|
|
|
|
/* Destination doesn't support NBD server.
|
|
|
|
* Fall back to previous implementation. */
|
|
|
|
VIR_DEBUG("Destination doesn't support NBD server "
|
|
|
|
"Falling back to previous implementation.");
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* steal NBD port and thus prevent its propagation back to destination */
|
|
|
|
port = mig->nbd->port;
|
|
|
|
mig->nbd->port = 0;
|
|
|
|
|
2013-05-31 11:24:06 +00:00
|
|
|
/* escape literal IPv6 address */
|
|
|
|
if (strchr(host, ':')) {
|
2013-07-04 10:14:12 +00:00
|
|
|
if (virAsprintf(&hoststr, "[%s]", host) < 0)
|
2013-05-31 11:24:06 +00:00
|
|
|
goto error;
|
|
|
|
} else if (VIR_STRDUP(hoststr, host) < 0) {
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
2012-11-23 14:42:51 +00:00
|
|
|
if (*migrate_flags & QEMU_MONITOR_MIGRATE_NON_SHARED_INC)
|
|
|
|
mirror_flags |= VIR_DOMAIN_BLOCK_REBASE_SHALLOW;
|
|
|
|
|
|
|
|
for (i = 0; i < vm->def->ndisks; i++) {
|
|
|
|
virDomainDiskDefPtr disk = vm->def->disks[i];
|
|
|
|
virDomainBlockJobInfo info;
|
|
|
|
|
|
|
|
/* skip shared, RO and source-less disks */
|
|
|
|
if (disk->shared || disk->readonly || !disk->src)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
VIR_FREE(diskAlias);
|
|
|
|
VIR_FREE(nbd_dest);
|
|
|
|
if ((virAsprintf(&diskAlias, "%s%s",
|
|
|
|
QEMU_DRIVE_HOST_PREFIX, disk->info.alias) < 0) ||
|
|
|
|
(virAsprintf(&nbd_dest, "nbd:%s:%d:exportname=%s",
|
2013-07-04 10:14:12 +00:00
|
|
|
hoststr, port, diskAlias) < 0))
|
2012-11-23 14:42:51 +00:00
|
|
|
goto error;
|
|
|
|
|
|
|
|
if (qemuDomainObjEnterMonitorAsync(driver, vm,
|
|
|
|
QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
|
|
|
|
goto error;
|
|
|
|
mon_ret = qemuMonitorDriveMirror(priv->mon, diskAlias, nbd_dest,
|
|
|
|
NULL, speed, mirror_flags);
|
|
|
|
qemuDomainObjExitMonitor(driver, vm);
|
|
|
|
|
|
|
|
if (mon_ret < 0)
|
|
|
|
goto error;
|
|
|
|
|
|
|
|
lastGood = i;
|
|
|
|
|
|
|
|
/* wait for completion */
|
|
|
|
while (true) {
|
|
|
|
/* Poll every 500ms for progress & to allow cancellation */
|
|
|
|
struct timespec ts = { .tv_sec = 0, .tv_nsec = 500 * 1000 * 1000ull };
|
|
|
|
|
|
|
|
memset(&info, 0, sizeof(info));
|
|
|
|
|
|
|
|
if (qemuDomainObjEnterMonitorAsync(driver, vm,
|
|
|
|
QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
|
|
|
|
goto error;
|
|
|
|
if (priv->job.asyncAbort) {
|
|
|
|
/* explicitly do this *after* we entered the monitor,
|
|
|
|
* as this is a critical section so we are guaranteed
|
|
|
|
* priv->job.asyncAbort will not change */
|
|
|
|
qemuDomainObjExitMonitor(driver, vm);
|
|
|
|
virReportError(VIR_ERR_OPERATION_ABORTED, _("%s: %s"),
|
|
|
|
qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
|
|
|
|
_("canceled by client"));
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
mon_ret = qemuMonitorBlockJob(priv->mon, diskAlias, NULL, 0,
|
|
|
|
&info, BLOCK_JOB_INFO, true);
|
|
|
|
qemuDomainObjExitMonitor(driver, vm);
|
|
|
|
|
|
|
|
if (mon_ret < 0)
|
|
|
|
goto error;
|
|
|
|
|
|
|
|
if (info.cur == info.end) {
|
|
|
|
VIR_DEBUG("Drive mirroring of '%s' completed", diskAlias);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* XXX Frankly speaking, we should listen to the events,
|
|
|
|
* instead of doing this. But this works for now and we
|
|
|
|
* are doing something similar in migration itself anyway */
|
|
|
|
|
|
|
|
virObjectUnlock(vm);
|
|
|
|
|
|
|
|
nanosleep(&ts, NULL);
|
|
|
|
|
|
|
|
virObjectLock(vm);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Okay, copied. Modify migrate_flags */
|
|
|
|
*migrate_flags &= ~(QEMU_MONITOR_MIGRATE_NON_SHARED_DISK |
|
|
|
|
QEMU_MONITOR_MIGRATE_NON_SHARED_INC);
|
|
|
|
ret = 0;
|
|
|
|
|
|
|
|
cleanup:
|
|
|
|
VIR_FREE(diskAlias);
|
|
|
|
VIR_FREE(nbd_dest);
|
2013-05-31 11:24:06 +00:00
|
|
|
VIR_FREE(hoststr);
|
2012-11-23 14:42:51 +00:00
|
|
|
return ret;
|
|
|
|
|
|
|
|
error:
|
|
|
|
/* don't overwrite any errors */
|
|
|
|
err = virSaveLastError();
|
|
|
|
/* cancel any outstanding jobs */
|
|
|
|
while (lastGood) {
|
|
|
|
virDomainDiskDefPtr disk = vm->def->disks[--lastGood];
|
|
|
|
|
|
|
|
/* skip shared, RO disks */
|
|
|
|
if (disk->shared || disk->readonly || !disk->src)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
VIR_FREE(diskAlias);
|
|
|
|
if (virAsprintf(&diskAlias, "%s%s",
|
2013-07-04 10:14:12 +00:00
|
|
|
QEMU_DRIVE_HOST_PREFIX, disk->info.alias) < 0)
|
2012-11-23 14:42:51 +00:00
|
|
|
continue;
|
|
|
|
if (qemuDomainObjEnterMonitorAsync(driver, vm,
|
|
|
|
QEMU_ASYNC_JOB_MIGRATION_OUT) == 0) {
|
|
|
|
if (qemuMonitorBlockJob(priv->mon, diskAlias, NULL, 0,
|
|
|
|
NULL, BLOCK_JOB_ABORT, true) < 0) {
|
|
|
|
VIR_WARN("Unable to cancel block-job on '%s'", diskAlias);
|
|
|
|
}
|
|
|
|
qemuDomainObjExitMonitor(driver, vm);
|
|
|
|
} else {
|
|
|
|
VIR_WARN("Unable to enter monitor. No block job cancelled");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (err)
|
|
|
|
virSetError(err);
|
|
|
|
virFreeError(err);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
2013-01-31 13:48:06 +00:00
|
|
|
|
2012-11-27 15:34:24 +00:00
|
|
|
|
|
|
|
static void
|
|
|
|
qemuMigrationStopNBDServer(virQEMUDriverPtr driver,
|
|
|
|
virDomainObjPtr vm,
|
|
|
|
qemuMigrationCookiePtr mig)
|
|
|
|
{
|
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
|
|
|
|
|
|
|
if (!mig->nbd)
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (qemuDomainObjEnterMonitorAsync(driver, vm,
|
|
|
|
QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (qemuMonitorNBDServerStop(priv->mon) < 0)
|
|
|
|
VIR_WARN("Unable to stop NBD server");
|
|
|
|
|
|
|
|
qemuDomainObjExitMonitor(driver, vm);
|
|
|
|
|
|
|
|
virPortAllocatorRelease(driver->remotePorts, priv->nbdPort);
|
|
|
|
priv->nbdPort = 0;
|
|
|
|
}
|
|
|
|
|
2013-01-30 16:53:37 +00:00
|
|
|
/* qemuMigrationCancelDriveMirror:
 *
 * Best-effort abort of the drive-mirror block jobs that were started on
 * the source for NBD storage migration, one per migratable disk.  All
 * failures are logged rather than reported: this runs on cleanup paths
 * where there is nothing better to do.
 */
static void
qemuMigrationCancelDriveMirror(qemuMigrationCookiePtr mig,
                               virQEMUDriverPtr driver,
                               virDomainObjPtr vm)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    size_t i;
    char *diskAlias = NULL;  /* "drive-<alias>" name; reused across iterations */

    VIR_DEBUG("mig=%p nbdPort=%d", mig->nbd, priv->nbdPort);

    for (i = 0; i < vm->def->ndisks; i++) {
        virDomainDiskDefPtr disk = vm->def->disks[i];

        /* skip shared, RO and source-less disks */
        if (disk->shared || disk->readonly || !disk->src)
            continue;

        /* Release the alias from the previous iteration before building
         * the one for this disk. */
        VIR_FREE(diskAlias);
        if (virAsprintf(&diskAlias, "%s%s",
                        QEMU_DRIVE_HOST_PREFIX, disk->info.alias) < 0)
            goto cleanup;

        /* If we cannot enter the monitor at all, give up on the remaining
         * disks too. */
        if (qemuDomainObjEnterMonitorAsync(driver, vm,
                                           QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
            goto cleanup;

        /* Abort the mirror job; a failure here is only logged. */
        if (qemuMonitorBlockJob(priv->mon, diskAlias, NULL, 0,
                                NULL, BLOCK_JOB_ABORT, true) < 0)
            VIR_WARN("Unable to stop block job on %s", diskAlias);
        qemuDomainObjExitMonitor(driver, vm);
    }

 cleanup:
    VIR_FREE(diskAlias);
    return;
}
|
|
|
|
|
snapshot: prevent migration from stranding snapshot data
Migration is another case of stranding metadata. And since
snapshot metadata is arbitrarily large, there's no way to
shoehorn it into the migration cookie of migration v3.
This patch consolidates two existing locations for migration
validation into one helper function, then enhances that function
to also do the new checks. If we could always trust the source
to validate migration, then the destination would not have to
do anything; but since older servers that did not do checking
can migrate to newer destinations, we have to repeat some of
the same checks on the destination; meanwhile, we want to
detect failures as soon as possible. With migration v2, this
means that validation will reject things at Prepare on the
destination if the XML exposes the problem, otherwise at Perform
on the source; with migration v3, this means that validation
will reject things at Begin on the source, or if the source
is old and the XML exposes the problem, then at Prepare on the
destination.
This patch is necessarily over-strict. Once a later patch
properly handles auto-cleanup of snapshot metadata on the
death of a transient domain, then the only time we actually
need snapshots to prevent migration is when using the
--undefinesource flag on a persistent source domain.
It is possible to recreate snapshot metadata on the destination
with VIR_DOMAIN_SNAPSHOT_CREATE_REDEFINE and
VIR_DOMAIN_SNAPSHOT_CREATE_CURRENT. But for now, that is limited,
since if we delete the snapshot metadata prior to migration,
then we won't know the name of the current snapshot to pass
along; and if we delete the snapshot metadata after migration
and use the v3 migration cookie to pass along the name of the
current snapshot, then we need a way to bypass the fact that
this patch refuses migration with snapshot metadata present.
So eventually, we may have to introduce migration protocol v4
that allows feature negotiation and an arbitrary number of
handshake exchanges, so as to pass as many rpc calls as needed
to transfer all the snapshot xml hierarchy.
But all of that is thoughts for the future; for now, the best
course of action is to quit early, rather than get into a
funky state of stale metadata; then relax restrictions later.
* src/qemu/qemu_migration.h (qemuMigrationIsAllowed): Make static.
* src/qemu/qemu_migration.c (qemuMigrationIsAllowed): Alter
signature, and allow checks for both outgoing and incoming.
(qemuMigrationBegin, qemuMigrationPrepareAny)
(qemuMigrationPerformJob): Update callers.
2011-08-12 19:23:09 +00:00
|
|
|
/* Validate whether the domain is safe to migrate.  If vm is NULL,
 * then this is being run in the v2 Prepare stage on the destination
 * (where we only have the target xml); if vm is provided, then this
 * is being run in either v2 Perform or v3 Begin (where we also have
 * access to all of the domain's metadata, such as whether it is
 * marked autodestroy or has snapshots).  While it would be nice to
 * assume that checking on source is sufficient to prevent ever
 * talking to the destination in the first place, we are stuck with
 * the fact that older servers did not do checks on the source.
 *
 * Returns true if migration may proceed, false otherwise (with the
 * reason already reported via virReportError). */
bool
qemuMigrationIsAllowed(virQEMUDriverPtr driver, virDomainObjPtr vm,
                       virDomainDefPtr def, bool remote, bool abort_on_error)
{
    int nsnapshots;
    int pauseReason;
    bool forbid;
    size_t i;

    /* Checks that need live domain state; skipped when only @def is
     * available (v2 Prepare on the destination). */
    if (vm) {
        if (qemuProcessAutoDestroyActive(driver, vm)) {
            virReportError(VIR_ERR_OPERATION_INVALID,
                           "%s", _("domain is marked for auto destroy"));
            return false;
        }

        /* perform these checks only when migrating to remote hosts */
        if (remote) {
            /* Snapshot metadata cannot be carried in the migration
             * cookie, so any snapshots block migration. */
            nsnapshots = virDomainSnapshotObjListNum(vm->snapshots, NULL, 0);
            if (nsnapshots < 0)
                return false;

            if (nsnapshots > 0) {
                virReportError(VIR_ERR_OPERATION_INVALID,
                               _("cannot migrate domain with %d snapshots"),
                               nsnapshots);
                return false;
            }

            /* cancel migration if disk I/O error is emitted while migrating */
            if (abort_on_error &&
                virDomainObjGetState(vm, &pauseReason) == VIR_DOMAIN_PAUSED &&
                pauseReason == VIR_DOMAIN_PAUSED_IOERROR) {
                virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                               _("cannot migrate domain with I/O error"));
                return false;
            }

        }

        /* An active block-copy mirror cannot be recreated on the
         * destination, so forbid migration while one exists. */
        if (virDomainHasDiskMirror(vm)) {
            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                           _("domain has an active block job"));
            return false;
        }

        /* From here on, check the live definition instead of whatever
         * the caller passed in. */
        def = vm->def;
    }

    /* Migration with USB host devices is allowed, all other devices are
     * forbidden.
     */
    forbid = false;
    for (i = 0; i < def->nhostdevs; i++) {
        virDomainHostdevDefPtr hostdev = def->hostdevs[i];
        if (hostdev->mode != VIR_DOMAIN_HOSTDEV_MODE_SUBSYS ||
            hostdev->source.subsys.type != VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_USB) {
            forbid = true;
            break;
        }
    }
    if (forbid) {
        virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                       _("domain has assigned non-USB host devices"));
        return false;
    }

    return true;
}
|
|
|
|
|
2012-02-21 12:20:06 +00:00
|
|
|
static bool
|
|
|
|
qemuMigrationIsSafe(virDomainDefPtr def)
|
|
|
|
{
|
Convert 'int i' to 'size_t i' in src/qemu files
Convert the type of loop iterators named 'i', 'j', k',
'ii', 'jj', 'kk', to be 'size_t' instead of 'int' or
'unsigned int', also santizing 'ii', 'jj', 'kk' to use
the normal 'i', 'j', 'k' naming
Signed-off-by: Daniel P. Berrange <berrange@redhat.com>
2013-07-08 14:09:33 +00:00
|
|
|
size_t i;
|
2012-02-21 12:20:06 +00:00
|
|
|
|
2013-05-21 07:21:20 +00:00
|
|
|
for (i = 0; i < def->ndisks; i++) {
|
2012-02-21 12:20:06 +00:00
|
|
|
virDomainDiskDefPtr disk = def->disks[i];
|
|
|
|
|
2012-03-05 11:10:21 +00:00
|
|
|
/* Our code elsewhere guarantees shared disks are either readonly (in
|
|
|
|
* which case cache mode doesn't matter) or used with cache=none */
|
2012-02-21 12:20:06 +00:00
|
|
|
if (disk->src &&
|
2012-03-05 11:10:21 +00:00
|
|
|
!disk->shared &&
|
|
|
|
!disk->readonly &&
|
|
|
|
disk->cachemode != VIR_DOMAIN_DISK_CACHE_DISABLE) {
|
2013-04-11 16:28:35 +00:00
|
|
|
int rc;
|
2012-06-06 18:36:31 +00:00
|
|
|
|
|
|
|
if (disk->type == VIR_DOMAIN_DISK_TYPE_FILE) {
|
2013-04-11 16:28:35 +00:00
|
|
|
if ((rc = virStorageFileIsSharedFS(disk->src)) < 0)
|
|
|
|
return false;
|
|
|
|
else if (rc == 0)
|
2012-06-06 18:36:31 +00:00
|
|
|
continue;
|
2013-04-11 16:28:35 +00:00
|
|
|
if ((rc = virStorageFileIsClusterFS(disk->src)) < 0)
|
2012-06-06 18:36:31 +00:00
|
|
|
return false;
|
2013-04-11 16:28:35 +00:00
|
|
|
else if (rc == 1)
|
|
|
|
continue;
|
2012-07-02 18:55:26 +00:00
|
|
|
} else if (disk->type == VIR_DOMAIN_DISK_TYPE_NETWORK &&
|
|
|
|
disk->protocol == VIR_DOMAIN_DISK_PROTOCOL_RBD) {
|
|
|
|
continue;
|
2012-06-06 18:36:31 +00:00
|
|
|
}
|
2012-02-21 12:20:06 +00:00
|
|
|
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_MIGRATE_UNSAFE, "%s",
|
|
|
|
_("Migration may lead to data corruption if disks"
|
|
|
|
" use cache != none"));
|
2012-02-21 12:20:06 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2011-01-31 10:47:03 +00:00
|
|
|
/** qemuMigrationSetOffline
|
|
|
|
* Pause domain for non-live migration.
|
|
|
|
*/
|
|
|
|
int
|
2012-11-28 16:43:10 +00:00
|
|
|
qemuMigrationSetOffline(virQEMUDriverPtr driver,
|
2011-01-31 10:47:03 +00:00
|
|
|
virDomainObjPtr vm)
|
|
|
|
{
|
|
|
|
int ret;
|
2011-06-02 15:40:33 +00:00
|
|
|
VIR_DEBUG("driver=%p vm=%p", driver, vm);
|
qemu: fix crash when mixing sync and async monitor jobs
Currently, we attempt to run sync job and async job at the same time. It
means that the monitor commands for two jobs can be run in any order.
In the function qemuDomainObjEnterMonitorInternal():
if (priv->job.active == QEMU_JOB_NONE && priv->job.asyncJob) {
if (qemuDomainObjBeginNestedJob(driver, obj) < 0)
We check whether the caller is an async job by priv->job.active and
priv->job.asynJob. But when an async job is running, and a sync job is
also running at the time of the check, then priv->job.active is not
QEMU_JOB_NONE. So we cannot check whether the caller is an async job
in the function qemuDomainObjEnterMonitorInternal(), and must instead
put the burden on the caller to tell us when an async command wants
to do a nested job.
Once the burden is on the caller, then only async monitor enters need
to worry about whether the VM is still running; for sync monitor enter,
the internal return is always 0, so lots of ignore_value can be dropped.
* src/qemu/THREADS.txt: Reflect new rules.
* src/qemu/qemu_domain.h (qemuDomainObjEnterMonitorAsync): New
prototype.
* src/qemu/qemu_process.h (qemuProcessStartCPUs)
(qemuProcessStopCPUs): Add parameter.
* src/qemu/qemu_migration.h (qemuMigrationToFile): Likewise.
(qemuMigrationWaitForCompletion): Make static.
* src/qemu/qemu_domain.c (qemuDomainObjEnterMonitorInternal): Add
parameter.
(qemuDomainObjEnterMonitorAsync): New function.
(qemuDomainObjEnterMonitor, qemuDomainObjEnterMonitorWithDriver):
Update callers.
* src/qemu/qemu_driver.c (qemuDomainSaveInternal)
(qemudDomainCoreDump, doCoreDump, processWatchdogEvent)
(qemudDomainSuspend, qemudDomainResume, qemuDomainSaveImageStartVM)
(qemuDomainSnapshotCreateActive, qemuDomainRevertToSnapshot):
Likewise.
* src/qemu/qemu_process.c (qemuProcessStopCPUs)
(qemuProcessFakeReboot, qemuProcessRecoverMigration)
(qemuProcessRecoverJob, qemuProcessStart): Likewise.
* src/qemu/qemu_migration.c (qemuMigrationToFile)
(qemuMigrationWaitForCompletion, qemuMigrationUpdateJobStatus)
(qemuMigrationJobStart, qemuDomainMigrateGraphicsRelocate)
(doNativeMigrate, doTunnelMigrate, qemuMigrationPerformJob)
(qemuMigrationPerformPhase, qemuMigrationFinish)
(qemuMigrationConfirm): Likewise.
* src/qemu/qemu_hotplug.c: Drop unneeded ignore_value.
2011-07-28 23:18:24 +00:00
|
|
|
ret = qemuProcessStopCPUs(driver, vm, VIR_DOMAIN_PAUSED_MIGRATION,
|
|
|
|
QEMU_ASYNC_JOB_MIGRATION_OUT);
|
2011-01-31 10:47:03 +00:00
|
|
|
if (ret == 0) {
|
|
|
|
virDomainEventPtr event;
|
|
|
|
|
|
|
|
event = virDomainEventNewFromObj(vm,
|
|
|
|
VIR_DOMAIN_EVENT_SUSPENDED,
|
|
|
|
VIR_DOMAIN_EVENT_SUSPENDED_MIGRATED);
|
|
|
|
if (event)
|
|
|
|
qemuDomainEventQueue(driver, event);
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2013-01-14 11:45:20 +00:00
|
|
|
/* qemuMigrationSetCompression:
 *
 * Check via the monitor whether the QEMU binary supports the XBZRLE
 * (compressed) migration capability, and enable it if so.  @job tells
 * which side of the migration we are on, which selects the error
 * message and the async job used to enter the monitor.
 *
 * Returns 0 on success, -1 on failure (error already reported).
 */
static int
qemuMigrationSetCompression(virQEMUDriverPtr driver,
                            virDomainObjPtr vm,
                            enum qemuDomainAsyncJob job)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    int ret;

    if (qemuDomainObjEnterMonitorAsync(driver, vm, job) < 0)
        return -1;

    /* ret < 0: monitor error; ret == 0: capability missing;
     * ret > 0: capability available. */
    ret = qemuMonitorGetMigrationCapability(
                priv->mon,
                QEMU_MONITOR_MIGRATION_CAPS_XBZRLE);

    if (ret < 0) {
        goto cleanup;
    } else if (ret == 0) {
        /* Report against the binary on the side we are running on. */
        if (job == QEMU_ASYNC_JOB_MIGRATION_IN) {
            virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
                           _("Compressed migration is not supported by "
                             "target QEMU binary"));
        } else {
            virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
                           _("Compressed migration is not supported by "
                             "source QEMU binary"));
        }
        ret = -1;
        goto cleanup;
    }

    ret = qemuMonitorSetMigrationCapability(
                priv->mon,
                QEMU_MONITOR_MIGRATION_CAPS_XBZRLE);

 cleanup:
    /* Always leave the monitor, whichever path got us here. */
    qemuDomainObjExitMonitor(driver, vm);
    return ret;
}
|
|
|
|
|
2013-06-10 13:35:03 +00:00
|
|
|
/* qemuMigrationWaitForSpice:
 *
 * If QEMU supports seamless SPICE migration and the domain has a SPICE
 * graphics device, block until the monitor reports that the SPICE client
 * migration has finished.  The domain lock is dropped while sleeping so
 * other API calls can make progress.
 *
 * Returns 0 on success or when there is nothing to wait for, -1 on
 * monitor failure.
 */
static int
qemuMigrationWaitForSpice(virQEMUDriverPtr driver,
                          virDomainObjPtr vm)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    bool wait_for_spice = false;
    bool spice_migrated = false;
    size_t i = 0;

    /* Only wait when seamless migration is supported and at least one
     * SPICE graphics device is configured. */
    if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_SEAMLESS_MIGRATION)) {
        for (i = 0; i < vm->def->ngraphics; i++) {
            if (vm->def->graphics[i]->type == VIR_DOMAIN_GRAPHICS_TYPE_SPICE) {
                wait_for_spice = true;
                break;
            }
        }
    }

    if (!wait_for_spice)
        return 0;

    while (!spice_migrated) {
        /* Poll every 50ms for progress & to allow cancellation */
        struct timespec ts = { .tv_sec = 0, .tv_nsec = 50 * 1000 * 1000ull };

        qemuDomainObjEnterMonitor(driver, vm);
        if (qemuMonitorGetSpiceMigrationStatus(priv->mon,
                                               &spice_migrated) < 0) {
            qemuDomainObjExitMonitor(driver, vm);
            return -1;
        }
        qemuDomainObjExitMonitor(driver, vm);
        /* Drop the domain lock while sleeping so other threads are not
         * blocked for the duration of the wait. */
        virObjectUnlock(vm);
        nanosleep(&ts, NULL);
        virObjectLock(vm);
    }

    return 0;
}
|
2013-01-14 11:45:20 +00:00
|
|
|
|
2011-04-20 16:33:27 +00:00
|
|
|
/* Query QEMU for the current migration status and refresh the job info
 * stored in the domain's private data.
 *
 * Returns 0 when the migration is still active or has completed, -1 on
 * monitor failure, on an inactive/failed/cancelled migration (an error
 * is reported in those cases), or when the guest already exited.
 */
static int
qemuMigrationUpdateJobStatus(virQEMUDriverPtr driver,
                             virDomainObjPtr vm,
                             const char *job,
                             enum qemuDomainAsyncJob asyncJob)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    int ret;
    qemuMonitorMigrationStatus status;

    memset(&status, 0, sizeof(status));

    ret = qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob);
    if (ret < 0) {
        /* Guest already exited; nothing further to update. */
        return -1;
    }
    ret = qemuMonitorGetMigrationStatus(priv->mon, &status);

    qemuDomainObjExitMonitor(driver, vm);

    /* Snapshot the monitor's answer before checking for errors so the
     * stored status reflects whatever the last query produced. */
    priv->job.status = status;

    if (ret < 0 || virTimeMillisNow(&priv->job.info.timeElapsed) < 0) {
        priv->job.info.type = VIR_DOMAIN_JOB_FAILED;
        return -1;
    }
    /* Convert the absolute timestamp into time elapsed since job start. */
    priv->job.info.timeElapsed -= priv->job.start;

    /* Default to failure; only ACTIVE and COMPLETED count as success. */
    ret = -1;
    switch (priv->job.status.status) {
    case QEMU_MONITOR_MIGRATION_STATUS_INACTIVE:
        priv->job.info.type = VIR_DOMAIN_JOB_NONE;
        virReportError(VIR_ERR_OPERATION_FAILED,
                       _("%s: %s"), job, _("is not active"));
        break;

    case QEMU_MONITOR_MIGRATION_STATUS_ACTIVE:
        /* Copy per-category progress counters reported by QEMU. */
        priv->job.info.fileTotal = priv->job.status.disk_total;
        priv->job.info.fileRemaining = priv->job.status.disk_remaining;
        priv->job.info.fileProcessed = priv->job.status.disk_transferred;

        priv->job.info.memTotal = priv->job.status.ram_total;
        priv->job.info.memRemaining = priv->job.status.ram_remaining;
        priv->job.info.memProcessed = priv->job.status.ram_transferred;

        /* Aggregate totals cover both RAM and disk transfer. */
        priv->job.info.dataTotal =
            priv->job.status.ram_total + priv->job.status.disk_total;
        priv->job.info.dataRemaining =
            priv->job.status.ram_remaining + priv->job.status.disk_remaining;
        priv->job.info.dataProcessed =
            priv->job.status.ram_transferred +
            priv->job.status.disk_transferred;

        ret = 0;
        break;

    case QEMU_MONITOR_MIGRATION_STATUS_COMPLETED:
        priv->job.info.type = VIR_DOMAIN_JOB_COMPLETED;
        ret = 0;
        break;

    case QEMU_MONITOR_MIGRATION_STATUS_ERROR:
        priv->job.info.type = VIR_DOMAIN_JOB_FAILED;
        virReportError(VIR_ERR_OPERATION_FAILED,
                       _("%s: %s"), job, _("unexpectedly failed"));
        break;

    case QEMU_MONITOR_MIGRATION_STATUS_CANCELLED:
        priv->job.info.type = VIR_DOMAIN_JOB_CANCELLED;
        virReportError(VIR_ERR_OPERATION_ABORTED,
                       _("%s: %s"), job, _("canceled by client"));
        break;
    }

    return ret;
}
|
|
|
|
|
|
|
|
|
qemu: fix crash when mixing sync and async monitor jobs
Currently, we attempt to run sync job and async job at the same time. It
means that the monitor commands for two jobs can be run in any order.
In the function qemuDomainObjEnterMonitorInternal():
if (priv->job.active == QEMU_JOB_NONE && priv->job.asyncJob) {
if (qemuDomainObjBeginNestedJob(driver, obj) < 0)
We check whether the caller is an async job by priv->job.active and
priv->job.asynJob. But when an async job is running, and a sync job is
also running at the time of the check, then priv->job.active is not
QEMU_JOB_NONE. So we cannot check whether the caller is an async job
in the function qemuDomainObjEnterMonitorInternal(), and must instead
put the burden on the caller to tell us when an async command wants
to do a nested job.
Once the burden is on the caller, then only async monitor enters need
to worry about whether the VM is still running; for sync monitor enter,
the internal return is always 0, so lots of ignore_value can be dropped.
* src/qemu/THREADS.txt: Reflect new rules.
* src/qemu/qemu_domain.h (qemuDomainObjEnterMonitorAsync): New
prototype.
* src/qemu/qemu_process.h (qemuProcessStartCPUs)
(qemuProcessStopCPUs): Add parameter.
* src/qemu/qemu_migration.h (qemuMigrationToFile): Likewise.
(qemuMigrationWaitForCompletion): Make static.
* src/qemu/qemu_domain.c (qemuDomainObjEnterMonitorInternal): Add
parameter.
(qemuDomainObjEnterMonitorAsync): New function.
(qemuDomainObjEnterMonitor, qemuDomainObjEnterMonitorWithDriver):
Update callers.
* src/qemu/qemu_driver.c (qemuDomainSaveInternal)
(qemudDomainCoreDump, doCoreDump, processWatchdogEvent)
(qemudDomainSuspend, qemudDomainResume, qemuDomainSaveImageStartVM)
(qemuDomainSnapshotCreateActive, qemuDomainRevertToSnapshot):
Likewise.
* src/qemu/qemu_process.c (qemuProcessStopCPUs)
(qemuProcessFakeReboot, qemuProcessRecoverMigration)
(qemuProcessRecoverJob, qemuProcessStart): Likewise.
* src/qemu/qemu_migration.c (qemuMigrationToFile)
(qemuMigrationWaitForCompletion, qemuMigrationUpdateJobStatus)
(qemuMigrationJobStart, qemuDomainMigrateGraphicsRelocate)
(doNativeMigrate, doTunnelMigrate, qemuMigrationPerformJob)
(qemuMigrationPerformPhase, qemuMigrationFinish)
(qemuMigrationConfirm): Likewise.
* src/qemu/qemu_hotplug.c: Drop unneeded ignore_value.
2011-07-28 23:18:24 +00:00
|
|
|
/* Poll the migration job until it finishes, fails, or is aborted.
 *
 * While polling, the job may be cancelled when abort_on_error is set and
 * the guest paused due to a disk I/O error, or considered failed when the
 * connection to the destination (dconn, may be NULL) is lost.
 *
 * Returns 0 when the job completed successfully, -1 otherwise.
 */
static int
qemuMigrationWaitForCompletion(virQEMUDriverPtr driver, virDomainObjPtr vm,
                               enum qemuDomainAsyncJob asyncJob,
                               virConnectPtr dconn, bool abort_on_error)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    const char *job;
    int pauseReason;

    /* Pick a human-readable job name for error messages. */
    switch (priv->job.asyncJob) {
    case QEMU_ASYNC_JOB_MIGRATION_OUT:
        job = _("migration job");
        break;
    case QEMU_ASYNC_JOB_SAVE:
        job = _("domain save job");
        break;
    case QEMU_ASYNC_JOB_DUMP:
        job = _("domain core dump job");
        break;
    default:
        job = _("job");
    }

    priv->job.info.type = VIR_DOMAIN_JOB_UNBOUNDED;

    /* qemuMigrationUpdateJobStatus() flips job.info.type away from
     * UNBOUNDED once the job completes, fails or is cancelled. */
    while (priv->job.info.type == VIR_DOMAIN_JOB_UNBOUNDED) {
        /* Poll every 50ms for progress & to allow cancellation */
        struct timespec ts = { .tv_sec = 0, .tv_nsec = 50 * 1000 * 1000ull };

        /* cancel migration if disk I/O error is emitted while migrating */
        if (abort_on_error &&
            virDomainObjGetState(vm, &pauseReason) == VIR_DOMAIN_PAUSED &&
            pauseReason == VIR_DOMAIN_PAUSED_IOERROR)
            goto cancel;

        if (qemuMigrationUpdateJobStatus(driver, vm, job, asyncJob) < 0)
            goto cleanup;

        if (dconn && virConnectIsAlive(dconn) <= 0) {
            virReportError(VIR_ERR_OPERATION_FAILED, "%s",
                           _("Lost connection to destination host"));
            goto cleanup;
        }

        /* Release the domain lock while sleeping so other APIs can run. */
        virObjectUnlock(vm);

        nanosleep(&ts, NULL);

        virObjectLock(vm);
    }

    /* Normal loop exit falls through here as well; success is decided
     * solely by the final job state. */
 cleanup:
    if (priv->job.info.type == VIR_DOMAIN_JOB_COMPLETED)
        return 0;
    else
        return -1;

 cancel:
    /* I/O error abort: ask QEMU to cancel the migration (best effort;
     * failure to enter the monitor is ignored). */
    if (virDomainObjIsActive(vm)) {
        if (qemuDomainObjEnterMonitorAsync(driver, vm,
                                           priv->job.asyncJob) == 0) {
            qemuMonitorMigrateCancel(priv->mon);
            qemuDomainObjExitMonitor(driver, vm);
        }
    }

    priv->job.info.type = VIR_DOMAIN_JOB_FAILED;
    virReportError(VIR_ERR_OPERATION_FAILED,
                   _("%s: %s"), job, _("failed due to I/O error"));
    return -1;
}
|
|
|
|
|
|
|
|
|
2011-02-17 13:39:36 +00:00
|
|
|
/* Tell the SPICE server in QEMU where the client should reconnect after
 * migration.
 *
 * Connection parameters come from the migration cookie sent by the
 * destination; graphicsuri (may be NULL) optionally overrides them.
 * URI values take precedence over cookie values where both are present.
 *
 * Returns 0 on success or when there is nothing to relocate (no cookie
 * data, or a non-SPICE graphics type), -1 on error.
 */
static int
qemuDomainMigrateGraphicsRelocate(virQEMUDriverPtr driver,
                                  virDomainObjPtr vm,
                                  qemuMigrationCookiePtr cookie,
                                  const char *graphicsuri)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    int ret = -1;
    const char *listenAddress = NULL;
    virSocketAddr addr;
    virURIPtr uri = NULL;
    int type = -1;
    int port = -1;
    int tlsPort = -1;
    const char *tlsSubject = NULL;

    if (!cookie || (!cookie->graphics && !graphicsuri))
        return 0;

    if (graphicsuri && !(uri = virURIParse(graphicsuri)))
        goto cleanup;

    if (cookie->graphics) {
        type = cookie->graphics->type;

        listenAddress = cookie->graphics->listen;

        /* A missing or wildcard (0.0.0.0 / ::) listen address is useless
         * to the client; fall back to the destination's hostname. */
        if (!listenAddress ||
            (virSocketAddrParse(&addr, listenAddress, AF_UNSPEC) > 0 &&
             virSocketAddrIsWildcard(&addr)))
            listenAddress = cookie->remoteHostname;

        port = cookie->graphics->port;
        tlsPort = cookie->graphics->tlsPort;
        tlsSubject = cookie->graphics->tlsSubject;
    }

    /* Apply overrides from the caller-supplied URI, e.g.
     * spice://host:port/?tlsPort=n&tlsSubject=... */
    if (uri) {
        size_t i;

        if ((type = virDomainGraphicsTypeFromString(uri->scheme)) < 0) {
            virReportError(VIR_ERR_INVALID_ARG,
                           _("unknown graphics type %s"), uri->scheme);
            goto cleanup;
        }

        if (uri->server)
            listenAddress = uri->server;
        if (uri->port > 0)
            port = uri->port;

        for (i = 0; i < uri->paramsCount; i++) {
            virURIParamPtr param = uri->params + i;

            if (STRCASEEQ(param->name, "tlsPort")) {
                if (virStrToLong_i(param->value, NULL, 10, &tlsPort) < 0) {
                    virReportError(VIR_ERR_INVALID_ARG,
                                   _("invalid tlsPort number: %s"),
                                   param->value);
                    goto cleanup;
                }
            } else if (STRCASEEQ(param->name, "tlsSubject")) {
                tlsSubject = param->value;
            }
        }
    }

    /* QEMU doesn't support VNC relocation yet, so
     * skip it to avoid generating an error
     */
    if (type != VIR_DOMAIN_GRAPHICS_TYPE_SPICE) {
        ret = 0;
        goto cleanup;
    }

    if (qemuDomainObjEnterMonitorAsync(driver, vm,
                                       QEMU_ASYNC_JOB_MIGRATION_OUT) == 0) {
        ret = qemuMonitorGraphicsRelocate(priv->mon, type, listenAddress,
                                          port, tlsPort, tlsSubject);
        qemuDomainObjExitMonitor(driver, vm);
    }

 cleanup:
    virURIFree(uri);
    return ret;
}
|
|
|
|
|
|
|
|
|
2012-10-01 15:18:20 +00:00
|
|
|
static int
|
2012-11-28 16:43:10 +00:00
|
|
|
qemuDomainMigrateOPDRelocate(virQEMUDriverPtr driver ATTRIBUTE_UNUSED,
|
2012-10-01 15:18:20 +00:00
|
|
|
virDomainObjPtr vm,
|
|
|
|
qemuMigrationCookiePtr cookie)
|
|
|
|
{
|
2012-10-01 15:18:22 +00:00
|
|
|
virDomainNetDefPtr netptr;
|
|
|
|
int ret = -1;
|
Convert 'int i' to 'size_t i' in src/qemu files
Convert the type of loop iterators named 'i', 'j', k',
'ii', 'jj', 'kk', to be 'size_t' instead of 'int' or
'unsigned int', also santizing 'ii', 'jj', 'kk' to use
the normal 'i', 'j', 'k' naming
Signed-off-by: Daniel P. Berrange <berrange@redhat.com>
2013-07-08 14:09:33 +00:00
|
|
|
size_t i;
|
2012-10-01 15:18:20 +00:00
|
|
|
|
|
|
|
for (i = 0; i < cookie->network->nnets; i++) {
|
|
|
|
netptr = vm->def->nets[i];
|
|
|
|
|
|
|
|
switch (cookie->network->net[i].vporttype) {
|
|
|
|
case VIR_NETDEV_VPORT_PROFILE_NONE:
|
|
|
|
case VIR_NETDEV_VPORT_PROFILE_8021QBG:
|
|
|
|
case VIR_NETDEV_VPORT_PROFILE_8021QBH:
|
2012-10-01 15:18:22 +00:00
|
|
|
break;
|
2012-10-01 15:18:20 +00:00
|
|
|
case VIR_NETDEV_VPORT_PROFILE_OPENVSWITCH:
|
2012-10-01 15:18:22 +00:00
|
|
|
if (virNetDevOpenvswitchSetMigrateData(cookie->network->net[i].portdata,
|
|
|
|
netptr->ifname) != 0) {
|
|
|
|
virReportSystemError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
_("Unable to run command to set OVS port data for "
|
|
|
|
"interface %s"), netptr->ifname);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
break;
|
2012-10-01 15:18:20 +00:00
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-10-01 15:18:22 +00:00
|
|
|
ret = 0;
|
|
|
|
cleanup:
|
2012-10-01 15:18:20 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2012-03-19 15:48:43 +00:00
|
|
|
/* This is called for outgoing non-p2p migrations when a connection to the
|
|
|
|
* client which initiated the migration was closed but we were waiting for it
|
|
|
|
* to follow up with the next phase, that is, in between
|
|
|
|
* qemuDomainMigrateBegin3 and qemuDomainMigratePerform3 or
|
|
|
|
* qemuDomainMigratePerform3 and qemuDomainMigrateConfirm3.
|
|
|
|
*/
|
2013-07-15 14:53:13 +00:00
|
|
|
/* Close-callback invoked when the client connection driving an outgoing
 * non-p2p migration goes away between migration phases (see the comment
 * above this function).  Decides, based on the current migration phase,
 * whether the async job can simply be discarded.
 *
 * Returns the domain object (still locked) so the caller keeps its
 * reference; never NULL here.
 */
static virDomainObjPtr
qemuMigrationCleanup(virDomainObjPtr vm,
                     virConnectPtr conn,
                     void *opaque)
{
    virQEMUDriverPtr driver = opaque;
    qemuDomainObjPrivatePtr priv = vm->privateData;

    VIR_DEBUG("vm=%s, conn=%p, asyncJob=%s, phase=%s",
              vm->def->name, conn,
              qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
              qemuDomainAsyncJobPhaseToString(priv->job.asyncJob,
                                              priv->job.phase));

    /* Only an outgoing migration job owned by this connection matters. */
    if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_OUT))
        goto cleanup;

    VIR_DEBUG("The connection which started outgoing migration of domain %s"
              " was closed; canceling the migration",
              vm->def->name);

    /* No 'default' label: enumerating every phase lets the compiler warn
     * if a new phase is added without being handled here. */
    switch ((enum qemuMigrationJobPhase) priv->job.phase) {
    case QEMU_MIGRATION_PHASE_BEGIN3:
        /* just forget we were about to migrate */
        qemuDomainObjDiscardAsyncJob(driver, vm);
        break;

    case QEMU_MIGRATION_PHASE_PERFORM3_DONE:
        VIR_WARN("Migration of domain %s finished but we don't know if the"
                 " domain was successfully started on destination or not",
                 vm->def->name);
        /* clear the job and let higher levels decide what to do */
        qemuDomainObjDiscardAsyncJob(driver, vm);
        break;

    case QEMU_MIGRATION_PHASE_PERFORM3:
        /* cannot be seen without an active migration API; unreachable */
    case QEMU_MIGRATION_PHASE_CONFIRM3:
    case QEMU_MIGRATION_PHASE_CONFIRM3_CANCELLED:
        /* all done; unreachable */
    case QEMU_MIGRATION_PHASE_PREPARE:
    case QEMU_MIGRATION_PHASE_FINISH2:
    case QEMU_MIGRATION_PHASE_FINISH3:
        /* incoming migration; unreachable */
    case QEMU_MIGRATION_PHASE_PERFORM2:
        /* single phase outgoing migration; unreachable */
    case QEMU_MIGRATION_PHASE_NONE:
    case QEMU_MIGRATION_PHASE_LAST:
        /* unreachable */
        ;
    }

 cleanup:
    return vm;
}
|
|
|
|
|
2013-06-25 07:44:14 +00:00
|
|
|
|
2011-07-19 00:27:32 +00:00
|
|
|
/* The caller is supposed to lock the vm and start a migration job. */
|
2013-06-25 07:44:14 +00:00
|
|
|
/* Begin phase of outgoing migration: validate that the domain can be
 * migrated with the requested flags, bake the outgoing migration cookie
 * into *cookieout, and produce the domain XML to send to the destination
 * (xmlin, when non-NULL, replaces the current definition after an ABI
 * stability check).
 *
 * The caller must hold the vm lock and have started a migration job.
 * Returns a newly allocated XML string (caller frees) or NULL on error.
 */
static char
*qemuMigrationBeginPhase(virQEMUDriverPtr driver,
                         virDomainObjPtr vm,
                         const char *xmlin,
                         const char *dname,
                         char **cookieout,
                         int *cookieoutlen,
                         unsigned long flags)
{
    char *rv = NULL;
    qemuMigrationCookiePtr mig = NULL;
    virDomainDefPtr def = NULL;
    qemuDomainObjPrivatePtr priv = vm->privateData;
    virCapsPtr caps = NULL;
    unsigned int cookieFlags = QEMU_MIGRATION_COOKIE_LOCKSTATE;
    bool abort_on_error = !!(flags & VIR_MIGRATE_ABORT_ON_ERROR);

    VIR_DEBUG("driver=%p, vm=%p, xmlin=%s, dname=%s,"
              " cookieout=%p, cookieoutlen=%p, flags=%lx",
              driver, vm, NULLSTR(xmlin), NULLSTR(dname),
              cookieout, cookieoutlen, flags);

    if (!(caps = virQEMUDriverGetCapabilities(driver, false)))
        goto cleanup;

    /* Only set the phase if we are inside QEMU_ASYNC_JOB_MIGRATION_OUT.
     * Otherwise we will start the async job later in the perform phase losing
     * change protection.
     */
    if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT)
        qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_BEGIN3);

    /* These helpers report their own errors on failure. */
    if (!qemuMigrationIsAllowed(driver, vm, NULL, true, abort_on_error))
        goto cleanup;

    if (!(flags & VIR_MIGRATE_UNSAFE) && !qemuMigrationIsSafe(vm->def))
        goto cleanup;

    /* Non-shared storage migration uses the NBD server when the QEMU
     * binary supports it; this is advertised via the cookie. */
    if (flags & (VIR_MIGRATE_NON_SHARED_DISK | VIR_MIGRATE_NON_SHARED_INC) &&
        virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_NBD_SERVER)) {
        /* TODO support NBD for TUNNELLED migration */
        if (flags & VIR_MIGRATE_TUNNELLED) {
            virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
                           _("NBD in tunnelled migration is currently not supported"));
            goto cleanup;
        }
        cookieFlags |= QEMU_MIGRATION_COOKIE_NBD;
        priv->nbdPort = 0;
    }

    if (!(mig = qemuMigrationEatCookie(driver, vm, NULL, 0, 0)))
        goto cleanup;

    if (qemuMigrationBakeCookie(mig, driver, vm,
                                cookieout, cookieoutlen,
                                cookieFlags) < 0)
        goto cleanup;

    /* Offline migration is incompatible with storage copy and
     * tunnelling, and only makes sense with a persistent destination. */
    if (flags & VIR_MIGRATE_OFFLINE) {
        if (flags & (VIR_MIGRATE_NON_SHARED_DISK |
                     VIR_MIGRATE_NON_SHARED_INC)) {
            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                           _("offline migration cannot handle "
                             "non-shared storage"));
            goto cleanup;
        }
        if (!(flags & VIR_MIGRATE_PERSIST_DEST)) {
            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                           _("offline migration must be specified with "
                             "the persistent flag set"));
            goto cleanup;
        }
        if (flags & VIR_MIGRATE_TUNNELLED) {
            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                           _("tunnelled offline migration does not "
                             "make sense"));
            goto cleanup;
        }
    }

    if (xmlin) {
        /* Caller supplied a replacement definition: parse it and verify
         * it is ABI-compatible with the running domain before using it. */
        if (!(def = virDomainDefParseString(xmlin, caps, driver->xmlopt,
                                            QEMU_EXPECTED_VIRT_TYPES,
                                            VIR_DOMAIN_XML_INACTIVE)))
            goto cleanup;

        if (!virDomainDefCheckABIStability(vm->def, def))
            goto cleanup;

        rv = qemuDomainDefFormatLive(driver, def, false, true);
    } else {
        rv = qemuDomainDefFormatLive(driver, vm->def, false, true);
    }

 cleanup:
    qemuMigrationCookieFree(mig);
    virObjectUnref(caps);
    virDomainDefFree(def);
    return rv;
}
|
|
|
|
|
2013-06-25 07:44:14 +00:00
|
|
|
/**
 * qemuMigrationBegin:
 *
 * Source-side entry point for the Begin phase of migration.  Grabs a
 * job on @vm (a full async migration job when change protection is
 * requested, otherwise a plain modify job), validates that the domain
 * is in a migratable state, and produces the domain XML to send to the
 * destination via qemuMigrationBeginPhase().
 *
 * Returns the XML string on success (caller frees), NULL on error.
 * On success with VIR_MIGRATE_CHANGE_PROTECTION the async job is left
 * active; it is finished later by the confirm() step.  @vm is unlocked
 * before returning; if the domain object was disposed of, vm is NULL
 * and must not be touched.
 */
char *
qemuMigrationBegin(virConnectPtr conn,
                   virDomainObjPtr vm,
                   const char *xmlin,
                   const char *dname,
                   char **cookieout,
                   int *cookieoutlen,
                   unsigned long flags)
{
    virQEMUDriverPtr driver = conn->privateData;
    char *xml = NULL;
    enum qemuDomainAsyncJob asyncJob;

    /* With change protection the migration job stays active across API
     * calls, blocking any other modification of the domain; without it
     * a short-lived modify job is enough for this phase. */
    if ((flags & VIR_MIGRATE_CHANGE_PROTECTION)) {
        if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
            goto cleanup;
        asyncJob = QEMU_ASYNC_JOB_MIGRATION_OUT;
    } else {
        if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
            goto cleanup;
        asyncJob = QEMU_ASYNC_JOB_NONE;
    }

    /* An inactive domain can only be migrated offline. */
    if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) {
        virReportError(VIR_ERR_OPERATION_INVALID,
                       "%s", _("domain is not running"));
        goto endjob;
    }

    /* Check if there is any ejected media.
     * We don't want to require them on the destination.
     */
    if (!(flags & VIR_MIGRATE_OFFLINE) &&
        qemuDomainCheckEjectableMedia(driver, vm, asyncJob) < 0)
        goto endjob;

    if (!(xml = qemuMigrationBeginPhase(driver, vm, xmlin, dname,
                                        cookieout, cookieoutlen,
                                        flags)))
        goto endjob;

    if ((flags & VIR_MIGRATE_CHANGE_PROTECTION)) {
        /* We keep the job active across API calls until the confirm() call.
         * This prevents any other APIs being invoked while migration is taking
         * place.
         */
        if (virCloseCallbacksSet(driver->closeCallbacks, vm, conn,
                                 qemuMigrationCleanup) < 0)
            goto endjob;
        if (qemuMigrationJobContinue(vm) == 0) {
            /* Last reference dropped: the domain object is gone, so
             * discard the partial results and report failure. */
            vm = NULL;
            virReportError(VIR_ERR_OPERATION_FAILED,
                           "%s", _("domain disappeared"));
            VIR_FREE(xml);
            if (cookieout)
                VIR_FREE(*cookieout);
        }
    } else {
        /* No change protection: release the modify job now. */
        goto endjob;
    }

cleanup:
    if (vm)
        virObjectUnlock(vm);
    return xml;

endjob:
    /* End whichever job type was started; either call NULLs vm when the
     * domain object was freed with its last reference. */
    if ((flags & VIR_MIGRATE_CHANGE_PROTECTION)) {
        if (qemuMigrationJobFinish(driver, vm) == 0)
            vm = NULL;
    } else {
        if (qemuDomainObjEndJob(driver, vm) == 0)
            vm = NULL;
    }
    goto cleanup;
}
|
|
|
|
|
2011-02-03 11:09:28 +00:00
|
|
|
|
2011-01-31 10:47:03 +00:00
|
|
|
/* Prepare is the first step, and it runs on the destination host.
|
|
|
|
*/
|
2011-06-13 08:48:34 +00:00
|
|
|
|
2012-03-16 06:56:19 +00:00
|
|
|
static void
|
2012-11-28 16:43:10 +00:00
|
|
|
qemuMigrationPrepareCleanup(virQEMUDriverPtr driver,
|
2012-03-16 06:56:19 +00:00
|
|
|
virDomainObjPtr vm)
|
|
|
|
{
|
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
|
|
|
|
|
|
|
VIR_DEBUG("driver=%p, vm=%s, job=%s, asyncJob=%s",
|
|
|
|
driver,
|
|
|
|
vm->def->name,
|
|
|
|
qemuDomainJobTypeToString(priv->job.active),
|
|
|
|
qemuDomainAsyncJobTypeToString(priv->job.asyncJob));
|
|
|
|
|
|
|
|
if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_IN))
|
|
|
|
return;
|
|
|
|
qemuDomainObjDiscardAsyncJob(driver, vm);
|
|
|
|
}
|
|
|
|
|
2011-06-13 08:48:34 +00:00
|
|
|
/**
 * qemuMigrationPrepareAny:
 *
 * Common destination-side Prepare phase shared by the tunnelled (@st
 * non-NULL) and direct (TCP, @port) variants.  Validates flags, lets
 * the migration hook filter the incoming XML, adds the domain to the
 * domain list, starts the async MIGRATION_IN job, launches QEMU paused
 * with the appropriate -incoming argument, and bakes the outgoing
 * migration cookie.  On success the job is kept active until finish().
 *
 * Ownership of *def transfers to the domain list on success (it is set
 * to NULL).  Returns 0 on success, -1 on error.
 */
static int
qemuMigrationPrepareAny(virQEMUDriverPtr driver,
                        virConnectPtr dconn,
                        const char *cookiein,
                        int cookieinlen,
                        char **cookieout,
                        int *cookieoutlen,
                        virDomainDefPtr *def,
                        virStreamPtr st,
                        unsigned int port,
                        unsigned long flags)
{
    virDomainObjPtr vm = NULL;
    virDomainEventPtr event = NULL;
    int ret = -1;
    int dataFD[2] = { -1, -1 };     /* pipe for tunnelled data, if any */
    qemuDomainObjPrivatePtr priv = NULL;
    unsigned long long now;
    qemuMigrationCookiePtr mig = NULL;
    bool tunnel = !!st;             /* stream present => tunnelled mode */
    char *origname = NULL;
    char *xmlout = NULL;            /* hook-filtered XML, if provided */
    unsigned int cookieFlags;
    virCapsPtr caps = NULL;
    const char *listenAddr = NULL;
    char *migrateFrom = NULL;       /* value for QEMU's -incoming */
    bool abort_on_error = !!(flags & VIR_MIGRATE_ABORT_ON_ERROR);

    if (virTimeMillisNow(&now) < 0)
        return -1;

    /* Offline migration is XML-only: it cannot copy storage, needs the
     * config to be persisted on the destination, and has no data to
     * tunnel. */
    if (flags & VIR_MIGRATE_OFFLINE) {
        if (flags & (VIR_MIGRATE_NON_SHARED_DISK |
                     VIR_MIGRATE_NON_SHARED_INC)) {
            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                           _("offline migration cannot handle "
                             "non-shared storage"));
            goto cleanup;
        }
        if (!(flags & VIR_MIGRATE_PERSIST_DEST)) {
            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                           _("offline migration must be specified with "
                             "the persistent flag set"));
            goto cleanup;
        }
        if (tunnel) {
            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                           _("tunnelled offline migration does not "
                             "make sense"));
            goto cleanup;
        }
    }

    if (!(caps = virQEMUDriverGetCapabilities(driver, false)))
        goto cleanup;

    if (!qemuMigrationIsAllowed(driver, NULL, *def, true, abort_on_error))
        goto cleanup;

    /* Let migration hook filter domain XML */
    if (virHookPresent(VIR_HOOK_DRIVER_QEMU)) {
        char *xml;
        int hookret;

        if (!(xml = qemuDomainDefFormatXML(driver, *def,
                                           VIR_DOMAIN_XML_SECURE |
                                           VIR_DOMAIN_XML_MIGRATABLE)))
            goto cleanup;

        hookret = virHookCall(VIR_HOOK_DRIVER_QEMU, (*def)->name,
                              VIR_HOOK_QEMU_OP_MIGRATE, VIR_HOOK_SUBOP_BEGIN,
                              NULL, xml, &xmlout);
        VIR_FREE(xml);

        if (hookret < 0) {
            goto cleanup;
        } else if (hookret == 0) {
            if (!*xmlout) {
                VIR_DEBUG("Migrate hook filter returned nothing; using the"
                          " original XML");
            } else {
                virDomainDefPtr newdef;

                VIR_DEBUG("Using hook-filtered domain XML: %s", xmlout);
                newdef = virDomainDefParseString(xmlout, caps, driver->xmlopt,
                                                 QEMU_EXPECTED_VIRT_TYPES,
                                                 VIR_DOMAIN_XML_INACTIVE);
                if (!newdef)
                    goto cleanup;

                /* The hook must not change the guest ABI, or the
                 * incoming migration stream would not match. */
                if (!virDomainDefCheckABIStability(*def, newdef)) {
                    virDomainDefFree(newdef);
                    goto cleanup;
                }

                virDomainDefFree(*def);
                *def = newdef;
            }
        }
    }

    if (tunnel) {
        /* QEMU will be started with -incoming stdio
         * (which qemu_command might convert to exec:cat or fd:n)
         */
        if (VIR_STRDUP(migrateFrom, "stdio") < 0)
            goto cleanup;
    } else {
        virQEMUCapsPtr qemuCaps = NULL;
        struct addrinfo *info = NULL;
        struct addrinfo hints = { .ai_flags = AI_ADDRCONFIG,
                                  .ai_socktype = SOCK_STREAM };

        if (!(qemuCaps = virQEMUCapsCacheLookupCopy(driver->qemuCapsCache,
                                                    (*def)->emulator)))
            goto cleanup;

        /* Listen on :: instead of 0.0.0.0 if QEMU understands it
         * and there is at least one IPv6 address configured
         */
        if (virQEMUCapsGet(qemuCaps, QEMU_CAPS_IPV6_MIGRATION) &&
            getaddrinfo("::", NULL, &hints, &info) == 0) {
            freeaddrinfo(info);
            listenAddr = "[::]";
        } else {
            listenAddr = "0.0.0.0";
        }
        virObjectUnref(qemuCaps);

        /* QEMU will be started with -incoming [::]:port
         * or -incoming 0.0.0.0:port
         */
        if (virAsprintf(&migrateFrom, "tcp:%s:%d", listenAddr, port) < 0)
            goto cleanup;
    }

    /* Add the (possibly hook-filtered) definition to the domain list;
     * on success the list owns it, so *def is cleared below. */
    if (!(vm = virDomainObjListAdd(driver->domains, *def,
                                   driver->xmlopt,
                                   VIR_DOMAIN_OBJ_LIST_ADD_LIVE |
                                   VIR_DOMAIN_OBJ_LIST_ADD_CHECK_LIVE,
                                   NULL)))
        goto cleanup;

    *def = NULL;
    priv = vm->privateData;
    priv->origname = origname;
    origname = NULL;

    if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen,
                                       QEMU_MIGRATION_COOKIE_LOCKSTATE |
                                       QEMU_MIGRATION_COOKIE_NBD)))
        goto cleanup;

    if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
        goto cleanup;
    qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_PREPARE);

    /* Domain starts inactive, even if the domain XML had an id field. */
    vm->def->id = -1;

    /* Offline migration transfers no data, so no QEMU process and no
     * tunnel pipe are needed. */
    if (flags & VIR_MIGRATE_OFFLINE)
        goto done;

    if (tunnel &&
        (pipe(dataFD) < 0 || virSetCloseExec(dataFD[1]) < 0)) {
        virReportSystemError(errno, "%s",
                             _("cannot create pipe for tunnelled migration"));
        goto endjob;
    }

    if (qemuDomainObjBeginNestedJob(driver, vm,
                                    QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
        goto endjob;

    /* Start the QEMU daemon, with the same command-line arguments plus
     * -incoming $migrateFrom
     */
    if (qemuProcessStart(dconn, driver, vm, migrateFrom, dataFD[0], NULL, NULL,
                         VIR_NETDEV_VPORT_PROFILE_OP_MIGRATE_IN_START,
                         VIR_QEMU_PROCESS_START_PAUSED |
                         VIR_QEMU_PROCESS_START_AUTODESTROY) < 0) {
        virDomainAuditStart(vm, "migrated", false);
        /* NOTE(review): other callers in this file check
         * qemuDomainObjEndJob(...) == 0 to detect that the last
         * reference was dropped; '< 0' looks suspicious here and may
         * never NULL vm — verify against the function's return
         * convention. */
        if (qemuDomainObjEndJob(driver, vm) < 0)
            vm = NULL;
        goto endjob;
    }

    if (tunnel) {
        if (virFDStreamOpen(st, dataFD[1]) < 0) {
            virReportSystemError(errno, "%s",
                                 _("cannot pass pipe for tunnelled migration"));
            goto stop;
        }
        dataFD[1] = -1; /* 'st' owns the FD now & will close it */
    }

    if (flags & VIR_MIGRATE_COMPRESSED &&
        qemuMigrationSetCompression(driver, vm,
                                    QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
        goto stop;

    /* Take over the lock-manager state carried in the cookie, if any. */
    if (mig->lockState) {
        VIR_DEBUG("Received lockstate %s", mig->lockState);
        VIR_FREE(priv->lockState);
        priv->lockState = mig->lockState;
        mig->lockState = NULL;
    } else {
        VIR_DEBUG("Received no lockstate");
    }

done:
    if (flags & VIR_MIGRATE_OFFLINE)
        cookieFlags = 0;
    else
        cookieFlags = QEMU_MIGRATION_COOKIE_GRAPHICS;

    /* Start the NBD server for non-shared-storage migration when both
     * sides support it; advertise it back via the cookie. */
    if (mig->nbd &&
        flags & (VIR_MIGRATE_NON_SHARED_DISK | VIR_MIGRATE_NON_SHARED_INC) &&
        virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_NBD_SERVER)) {
        if (qemuMigrationStartNBDServer(driver, vm, listenAddr) < 0) {
            /* error already reported */
            goto endjob;
        }
        cookieFlags |= QEMU_MIGRATION_COOKIE_NBD;
    }

    if (qemuMigrationBakeCookie(mig, driver, vm, cookieout,
                                cookieoutlen, cookieFlags) < 0) {
        /* We could tear down the whole guest here, but
         * cookie data is (so far) non-critical, so that
         * seems a little harsh. We'll just warn for now.
         */
        VIR_WARN("Unable to encode migration cookie");
    }

    if (qemuDomainCleanupAdd(vm, qemuMigrationPrepareCleanup) < 0)
        goto endjob;

    if (!(flags & VIR_MIGRATE_OFFLINE)) {
        virDomainAuditStart(vm, "migrated", true);
        event = virDomainEventNewFromObj(vm,
                                         VIR_DOMAIN_EVENT_STARTED,
                                         VIR_DOMAIN_EVENT_STARTED_MIGRATED);
    }

    /* We keep the job active across API calls until the finish() call.
     * This prevents any other APIs being invoked while incoming
     * migration is taking place.
     */
    if (!qemuMigrationJobContinue(vm)) {
        vm = NULL;
        virReportError(VIR_ERR_OPERATION_FAILED,
                       "%s", _("domain disappeared"));
        goto cleanup;
    }

    ret = 0;

cleanup:
    VIR_FREE(migrateFrom);
    VIR_FREE(origname);
    VIR_FREE(xmlout);
    VIR_FORCE_CLOSE(dataFD[0]);
    VIR_FORCE_CLOSE(dataFD[1]);
    if (vm) {
        /* Keep a failed-but-persistent domain around; otherwise drop
         * the transient object we just created. */
        if (ret >= 0 || vm->persistent)
            virObjectUnlock(vm);
        else
            qemuDomainRemoveInactive(driver, vm);
        if (ret < 0) {
            virPortAllocatorRelease(driver->remotePorts, priv->nbdPort);
            priv->nbdPort = 0;
        }
    }
    if (event)
        qemuDomainEventQueue(driver, event);
    qemuMigrationCookieFree(mig);
    virObjectUnref(caps);
    return ret;

stop:
    /* QEMU was already started: audit the failure and tear it down
     * before ending the job. */
    virDomainAuditStart(vm, "migrated", false);
    qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED, 0);

endjob:
    if (vm && !qemuMigrationJobFinish(driver, vm)) {
        vm = NULL;
    }
    goto cleanup;
}
|
|
|
|
|
|
|
|
|
2011-06-13 08:48:34 +00:00
|
|
|
/*
|
|
|
|
* This version starts an empty VM listening on a localhost TCP port, and
|
|
|
|
* sets up the corresponding virStream to handle the incoming data.
|
|
|
|
*/
|
|
|
|
int
|
2012-11-28 16:43:10 +00:00
|
|
|
qemuMigrationPrepareTunnel(virQEMUDriverPtr driver,
|
2011-06-13 08:48:34 +00:00
|
|
|
virConnectPtr dconn,
|
|
|
|
const char *cookiein,
|
|
|
|
int cookieinlen,
|
|
|
|
char **cookieout,
|
|
|
|
int *cookieoutlen,
|
|
|
|
virStreamPtr st,
|
2013-06-07 12:12:28 +00:00
|
|
|
virDomainDefPtr *def,
|
2012-11-21 08:28:49 +00:00
|
|
|
unsigned long flags)
|
2011-06-13 08:48:34 +00:00
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
VIR_DEBUG("driver=%p, dconn=%p, cookiein=%s, cookieinlen=%d, "
|
2013-04-29 10:29:02 +00:00
|
|
|
"cookieout=%p, cookieoutlen=%p, st=%p, def=%p, "
|
2013-01-18 17:05:03 +00:00
|
|
|
"flags=%lx",
|
2011-06-13 08:48:34 +00:00
|
|
|
driver, dconn, NULLSTR(cookiein), cookieinlen,
|
2013-06-07 12:12:28 +00:00
|
|
|
cookieout, cookieoutlen, st, *def, flags);
|
2011-06-13 08:48:34 +00:00
|
|
|
|
2013-06-25 12:38:05 +00:00
|
|
|
if (st == NULL) {
|
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
|
|
|
|
_("tunnelled migration requested but NULL stream passed"));
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2011-06-13 08:48:34 +00:00
|
|
|
ret = qemuMigrationPrepareAny(driver, dconn, cookiein, cookieinlen,
|
2013-04-29 10:29:02 +00:00
|
|
|
cookieout, cookieoutlen, def,
|
2013-03-22 13:52:25 +00:00
|
|
|
st, 0, flags);
|
2011-06-13 08:48:34 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-01-31 10:47:03 +00:00
|
|
|
/**
 * qemuMigrationPrepareDirect:
 *
 * Destination-side Prepare for direct (TCP) migration.  Works out the
 * host/port QEMU should listen on — either generated from the local
 * hostname and the driver's migration port pool, or parsed from the
 * caller-supplied @uri_in — then delegates to qemuMigrationPrepareAny().
 *
 * On success *uri_out (when generated here) is the URI the source must
 * connect to; the caller frees it.  Returns 0 on success, -1 on error.
 *
 * NOTE(review): 'port' is a plain static counter incremented without
 * locking; concurrent prepares could race and hand out the same port —
 * verify the driver serializes calls or consider the port allocator.
 */
int
qemuMigrationPrepareDirect(virQEMUDriverPtr driver,
                           virConnectPtr dconn,
                           const char *cookiein,
                           int cookieinlen,
                           char **cookieout,
                           int *cookieoutlen,
                           const char *uri_in,
                           char **uri_out,
                           virDomainDefPtr *def,
                           unsigned long flags)
{
    static int port = 0;        /* round-robin offset into the port pool */
    int this_port;
    char *hostname = NULL;
    const char *p;
    char *uri_str = NULL;
    int ret = -1;
    virURIPtr uri = NULL;

    VIR_DEBUG("driver=%p, dconn=%p, cookiein=%s, cookieinlen=%d, "
              "cookieout=%p, cookieoutlen=%p, uri_in=%s, uri_out=%p, "
              "def=%p, flags=%lx",
              driver, dconn, NULLSTR(cookiein), cookieinlen,
              cookieout, cookieoutlen, NULLSTR(uri_in), uri_out,
              *def, flags);

    *uri_out = NULL;

    /* The URI passed in may be NULL or a string "tcp://somehostname:port".
     *
     * If the URI passed in is NULL then we allocate a port number
     * from our pool of port numbers and return a URI of
     * "tcp://ourhostname:port".
     *
     * If the URI passed in is not NULL then we try to parse out the
     * port number and use that (note that the hostname is assumed
     * to be a correct hostname which refers to the target machine).
     */
    if (uri_in == NULL) {
        this_port = QEMUD_MIGRATION_FIRST_PORT + port++;
        if (port == QEMUD_MIGRATION_NUM_PORTS) port = 0;

        /* Get hostname */
        if ((hostname = virGetHostname()) == NULL)
            goto cleanup;

        /* A "localhost" hostname would make the source connect to
         * itself, so refuse it. */
        if (STRPREFIX(hostname, "localhost")) {
            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                           _("hostname on destination resolved to localhost,"
                             " but migration requires an FQDN"));
            goto cleanup;
        }

        /* XXX this really should have been a properly well-formed
         * URI, but we can't add in tcp:// now without breaking
         * compatibility with old targets. We at least make the
         * new targets accept both syntaxes though.
         */
        /* Caller frees */
        if (virAsprintf(uri_out, "tcp:%s:%d", hostname, this_port) < 0)
            goto cleanup;
    } else {
        /* Check the URI starts with "tcp:". We will escape the
         * URI when passing it to the qemu monitor, so bad
         * characters in hostname part don't matter.
         */
        if (!(p = STRSKIP(uri_in, "tcp:"))) {
            virReportError(VIR_ERR_INVALID_ARG, "%s",
                           _("only tcp URIs are supported for KVM/QEMU"
                             " migrations"));
            goto cleanup;
        }

        /* Convert uri_in to well-formed URI with // after tcp: */
        if (!(STRPREFIX(uri_in, "tcp://"))) {
            if (virAsprintf(&uri_str, "tcp://%s", p) < 0)
                goto cleanup;
        }

        uri = virURIParse(uri_str ? uri_str : uri_in);
        VIR_FREE(uri_str);

        if (uri == NULL) {
            virReportError(VIR_ERR_INVALID_ARG, _("unable to parse URI: %s"),
                           uri_in);
            goto cleanup;
        }

        if (uri->server == NULL) {
            virReportError(VIR_ERR_INVALID_ARG, _("missing host in migration"
                                                  " URI: %s"), uri_in);
            goto cleanup;
        }

        if (uri->port == 0) {
            /* Generate a port */
            this_port = QEMUD_MIGRATION_FIRST_PORT + port++;
            if (port == QEMUD_MIGRATION_NUM_PORTS)
                port = 0;

            /* Caller frees */
            if (virAsprintf(uri_out, "%s:%d", uri_in, this_port) < 0)
                goto cleanup;

        } else {
            this_port = uri->port;
        }
    }

    if (*uri_out)
        VIR_DEBUG("Generated uri_out=%s", *uri_out);

    ret = qemuMigrationPrepareAny(driver, dconn, cookiein, cookieinlen,
                                  cookieout, cookieoutlen, def,
                                  NULL, this_port, flags);
cleanup:
    virURIFree(uri);
    VIR_FREE(hostname);
    /* On any failure the generated URI is useless to the caller. */
    if (ret != 0)
        VIR_FREE(*uri_out);
    return ret;
}
|
|
|
|
|
|
|
|
|
2013-06-25 12:38:05 +00:00
|
|
|
virDomainDefPtr
|
|
|
|
qemuMigrationPrepareDef(virQEMUDriverPtr driver,
|
|
|
|
const char *dom_xml,
|
|
|
|
const char *dname)
|
|
|
|
{
|
|
|
|
virCapsPtr caps = NULL;
|
|
|
|
virDomainDefPtr def;
|
|
|
|
|
|
|
|
if (!dom_xml) {
|
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
|
|
|
|
_("no domain XML passed"));
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!(caps = virQEMUDriverGetCapabilities(driver, false)))
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
if (!(def = virDomainDefParseString(dom_xml, caps, driver->xmlopt,
|
|
|
|
QEMU_EXPECTED_VIRT_TYPES,
|
|
|
|
VIR_DOMAIN_XML_INACTIVE)))
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
if (dname) {
|
|
|
|
VIR_FREE(def->name);
|
|
|
|
if (VIR_STRDUP(def->name, dname) < 0) {
|
|
|
|
virDomainDefFree(def);
|
|
|
|
def = NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
cleanup:
|
|
|
|
virObjectUnref(caps);
|
|
|
|
return def;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2013-06-25 12:55:10 +00:00
|
|
|
static int
|
|
|
|
qemuMigrationConfirmPhase(virQEMUDriverPtr driver,
|
|
|
|
virConnectPtr conn,
|
|
|
|
virDomainObjPtr vm,
|
|
|
|
const char *cookiein,
|
|
|
|
int cookieinlen,
|
|
|
|
unsigned int flags,
|
|
|
|
int retcode)
|
|
|
|
{
|
|
|
|
qemuMigrationCookiePtr mig;
|
|
|
|
virDomainEventPtr event = NULL;
|
|
|
|
int rv = -1;
|
|
|
|
virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
|
|
|
|
|
|
|
|
VIR_DEBUG("driver=%p, conn=%p, vm=%p, cookiein=%s, cookieinlen=%d, "
|
|
|
|
"flags=%x, retcode=%d",
|
|
|
|
driver, conn, vm, NULLSTR(cookiein), cookieinlen,
|
|
|
|
flags, retcode);
|
|
|
|
|
|
|
|
virCheckFlags(QEMU_MIGRATION_FLAGS, -1);
|
|
|
|
|
|
|
|
qemuMigrationJobSetPhase(driver, vm,
|
|
|
|
retcode == 0
|
|
|
|
? QEMU_MIGRATION_PHASE_CONFIRM3
|
|
|
|
: QEMU_MIGRATION_PHASE_CONFIRM3_CANCELLED);
|
|
|
|
|
|
|
|
if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen, 0)))
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
if (flags & VIR_MIGRATE_OFFLINE)
|
|
|
|
goto done;
|
|
|
|
|
|
|
|
/* Did the migration go as planned? If yes, kill off the
|
|
|
|
* domain object, but if no, resume CPUs
|
|
|
|
*/
|
|
|
|
if (retcode == 0) {
|
|
|
|
/* If guest uses SPICE and supports seamless migration we have to hold
|
|
|
|
* up domain shutdown until SPICE server transfers its data */
|
|
|
|
qemuMigrationWaitForSpice(driver, vm);
|
|
|
|
|
|
|
|
qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_MIGRATED,
|
|
|
|
VIR_QEMU_PROCESS_STOP_MIGRATED);
|
|
|
|
virDomainAuditStop(vm, "migrated");
|
|
|
|
|
|
|
|
event = virDomainEventNewFromObj(vm,
|
|
|
|
VIR_DOMAIN_EVENT_STOPPED,
|
|
|
|
VIR_DOMAIN_EVENT_STOPPED_MIGRATED);
|
|
|
|
} else {
|
|
|
|
|
|
|
|
/* cancel any outstanding NBD jobs */
|
|
|
|
qemuMigrationCancelDriveMirror(mig, driver, vm);
|
|
|
|
|
|
|
|
/* run 'cont' on the destination, which allows migration on qemu
|
|
|
|
* >= 0.10.6 to work properly. This isn't strictly necessary on
|
|
|
|
* older qemu's, but it also doesn't hurt anything there
|
|
|
|
*/
|
|
|
|
if (qemuProcessStartCPUs(driver, vm, conn,
|
|
|
|
VIR_DOMAIN_RUNNING_MIGRATED,
|
|
|
|
QEMU_ASYNC_JOB_MIGRATION_OUT) < 0) {
|
|
|
|
if (virGetLastError() == NULL)
|
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
"%s", _("resume operation failed"));
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
event = virDomainEventNewFromObj(vm,
|
|
|
|
VIR_DOMAIN_EVENT_RESUMED,
|
|
|
|
VIR_DOMAIN_EVENT_RESUMED_MIGRATED);
|
|
|
|
if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm) < 0) {
|
|
|
|
VIR_WARN("Failed to save status on vm %s", vm->def->name);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
done:
|
|
|
|
qemuMigrationCookieFree(mig);
|
|
|
|
rv = 0;
|
|
|
|
|
|
|
|
cleanup:
|
|
|
|
if (event)
|
|
|
|
qemuDomainEventQueue(driver, event);
|
|
|
|
virObjectUnref(cfg);
|
|
|
|
return rv;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
 * qemuMigrationConfirm:
 *
 * Public source-side Confirm entry point.  Resumes the async migration
 * job started in Begin, unregisters the connection-close callback, and
 * runs qemuMigrationConfirmPhase() with @cancelled deciding between the
 * CONFIRM3 and CONFIRM3_CANCELLED phases.  Finishes the migration job
 * and, if the domain ended up inactive and should not persist here,
 * removes it (and deletes its config for VIR_MIGRATE_UNDEFINE_SOURCE).
 *
 * @vm is unlocked (or already disposed of) before returning.
 * Returns 0 on success, -1 on error.
 */
int
qemuMigrationConfirm(virConnectPtr conn,
                     virDomainObjPtr vm,
                     const char *cookiein,
                     int cookieinlen,
                     unsigned int flags,
                     int cancelled)
{
    virQEMUDriverPtr driver = conn->privateData;
    enum qemuMigrationJobPhase phase;
    virQEMUDriverConfigPtr cfg = NULL;
    int ret = -1;

    cfg = virQEMUDriverGetConfig(driver);

    /* Confirm only makes sense while the outgoing-migration job that
     * Begin started is still active. */
    if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_OUT))
        goto cleanup;

    if (cancelled)
        phase = QEMU_MIGRATION_PHASE_CONFIRM3_CANCELLED;
    else
        phase = QEMU_MIGRATION_PHASE_CONFIRM3;

    qemuMigrationJobStartPhase(driver, vm, phase);
    /* The close callback registered in Begin is no longer needed:
     * migration is ending one way or the other. */
    virCloseCallbacksUnset(driver->closeCallbacks, vm,
                           qemuMigrationCleanup);

    ret = qemuMigrationConfirmPhase(driver, conn, vm,
                                    cookiein, cookieinlen,
                                    flags, cancelled);

    /* JobFinish returning 0 means the last reference was dropped and
     * vm is already gone.  Otherwise clean up a now-inactive domain
     * that should not remain defined on this host. */
    if (qemuMigrationJobFinish(driver, vm) == 0) {
        vm = NULL;
    } else if (!virDomainObjIsActive(vm) &&
               (!vm->persistent || (flags & VIR_MIGRATE_UNDEFINE_SOURCE))) {
        if (flags & VIR_MIGRATE_UNDEFINE_SOURCE)
            virDomainDeleteConfig(cfg->configDir, cfg->autostartDir, vm);
        qemuDomainRemoveInactive(driver, vm);
        vm = NULL;
    }

cleanup:
    if (vm)
        virObjectUnlock(vm);
    virObjectUnref(cfg);
    return ret;
}
|
|
|
|
|
|
|
|
|
2011-08-11 12:36:04 +00:00
|
|
|
/* How the source-side Perform phase reaches the destination QEMU. */
enum qemuMigrationDestinationType {
    MIGRATION_DEST_HOST,           /* QEMU connects to host:port directly */
    MIGRATION_DEST_CONNECT_HOST,   /* libvirt connects, then hands QEMU the fd */
    MIGRATION_DEST_UNIX,           /* via a local UNIX socket */
    MIGRATION_DEST_FD,             /* via pre-opened file descriptors */
};

/* Whether migration data goes straight from QEMU or is forwarded
 * through a libvirt stream (tunnelled migration). */
enum qemuMigrationForwardType {
    MIGRATION_FWD_DIRECT,
    MIGRATION_FWD_STREAM,
};

/* Bundle describing where migration data should be sent and how it is
 * forwarded; the union members are selected by destType/fwdType. */
typedef struct _qemuMigrationSpec qemuMigrationSpec;
typedef qemuMigrationSpec *qemuMigrationSpecPtr;
struct _qemuMigrationSpec {
    enum qemuMigrationDestinationType destType;
    union {
        struct {
            const char *name;      /* destination hostname/address */
            int port;
        } host;

        struct {
            char *file;            /* path of the UNIX socket */
            int sock;
        } unix_socket;

        struct {
            int qemu;              /* fd handed to the QEMU process */
            int local;             /* fd kept by libvirt for forwarding */
        } fd;
    } dest;

    enum qemuMigrationForwardType fwdType;
    union {
        virStreamPtr stream;       /* used when fwdType is FWD_STREAM */
    } fwd;
};

/* Chunk size used when copying tunnelled migration data. */
#define TUNNEL_SEND_BUF_SIZE 65536

/* State shared with the I/O thread that pumps tunnelled migration data
 * from the local socket into the virStream. */
typedef struct _qemuMigrationIOThread qemuMigrationIOThread;
typedef qemuMigrationIOThread *qemuMigrationIOThreadPtr;
struct _qemuMigrationIOThread {
    virThread thread;
    virStreamPtr st;
    int sock;
    virError err;                  /* error reported back to the spawner */
    int wakeupRecvFD;              /* pipe used to interrupt the poll loop */
    int wakeupSendFD;
};
|
|
|
|
|
|
|
|
static void qemuMigrationIOFunc(void *arg)
|
2011-01-31 10:47:03 +00:00
|
|
|
{
|
2011-05-09 15:52:42 +00:00
|
|
|
qemuMigrationIOThreadPtr data = arg;
|
2012-04-23 14:17:55 +00:00
|
|
|
char *buffer = NULL;
|
|
|
|
struct pollfd fds[2];
|
|
|
|
int timeout = -1;
|
|
|
|
virErrorPtr err = NULL;
|
|
|
|
|
|
|
|
VIR_DEBUG("Running migration tunnel; stream=%p, sock=%d",
|
|
|
|
data->st, data->sock);
|
2011-01-31 10:47:03 +00:00
|
|
|
|
2013-07-04 10:14:12 +00:00
|
|
|
if (VIR_ALLOC_N(buffer, TUNNEL_SEND_BUF_SIZE) < 0)
|
2012-04-23 14:17:55 +00:00
|
|
|
goto abrt;
|
2011-01-31 10:47:03 +00:00
|
|
|
|
2012-04-23 14:17:55 +00:00
|
|
|
fds[0].fd = data->sock;
|
|
|
|
fds[1].fd = data->wakeupRecvFD;
|
|
|
|
|
2011-01-31 10:47:03 +00:00
|
|
|
for (;;) {
|
2012-04-23 14:17:55 +00:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
fds[0].events = fds[1].events = POLLIN;
|
|
|
|
fds[0].revents = fds[1].revents = 0;
|
|
|
|
|
|
|
|
ret = poll(fds, ARRAY_CARDINALITY(fds), timeout);
|
|
|
|
|
|
|
|
if (ret < 0) {
|
|
|
|
if (errno == EAGAIN || errno == EINTR)
|
|
|
|
continue;
|
2011-01-31 10:47:03 +00:00
|
|
|
virReportSystemError(errno, "%s",
|
2012-04-23 14:17:55 +00:00
|
|
|
_("poll failed in migration tunnel"));
|
|
|
|
goto abrt;
|
2011-01-31 10:47:03 +00:00
|
|
|
}
|
2012-04-23 14:17:55 +00:00
|
|
|
|
|
|
|
if (ret == 0) {
|
|
|
|
/* We were asked to gracefully stop but reading would block. This
|
|
|
|
* can only happen if qemu told us migration finished but didn't
|
|
|
|
* close the migration fd. We handle this in the same way as EOF.
|
|
|
|
*/
|
|
|
|
VIR_DEBUG("QEMU forgot to close migration fd");
|
2011-01-31 10:47:03 +00:00
|
|
|
break;
|
2012-04-23 14:17:55 +00:00
|
|
|
}
|
2011-01-31 10:47:03 +00:00
|
|
|
|
2012-04-23 14:17:55 +00:00
|
|
|
if (fds[1].revents & (POLLIN | POLLERR | POLLHUP)) {
|
|
|
|
char stop = 0;
|
|
|
|
|
|
|
|
if (saferead(data->wakeupRecvFD, &stop, 1) != 1) {
|
|
|
|
virReportSystemError(errno, "%s",
|
|
|
|
_("failed to read from wakeup fd"));
|
|
|
|
goto abrt;
|
|
|
|
}
|
|
|
|
|
|
|
|
VIR_DEBUG("Migration tunnel was asked to %s",
|
|
|
|
stop ? "abort" : "finish");
|
|
|
|
if (stop) {
|
|
|
|
goto abrt;
|
|
|
|
} else {
|
|
|
|
timeout = 0;
|
|
|
|
}
|
2011-01-31 10:47:03 +00:00
|
|
|
}
|
|
|
|
|
2012-04-23 14:17:55 +00:00
|
|
|
if (fds[0].revents & (POLLIN | POLLERR | POLLHUP)) {
|
|
|
|
int nbytes;
|
|
|
|
|
|
|
|
nbytes = saferead(data->sock, buffer, TUNNEL_SEND_BUF_SIZE);
|
|
|
|
if (nbytes > 0) {
|
|
|
|
if (virStreamSend(data->st, buffer, nbytes) < 0)
|
|
|
|
goto error;
|
|
|
|
} else if (nbytes < 0) {
|
|
|
|
virReportSystemError(errno, "%s",
|
|
|
|
_("tunnelled migration failed to read from qemu"));
|
|
|
|
goto abrt;
|
|
|
|
} else {
|
|
|
|
/* EOF; get out of here */
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2011-01-31 10:47:03 +00:00
|
|
|
|
2011-05-09 15:52:42 +00:00
|
|
|
if (virStreamFinish(data->st) < 0)
|
|
|
|
goto error;
|
2011-01-31 10:47:03 +00:00
|
|
|
|
2012-04-23 14:17:55 +00:00
|
|
|
VIR_FREE(buffer);
|
|
|
|
|
2011-05-09 15:52:42 +00:00
|
|
|
return;
|
|
|
|
|
2012-04-23 14:17:55 +00:00
|
|
|
abrt:
|
|
|
|
err = virSaveLastError();
|
|
|
|
if (err && err->code == VIR_ERR_OK) {
|
|
|
|
virFreeError(err);
|
|
|
|
err = NULL;
|
|
|
|
}
|
|
|
|
virStreamAbort(data->st);
|
|
|
|
if (err) {
|
|
|
|
virSetError(err);
|
|
|
|
virFreeError(err);
|
|
|
|
}
|
|
|
|
|
2011-05-09 15:52:42 +00:00
|
|
|
error:
|
|
|
|
virCopyLastError(&data->err);
|
|
|
|
virResetLastError();
|
2012-04-23 14:17:55 +00:00
|
|
|
VIR_FREE(buffer);
|
2011-05-09 15:52:42 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static qemuMigrationIOThreadPtr
|
|
|
|
qemuMigrationStartTunnel(virStreamPtr st,
|
|
|
|
int sock)
|
|
|
|
{
|
2012-04-23 14:17:55 +00:00
|
|
|
qemuMigrationIOThreadPtr io = NULL;
|
|
|
|
int wakeupFD[2] = { -1, -1 };
|
2011-05-09 15:52:42 +00:00
|
|
|
|
2012-04-23 14:17:55 +00:00
|
|
|
if (pipe2(wakeupFD, O_CLOEXEC) < 0) {
|
|
|
|
virReportSystemError(errno, "%s",
|
|
|
|
_("Unable to make pipe"));
|
|
|
|
goto error;
|
2011-05-09 15:52:42 +00:00
|
|
|
}
|
|
|
|
|
2012-04-23 14:17:55 +00:00
|
|
|
if (VIR_ALLOC(io) < 0)
|
2013-07-04 10:14:12 +00:00
|
|
|
goto error;
|
2012-04-23 14:17:55 +00:00
|
|
|
|
2011-05-09 15:52:42 +00:00
|
|
|
io->st = st;
|
|
|
|
io->sock = sock;
|
2012-04-23 14:17:55 +00:00
|
|
|
io->wakeupRecvFD = wakeupFD[0];
|
|
|
|
io->wakeupSendFD = wakeupFD[1];
|
2011-05-09 15:52:42 +00:00
|
|
|
|
|
|
|
if (virThreadCreate(&io->thread, true,
|
|
|
|
qemuMigrationIOFunc,
|
|
|
|
io) < 0) {
|
|
|
|
virReportSystemError(errno, "%s",
|
|
|
|
_("Unable to create migration thread"));
|
2012-04-23 14:17:55 +00:00
|
|
|
goto error;
|
2011-05-09 15:52:42 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return io;
|
2012-04-23 14:17:55 +00:00
|
|
|
|
|
|
|
error:
|
|
|
|
VIR_FORCE_CLOSE(wakeupFD[0]);
|
|
|
|
VIR_FORCE_CLOSE(wakeupFD[1]);
|
|
|
|
VIR_FREE(io);
|
|
|
|
return NULL;
|
2011-05-09 15:52:42 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
2012-04-23 14:17:55 +00:00
|
|
|
qemuMigrationStopTunnel(qemuMigrationIOThreadPtr io, bool error)
|
2011-05-09 15:52:42 +00:00
|
|
|
{
|
|
|
|
int rv = -1;
|
2012-04-23 14:17:55 +00:00
|
|
|
char stop = error ? 1 : 0;
|
|
|
|
|
|
|
|
/* make sure the thread finishes its job and is joinable */
|
|
|
|
if (safewrite(io->wakeupSendFD, &stop, 1) != 1) {
|
|
|
|
virReportSystemError(errno, "%s",
|
|
|
|
_("failed to wakeup migration tunnel"));
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2011-05-09 15:52:42 +00:00
|
|
|
virThreadJoin(&io->thread);
|
|
|
|
|
|
|
|
/* Forward error from the IO thread, to this thread */
|
|
|
|
if (io->err.code != VIR_ERR_OK) {
|
2012-04-23 14:17:55 +00:00
|
|
|
if (error)
|
|
|
|
rv = 0;
|
|
|
|
else
|
|
|
|
virSetError(&io->err);
|
2011-05-09 15:52:42 +00:00
|
|
|
virResetError(&io->err);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
rv = 0;
|
|
|
|
|
|
|
|
cleanup:
|
2012-04-23 14:17:55 +00:00
|
|
|
VIR_FORCE_CLOSE(io->wakeupSendFD);
|
|
|
|
VIR_FORCE_CLOSE(io->wakeupRecvFD);
|
2011-05-09 15:52:42 +00:00
|
|
|
VIR_FREE(io);
|
|
|
|
return rv;
|
2011-01-31 10:47:03 +00:00
|
|
|
}
|
|
|
|
|
2012-02-02 15:34:08 +00:00
|
|
|
static int
|
2012-11-28 16:43:10 +00:00
|
|
|
qemuMigrationConnect(virQEMUDriverPtr driver,
|
2012-02-02 15:34:08 +00:00
|
|
|
virDomainObjPtr vm,
|
|
|
|
qemuMigrationSpecPtr spec)
|
|
|
|
{
|
|
|
|
virNetSocketPtr sock;
|
|
|
|
const char *host;
|
|
|
|
char *port = NULL;
|
|
|
|
int ret = -1;
|
|
|
|
|
|
|
|
host = spec->dest.host.name;
|
2013-07-04 10:14:12 +00:00
|
|
|
if (virAsprintf(&port, "%d", spec->dest.host.port) < 0)
|
2012-02-02 15:34:08 +00:00
|
|
|
return -1;
|
|
|
|
|
|
|
|
spec->destType = MIGRATION_DEST_FD;
|
|
|
|
spec->dest.fd.qemu = -1;
|
|
|
|
|
|
|
|
if (virSecurityManagerSetSocketLabel(driver->securityManager, vm->def) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
if (virNetSocketNewConnectTCP(host, port, &sock) == 0) {
|
|
|
|
spec->dest.fd.qemu = virNetSocketDupFD(sock, true);
|
2012-07-11 13:35:51 +00:00
|
|
|
virObjectUnref(sock);
|
2012-02-02 15:34:08 +00:00
|
|
|
}
|
|
|
|
if (virSecurityManagerClearSocketLabel(driver->securityManager, vm->def) < 0 ||
|
|
|
|
spec->dest.fd.qemu == -1)
|
|
|
|
goto cleanup;
|
|
|
|
|
2013-03-26 14:45:16 +00:00
|
|
|
/* Migration expects a blocking FD */
|
|
|
|
if (virSetBlocking(spec->dest.fd.qemu, true) < 0) {
|
|
|
|
virReportSystemError(errno, _("Unable to set FD %d blocking"),
|
|
|
|
spec->dest.fd.qemu);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2012-02-02 15:34:08 +00:00
|
|
|
ret = 0;
|
|
|
|
|
|
|
|
cleanup:
|
|
|
|
VIR_FREE(port);
|
|
|
|
if (ret < 0)
|
|
|
|
VIR_FORCE_CLOSE(spec->dest.fd.qemu);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2011-08-11 12:36:04 +00:00
|
|
|
static int
|
2012-11-28 16:43:10 +00:00
|
|
|
qemuMigrationRun(virQEMUDriverPtr driver,
|
2011-08-11 12:36:04 +00:00
|
|
|
virDomainObjPtr vm,
|
|
|
|
const char *cookiein,
|
|
|
|
int cookieinlen,
|
|
|
|
char **cookieout,
|
|
|
|
int *cookieoutlen,
|
|
|
|
unsigned long flags,
|
|
|
|
unsigned long resource,
|
2011-09-23 06:56:56 +00:00
|
|
|
qemuMigrationSpecPtr spec,
|
2013-06-18 10:17:18 +00:00
|
|
|
virConnectPtr dconn,
|
|
|
|
const char *graphicsuri)
|
2011-01-31 10:47:03 +00:00
|
|
|
{
|
2011-04-20 12:12:43 +00:00
|
|
|
int ret = -1;
|
2011-08-11 12:36:04 +00:00
|
|
|
unsigned int migrate_flags = QEMU_MONITOR_MIGRATE_BACKGROUND;
|
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
2011-02-03 11:09:28 +00:00
|
|
|
qemuMigrationCookiePtr mig = NULL;
|
2011-05-09 15:52:42 +00:00
|
|
|
qemuMigrationIOThreadPtr iothread = NULL;
|
2011-08-11 12:36:04 +00:00
|
|
|
int fd = -1;
|
2011-08-26 18:10:27 +00:00
|
|
|
unsigned long migrate_speed = resource ? resource : priv->migMaxBandwidth;
|
2012-04-20 12:07:49 +00:00
|
|
|
virErrorPtr orig_err = NULL;
|
2013-01-29 12:38:50 +00:00
|
|
|
unsigned int cookieFlags = 0;
|
2013-06-12 14:11:21 +00:00
|
|
|
bool abort_on_error = !!(flags & VIR_MIGRATE_ABORT_ON_ERROR);
|
2011-08-11 12:36:04 +00:00
|
|
|
|
|
|
|
VIR_DEBUG("driver=%p, vm=%p, cookiein=%s, cookieinlen=%d, "
|
|
|
|
"cookieout=%p, cookieoutlen=%p, flags=%lx, resource=%lu, "
|
2013-06-18 10:17:18 +00:00
|
|
|
"spec=%p (dest=%d, fwd=%d), dconn=%p, graphicsuri=%s",
|
2011-08-11 12:36:04 +00:00
|
|
|
driver, vm, NULLSTR(cookiein), cookieinlen,
|
|
|
|
cookieout, cookieoutlen, flags, resource,
|
2013-06-18 10:17:18 +00:00
|
|
|
spec, spec->destType, spec->fwdType, dconn,
|
|
|
|
NULLSTR(graphicsuri));
|
2011-01-31 10:47:03 +00:00
|
|
|
|
2013-01-29 12:38:50 +00:00
|
|
|
if (flags & VIR_MIGRATE_NON_SHARED_DISK) {
|
|
|
|
migrate_flags |= QEMU_MONITOR_MIGRATE_NON_SHARED_DISK;
|
|
|
|
cookieFlags |= QEMU_MIGRATION_COOKIE_NBD;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (flags & VIR_MIGRATE_NON_SHARED_INC) {
|
|
|
|
migrate_flags |= QEMU_MONITOR_MIGRATE_NON_SHARED_INC;
|
|
|
|
cookieFlags |= QEMU_MIGRATION_COOKIE_NBD;
|
|
|
|
}
|
|
|
|
|
2011-05-18 16:34:21 +00:00
|
|
|
if (virLockManagerPluginUsesState(driver->lockManager) &&
|
|
|
|
!cookieout) {
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
_("Migration with lock driver %s requires"
|
|
|
|
" cookie support"),
|
|
|
|
virLockManagerPluginGetName(driver->lockManager));
|
2011-05-18 16:34:21 +00:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2013-01-29 12:38:50 +00:00
|
|
|
mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen,
|
|
|
|
cookieFlags | QEMU_MIGRATION_COOKIE_GRAPHICS);
|
|
|
|
if (!mig)
|
2011-02-03 11:09:28 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
2013-06-18 10:17:18 +00:00
|
|
|
if (qemuDomainMigrateGraphicsRelocate(driver, vm, mig, graphicsuri) < 0)
|
2011-02-03 11:09:28 +00:00
|
|
|
VIR_WARN("unable to provide data for graphics client relocation");
|
|
|
|
|
2012-11-23 14:42:51 +00:00
|
|
|
/* this will update migrate_flags on success */
|
|
|
|
if (qemuMigrationDriveMirror(driver, vm, mig, spec->dest.host.name,
|
|
|
|
migrate_speed, &migrate_flags) < 0) {
|
|
|
|
/* error reported by helper func */
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2011-08-11 12:36:04 +00:00
|
|
|
/* Before EnterMonitor, since qemuMigrationSetOffline already does that */
|
2011-05-20 12:29:42 +00:00
|
|
|
if (!(flags & VIR_MIGRATE_LIVE) &&
|
|
|
|
virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
|
|
|
|
if (qemuMigrationSetOffline(driver, vm) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2013-01-14 11:45:20 +00:00
|
|
|
if (flags & VIR_MIGRATE_COMPRESSED &&
|
|
|
|
qemuMigrationSetCompression(driver, vm,
|
|
|
|
QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
qemu: fix crash when mixing sync and async monitor jobs
Currently, we attempt to run sync job and async job at the same time. It
means that the monitor commands for two jobs can be run in any order.
In the function qemuDomainObjEnterMonitorInternal():
if (priv->job.active == QEMU_JOB_NONE && priv->job.asyncJob) {
if (qemuDomainObjBeginNestedJob(driver, obj) < 0)
We check whether the caller is an async job by priv->job.active and
priv->job.asynJob. But when an async job is running, and a sync job is
also running at the time of the check, then priv->job.active is not
QEMU_JOB_NONE. So we cannot check whether the caller is an async job
in the function qemuDomainObjEnterMonitorInternal(), and must instead
put the burden on the caller to tell us when an async command wants
to do a nested job.
Once the burden is on the caller, then only async monitor enters need
to worry about whether the VM is still running; for sync monitor enter,
the internal return is always 0, so lots of ignore_value can be dropped.
* src/qemu/THREADS.txt: Reflect new rules.
* src/qemu/qemu_domain.h (qemuDomainObjEnterMonitorAsync): New
prototype.
* src/qemu/qemu_process.h (qemuProcessStartCPUs)
(qemuProcessStopCPUs): Add parameter.
* src/qemu/qemu_migration.h (qemuMigrationToFile): Likewise.
(qemuMigrationWaitForCompletion): Make static.
* src/qemu/qemu_domain.c (qemuDomainObjEnterMonitorInternal): Add
parameter.
(qemuDomainObjEnterMonitorAsync): New function.
(qemuDomainObjEnterMonitor, qemuDomainObjEnterMonitorWithDriver):
Update callers.
* src/qemu/qemu_driver.c (qemuDomainSaveInternal)
(qemudDomainCoreDump, doCoreDump, processWatchdogEvent)
(qemudDomainSuspend, qemudDomainResume, qemuDomainSaveImageStartVM)
(qemuDomainSnapshotCreateActive, qemuDomainRevertToSnapshot):
Likewise.
* src/qemu/qemu_process.c (qemuProcessStopCPUs)
(qemuProcessFakeReboot, qemuProcessRecoverMigration)
(qemuProcessRecoverJob, qemuProcessStart): Likewise.
* src/qemu/qemu_migration.c (qemuMigrationToFile)
(qemuMigrationWaitForCompletion, qemuMigrationUpdateJobStatus)
(qemuMigrationJobStart, qemuDomainMigrateGraphicsRelocate)
(doNativeMigrate, doTunnelMigrate, qemuMigrationPerformJob)
(qemuMigrationPerformPhase, qemuMigrationFinish)
(qemuMigrationConfirm): Likewise.
* src/qemu/qemu_hotplug.c: Drop unneeded ignore_value.
2011-07-28 23:18:24 +00:00
|
|
|
if (qemuDomainObjEnterMonitorAsync(driver, vm,
|
|
|
|
QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
|
2011-06-30 09:23:50 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
2012-11-08 13:49:55 +00:00
|
|
|
if (priv->job.asyncAbort) {
|
|
|
|
/* explicitly do this *after* we entered the monitor,
|
|
|
|
* as this is a critical section so we are guaranteed
|
|
|
|
* priv->job.asyncAbort will not change */
|
2013-02-06 18:17:20 +00:00
|
|
|
qemuDomainObjExitMonitor(driver, vm);
|
2012-11-08 13:49:55 +00:00
|
|
|
virReportError(VIR_ERR_OPERATION_ABORTED, _("%s: %s"),
|
|
|
|
qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
|
|
|
|
_("canceled by client"));
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2011-08-26 18:10:27 +00:00
|
|
|
if (qemuMonitorSetMigrationSpeed(priv->mon, migrate_speed) < 0) {
|
2013-02-06 18:17:20 +00:00
|
|
|
qemuDomainObjExitMonitor(driver, vm);
|
2011-04-20 16:56:35 +00:00
|
|
|
goto cleanup;
|
|
|
|
}
|
2011-04-20 12:12:43 +00:00
|
|
|
|
2012-02-02 15:34:08 +00:00
|
|
|
/* connect to the destination qemu if needed */
|
|
|
|
if (spec->destType == MIGRATION_DEST_CONNECT_HOST &&
|
2012-02-15 15:48:54 +00:00
|
|
|
qemuMigrationConnect(driver, vm, spec) < 0) {
|
2013-02-06 18:17:20 +00:00
|
|
|
qemuDomainObjExitMonitor(driver, vm);
|
2012-02-02 15:34:08 +00:00
|
|
|
goto cleanup;
|
2012-02-15 15:48:54 +00:00
|
|
|
}
|
2012-02-02 15:34:08 +00:00
|
|
|
|
2011-08-11 12:36:04 +00:00
|
|
|
switch (spec->destType) {
|
|
|
|
case MIGRATION_DEST_HOST:
|
|
|
|
ret = qemuMonitorMigrateToHost(priv->mon, migrate_flags,
|
|
|
|
spec->dest.host.name,
|
|
|
|
spec->dest.host.port);
|
|
|
|
break;
|
|
|
|
|
2012-02-02 15:34:08 +00:00
|
|
|
case MIGRATION_DEST_CONNECT_HOST:
|
|
|
|
/* handled above and transformed into MIGRATION_DEST_FD */
|
|
|
|
break;
|
|
|
|
|
2011-08-11 12:36:04 +00:00
|
|
|
case MIGRATION_DEST_UNIX:
|
2013-02-01 13:48:58 +00:00
|
|
|
if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_QEMU_UNIX)) {
|
2011-08-11 12:36:04 +00:00
|
|
|
ret = qemuMonitorMigrateToUnix(priv->mon, migrate_flags,
|
|
|
|
spec->dest.unix_socket.file);
|
|
|
|
} else {
|
|
|
|
const char *args[] = {
|
|
|
|
"nc", "-U", spec->dest.unix_socket.file, NULL
|
|
|
|
};
|
|
|
|
ret = qemuMonitorMigrateToCommand(priv->mon, migrate_flags, args);
|
|
|
|
}
|
|
|
|
break;
|
2011-08-11 13:47:02 +00:00
|
|
|
|
|
|
|
case MIGRATION_DEST_FD:
|
2012-05-30 09:20:44 +00:00
|
|
|
if (spec->fwdType != MIGRATION_FWD_DIRECT) {
|
2011-08-11 13:47:02 +00:00
|
|
|
fd = spec->dest.fd.local;
|
2012-05-30 09:20:44 +00:00
|
|
|
spec->dest.fd.local = -1;
|
|
|
|
}
|
2011-08-11 13:47:02 +00:00
|
|
|
ret = qemuMonitorMigrateToFd(priv->mon, migrate_flags,
|
|
|
|
spec->dest.fd.qemu);
|
|
|
|
VIR_FORCE_CLOSE(spec->dest.fd.qemu);
|
|
|
|
break;
|
2011-01-31 10:47:03 +00:00
|
|
|
}
|
2013-02-06 18:17:20 +00:00
|
|
|
qemuDomainObjExitMonitor(driver, vm);
|
2011-08-11 12:36:04 +00:00
|
|
|
if (ret < 0)
|
2011-04-20 12:12:43 +00:00
|
|
|
goto cleanup;
|
|
|
|
ret = -1;
|
2011-01-31 10:47:03 +00:00
|
|
|
|
|
|
|
if (!virDomainObjIsActive(vm)) {
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
|
|
|
|
_("guest unexpectedly quit"));
|
2011-01-31 10:47:03 +00:00
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* From this point onwards we *must* call cancel to abort the
|
|
|
|
* migration on source if anything goes wrong */
|
|
|
|
|
2011-08-11 12:36:04 +00:00
|
|
|
if (spec->destType == MIGRATION_DEST_UNIX) {
|
|
|
|
/* It is also possible that the migrate didn't fail initially, but
|
|
|
|
* rather failed later on. Check its status before waiting for a
|
|
|
|
* connection from qemu which may never be initiated.
|
|
|
|
*/
|
|
|
|
if (qemuMigrationUpdateJobStatus(driver, vm, _("migration job"),
|
|
|
|
QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
|
|
|
|
goto cancel;
|
2011-01-31 10:47:03 +00:00
|
|
|
|
2011-08-11 12:36:04 +00:00
|
|
|
while ((fd = accept(spec->dest.unix_socket.sock, NULL, NULL)) < 0) {
|
|
|
|
if (errno == EAGAIN || errno == EINTR)
|
|
|
|
continue;
|
|
|
|
virReportSystemError(errno, "%s",
|
|
|
|
_("failed to accept connection from qemu"));
|
|
|
|
goto cancel;
|
|
|
|
}
|
2011-01-31 10:47:03 +00:00
|
|
|
}
|
|
|
|
|
2011-08-11 12:36:04 +00:00
|
|
|
if (spec->fwdType != MIGRATION_FWD_DIRECT &&
|
|
|
|
!(iothread = qemuMigrationStartTunnel(spec->fwd.stream, fd)))
|
2011-05-09 15:52:42 +00:00
|
|
|
goto cancel;
|
|
|
|
|
2011-08-11 12:36:04 +00:00
|
|
|
if (qemuMigrationWaitForCompletion(driver, vm,
|
2011-09-23 06:56:56 +00:00
|
|
|
QEMU_ASYNC_JOB_MIGRATION_OUT,
|
2013-06-12 14:11:21 +00:00
|
|
|
dconn, abort_on_error) < 0)
|
2011-08-11 12:36:04 +00:00
|
|
|
goto cleanup;
|
2011-05-09 15:52:42 +00:00
|
|
|
|
2011-06-02 15:40:33 +00:00
|
|
|
/* When migration completed, QEMU will have paused the
|
|
|
|
* CPUs for us, but unless we're using the JSON monitor
|
|
|
|
* we won't have been notified of this, so might still
|
|
|
|
* think we're running. For v2 protocol this doesn't
|
|
|
|
* matter because we'll kill the VM soon, but for v3
|
|
|
|
* this is important because we stay paused until the
|
|
|
|
* confirm3 step, but need to release the lock state
|
|
|
|
*/
|
|
|
|
if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
|
|
|
|
if (qemuMigrationSetOffline(driver, vm) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2011-08-11 12:36:04 +00:00
|
|
|
ret = 0;
|
2011-05-09 15:52:42 +00:00
|
|
|
|
2011-08-11 12:36:04 +00:00
|
|
|
cleanup:
|
2012-04-20 12:07:49 +00:00
|
|
|
if (ret < 0 && !orig_err)
|
|
|
|
orig_err = virSaveLastError();
|
|
|
|
|
2013-01-30 16:53:37 +00:00
|
|
|
/* cancel any outstanding NBD jobs */
|
|
|
|
qemuMigrationCancelDriveMirror(mig, driver, vm);
|
|
|
|
|
2011-08-11 12:36:04 +00:00
|
|
|
if (spec->fwdType != MIGRATION_FWD_DIRECT) {
|
2012-04-23 14:17:55 +00:00
|
|
|
if (iothread && qemuMigrationStopTunnel(iothread, ret < 0) < 0)
|
2011-08-11 12:36:04 +00:00
|
|
|
ret = -1;
|
2012-04-23 14:17:55 +00:00
|
|
|
VIR_FORCE_CLOSE(fd);
|
2011-08-11 12:36:04 +00:00
|
|
|
}
|
2011-01-31 10:47:03 +00:00
|
|
|
|
2013-05-03 09:55:49 +00:00
|
|
|
cookieFlags |= QEMU_MIGRATION_COOKIE_NETWORK;
|
|
|
|
if (flags & VIR_MIGRATE_PERSIST_DEST)
|
|
|
|
cookieFlags |= QEMU_MIGRATION_COOKIE_PERSISTENT;
|
2011-02-03 11:09:28 +00:00
|
|
|
if (ret == 0 &&
|
2013-01-29 12:38:50 +00:00
|
|
|
qemuMigrationBakeCookie(mig, driver, vm, cookieout,
|
|
|
|
cookieoutlen, cookieFlags) < 0) {
|
2011-02-03 11:09:28 +00:00
|
|
|
VIR_WARN("Unable to encode migration cookie");
|
2012-10-01 15:18:20 +00:00
|
|
|
}
|
2011-02-03 11:09:28 +00:00
|
|
|
|
2011-08-11 12:36:04 +00:00
|
|
|
qemuMigrationCookieFree(mig);
|
|
|
|
|
2012-04-20 12:07:49 +00:00
|
|
|
if (orig_err) {
|
|
|
|
virSetError(orig_err);
|
|
|
|
virFreeError(orig_err);
|
|
|
|
}
|
|
|
|
|
2011-08-11 12:36:04 +00:00
|
|
|
return ret;
|
|
|
|
|
2011-01-31 10:47:03 +00:00
|
|
|
cancel:
|
2012-04-20 12:07:49 +00:00
|
|
|
orig_err = virSaveLastError();
|
|
|
|
|
2011-08-11 12:36:04 +00:00
|
|
|
if (virDomainObjIsActive(vm)) {
|
qemu: fix crash when mixing sync and async monitor jobs
Currently, we attempt to run sync job and async job at the same time. It
means that the monitor commands for two jobs can be run in any order.
In the function qemuDomainObjEnterMonitorInternal():
if (priv->job.active == QEMU_JOB_NONE && priv->job.asyncJob) {
if (qemuDomainObjBeginNestedJob(driver, obj) < 0)
We check whether the caller is an async job by priv->job.active and
priv->job.asynJob. But when an async job is running, and a sync job is
also running at the time of the check, then priv->job.active is not
QEMU_JOB_NONE. So we cannot check whether the caller is an async job
in the function qemuDomainObjEnterMonitorInternal(), and must instead
put the burden on the caller to tell us when an async command wants
to do a nested job.
Once the burden is on the caller, then only async monitor enters need
to worry about whether the VM is still running; for sync monitor enter,
the internal return is always 0, so lots of ignore_value can be dropped.
* src/qemu/THREADS.txt: Reflect new rules.
* src/qemu/qemu_domain.h (qemuDomainObjEnterMonitorAsync): New
prototype.
* src/qemu/qemu_process.h (qemuProcessStartCPUs)
(qemuProcessStopCPUs): Add parameter.
* src/qemu/qemu_migration.h (qemuMigrationToFile): Likewise.
(qemuMigrationWaitForCompletion): Make static.
* src/qemu/qemu_domain.c (qemuDomainObjEnterMonitorInternal): Add
parameter.
(qemuDomainObjEnterMonitorAsync): New function.
(qemuDomainObjEnterMonitor, qemuDomainObjEnterMonitorWithDriver):
Update callers.
* src/qemu/qemu_driver.c (qemuDomainSaveInternal)
(qemudDomainCoreDump, doCoreDump, processWatchdogEvent)
(qemudDomainSuspend, qemudDomainResume, qemuDomainSaveImageStartVM)
(qemuDomainSnapshotCreateActive, qemuDomainRevertToSnapshot):
Likewise.
* src/qemu/qemu_process.c (qemuProcessStopCPUs)
(qemuProcessFakeReboot, qemuProcessRecoverMigration)
(qemuProcessRecoverJob, qemuProcessStart): Likewise.
* src/qemu/qemu_migration.c (qemuMigrationToFile)
(qemuMigrationWaitForCompletion, qemuMigrationUpdateJobStatus)
(qemuMigrationJobStart, qemuDomainMigrateGraphicsRelocate)
(doNativeMigrate, doTunnelMigrate, qemuMigrationPerformJob)
(qemuMigrationPerformPhase, qemuMigrationFinish)
(qemuMigrationConfirm): Likewise.
* src/qemu/qemu_hotplug.c: Drop unneeded ignore_value.
2011-07-28 23:18:24 +00:00
|
|
|
if (qemuDomainObjEnterMonitorAsync(driver, vm,
|
|
|
|
QEMU_ASYNC_JOB_MIGRATION_OUT) == 0) {
|
2011-06-30 09:23:50 +00:00
|
|
|
qemuMonitorMigrateCancel(priv->mon);
|
2013-02-06 18:17:20 +00:00
|
|
|
qemuDomainObjExitMonitor(driver, vm);
|
2011-06-30 09:23:50 +00:00
|
|
|
}
|
2011-01-31 10:47:03 +00:00
|
|
|
}
|
2011-08-11 12:36:04 +00:00
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Perform migration using QEMU's native TCP migrate support,
|
|
|
|
* not encrypted obviously
|
|
|
|
*/
|
2012-11-28 16:43:10 +00:00
|
|
|
static int doNativeMigrate(virQEMUDriverPtr driver,
|
2011-08-11 12:36:04 +00:00
|
|
|
virDomainObjPtr vm,
|
|
|
|
const char *uri,
|
|
|
|
const char *cookiein,
|
|
|
|
int cookieinlen,
|
|
|
|
char **cookieout,
|
|
|
|
int *cookieoutlen,
|
|
|
|
unsigned long flags,
|
2011-09-23 06:56:56 +00:00
|
|
|
unsigned long resource,
|
2013-06-18 10:17:18 +00:00
|
|
|
virConnectPtr dconn,
|
|
|
|
const char *graphicsuri)
|
2011-08-11 12:36:04 +00:00
|
|
|
{
|
2011-08-11 13:47:02 +00:00
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
2012-02-24 18:48:55 +00:00
|
|
|
virURIPtr uribits = NULL;
|
2011-08-11 13:47:02 +00:00
|
|
|
int ret = -1;
|
2011-08-11 12:36:04 +00:00
|
|
|
qemuMigrationSpec spec;
|
|
|
|
|
|
|
|
VIR_DEBUG("driver=%p, vm=%p, uri=%s, cookiein=%s, cookieinlen=%d, "
|
2013-06-18 10:17:18 +00:00
|
|
|
"cookieout=%p, cookieoutlen=%p, flags=%lx, resource=%lu, "
|
|
|
|
"graphicsuri=%s",
|
2011-08-11 12:36:04 +00:00
|
|
|
driver, vm, uri, NULLSTR(cookiein), cookieinlen,
|
2013-06-18 10:17:18 +00:00
|
|
|
cookieout, cookieoutlen, flags, resource,
|
|
|
|
NULLSTR(graphicsuri));
|
2011-08-11 12:36:04 +00:00
|
|
|
|
|
|
|
if (STRPREFIX(uri, "tcp:") && !STRPREFIX(uri, "tcp://")) {
|
2012-02-02 15:34:08 +00:00
|
|
|
char *tmp;
|
2011-08-11 12:36:04 +00:00
|
|
|
/* HACK: source host generates bogus URIs, so fix them up */
|
2013-07-04 10:14:12 +00:00
|
|
|
if (virAsprintf(&tmp, "tcp://%s", uri + strlen("tcp:")) < 0)
|
2011-08-11 12:36:04 +00:00
|
|
|
return -1;
|
2012-02-24 18:48:55 +00:00
|
|
|
uribits = virURIParse(tmp);
|
2011-08-11 13:47:02 +00:00
|
|
|
VIR_FREE(tmp);
|
2011-08-11 12:36:04 +00:00
|
|
|
} else {
|
2012-02-24 18:48:55 +00:00
|
|
|
uribits = virURIParse(uri);
|
2011-08-11 12:36:04 +00:00
|
|
|
}
|
Centralize error reporting for URI parsing/formatting problems
Move error reporting out of the callers, into virURIParse
and virURIFormat, to get consistency.
* include/libvirt/virterror.h, src/util/virterror.c: Add VIR_FROM_URI
* src/util/viruri.c, src/util/viruri.h: Add error reporting
* src/esx/esx_driver.c, src/libvirt.c, src/libxl/libxl_driver.c,
src/lxc/lxc_driver.c, src/openvz/openvz_driver.c,
src/qemu/qemu_driver.c, src/qemu/qemu_migration.c,
src/remote/remote_driver.c, src/uml/uml_driver.c,
src/vbox/vbox_tmpl.c, src/vmx/vmx.c, src/xen/xen_driver.c,
src/xen/xend_internal.c, tests/viruritest.c: Remove error
reporting
Signed-off-by: Daniel P. Berrange <berrange@redhat.com>
2012-03-20 12:16:54 +00:00
|
|
|
if (!uribits)
|
2011-08-11 12:36:04 +00:00
|
|
|
return -1;
|
|
|
|
|
2013-02-01 13:48:58 +00:00
|
|
|
if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_QEMU_FD))
|
2012-02-02 15:34:08 +00:00
|
|
|
spec.destType = MIGRATION_DEST_CONNECT_HOST;
|
|
|
|
else
|
2011-08-11 13:47:02 +00:00
|
|
|
spec.destType = MIGRATION_DEST_HOST;
|
2012-02-02 15:34:08 +00:00
|
|
|
spec.dest.host.name = uribits->server;
|
|
|
|
spec.dest.host.port = uribits->port;
|
|
|
|
spec.fwdType = MIGRATION_FWD_DIRECT;
|
2011-08-11 13:47:02 +00:00
|
|
|
|
2011-08-11 12:36:04 +00:00
|
|
|
ret = qemuMigrationRun(driver, vm, cookiein, cookieinlen, cookieout,
|
2013-06-18 10:17:18 +00:00
|
|
|
cookieoutlen, flags, resource, &spec, dconn,
|
|
|
|
graphicsuri);
|
2011-08-11 13:47:02 +00:00
|
|
|
|
|
|
|
if (spec.destType == MIGRATION_DEST_FD)
|
|
|
|
VIR_FORCE_CLOSE(spec.dest.fd.qemu);
|
|
|
|
|
2012-03-20 11:59:42 +00:00
|
|
|
virURIFree(uribits);
|
2011-08-11 12:36:04 +00:00
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2012-11-28 16:43:10 +00:00
|
|
|
static int doTunnelMigrate(virQEMUDriverPtr driver,
|
2011-08-11 12:36:04 +00:00
|
|
|
virDomainObjPtr vm,
|
|
|
|
virStreamPtr st,
|
|
|
|
const char *cookiein,
|
|
|
|
int cookieinlen,
|
|
|
|
char **cookieout,
|
|
|
|
int *cookieoutlen,
|
|
|
|
unsigned long flags,
|
2011-09-23 06:56:56 +00:00
|
|
|
unsigned long resource,
|
2013-06-18 10:17:18 +00:00
|
|
|
virConnectPtr dconn,
|
|
|
|
const char *graphicsuri)
|
2011-08-11 12:36:04 +00:00
|
|
|
{
|
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
2011-08-12 08:54:05 +00:00
|
|
|
virNetSocketPtr sock = NULL;
|
2011-08-11 12:36:04 +00:00
|
|
|
int ret = -1;
|
|
|
|
qemuMigrationSpec spec;
|
2013-01-10 21:03:14 +00:00
|
|
|
virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
|
2011-08-11 12:36:04 +00:00
|
|
|
|
|
|
|
VIR_DEBUG("driver=%p, vm=%p, st=%p, cookiein=%s, cookieinlen=%d, "
|
2013-06-18 10:17:18 +00:00
|
|
|
"cookieout=%p, cookieoutlen=%p, flags=%lx, resource=%lu, "
|
|
|
|
"graphicsuri=%s",
|
2011-08-11 12:36:04 +00:00
|
|
|
driver, vm, st, NULLSTR(cookiein), cookieinlen,
|
2013-06-18 10:17:18 +00:00
|
|
|
cookieout, cookieoutlen, flags, resource,
|
|
|
|
NULLSTR(graphicsuri));
|
2011-08-11 12:36:04 +00:00
|
|
|
|
2013-02-01 13:48:58 +00:00
|
|
|
if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_QEMU_FD) &&
|
|
|
|
!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_QEMU_UNIX) &&
|
|
|
|
!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_QEMU_EXEC)) {
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_OPERATION_FAILED, "%s",
|
|
|
|
_("Source qemu is too old to support tunnelled migration"));
|
2013-01-10 21:03:14 +00:00
|
|
|
virObjectUnref(cfg);
|
2011-08-11 13:47:02 +00:00
|
|
|
return -1;
|
2011-08-11 12:36:04 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
spec.fwdType = MIGRATION_FWD_STREAM;
|
|
|
|
spec.fwd.stream = st;
|
|
|
|
|
2013-02-01 13:48:58 +00:00
|
|
|
if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_QEMU_FD)) {
|
2011-08-11 13:47:02 +00:00
|
|
|
int fds[2];
|
|
|
|
|
|
|
|
spec.destType = MIGRATION_DEST_FD;
|
|
|
|
spec.dest.fd.qemu = -1;
|
|
|
|
spec.dest.fd.local = -1;
|
|
|
|
|
2011-08-29 23:31:42 +00:00
|
|
|
if (pipe2(fds, O_CLOEXEC) == 0) {
|
2011-08-11 13:47:02 +00:00
|
|
|
spec.dest.fd.qemu = fds[1];
|
|
|
|
spec.dest.fd.local = fds[0];
|
|
|
|
}
|
|
|
|
if (spec.dest.fd.qemu == -1 ||
|
Change security driver APIs to use virDomainDefPtr instead of virDomainObjPtr
When sVirt is integrated with the LXC driver, it will be neccessary
to invoke the security driver APIs using only a virDomainDefPtr
since the lxc_container.c code has no virDomainObjPtr available.
Aside from two functions which want obj->pid, every bit of the
security driver code only touches obj->def. So we don't need to
pass a virDomainObjPtr into the security drivers, a virDomainDefPtr
is sufficient. Two functions also gain a 'pid_t pid' argument.
* src/qemu/qemu_driver.c, src/qemu/qemu_hotplug.c,
src/qemu/qemu_migration.c, src/qemu/qemu_process.c,
src/security/security_apparmor.c,
src/security/security_dac.c,
src/security/security_driver.h,
src/security/security_manager.c,
src/security/security_manager.h,
src/security/security_nop.c,
src/security/security_selinux.c,
src/security/security_stack.c: Change all security APIs to use a
virDomainDefPtr instead of virDomainObjPtr
2011-07-14 13:32:06 +00:00
|
|
|
virSecurityManagerSetImageFDLabel(driver->securityManager, vm->def,
|
2011-08-29 23:31:42 +00:00
|
|
|
spec.dest.fd.qemu) < 0) {
|
2011-08-11 13:47:02 +00:00
|
|
|
virReportSystemError(errno, "%s",
|
|
|
|
_("cannot create pipe for tunnelled migration"));
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
spec.destType = MIGRATION_DEST_UNIX;
|
|
|
|
spec.dest.unix_socket.sock = -1;
|
|
|
|
spec.dest.unix_socket.file = NULL;
|
|
|
|
|
|
|
|
if (virAsprintf(&spec.dest.unix_socket.file,
|
|
|
|
"%s/qemu.tunnelmigrate.src.%s",
|
2013-07-04 10:14:12 +00:00
|
|
|
cfg->libDir, vm->def->name) < 0)
|
2011-08-11 13:47:02 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
if (virNetSocketNewListenUNIX(spec.dest.unix_socket.file, 0700,
|
2013-01-10 21:03:14 +00:00
|
|
|
cfg->user, cfg->group,
|
2011-08-11 13:47:02 +00:00
|
|
|
&sock) < 0 ||
|
|
|
|
virNetSocketListen(sock, 1) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
spec.dest.unix_socket.sock = virNetSocketGetFD(sock);
|
|
|
|
}
|
|
|
|
|
2011-08-11 12:36:04 +00:00
|
|
|
ret = qemuMigrationRun(driver, vm, cookiein, cookieinlen, cookieout,
|
2013-06-18 10:17:18 +00:00
|
|
|
cookieoutlen, flags, resource, &spec, dconn,
|
|
|
|
graphicsuri);
|
2011-01-31 10:47:03 +00:00
|
|
|
|
2011-04-20 12:12:43 +00:00
|
|
|
cleanup:
|
2011-08-11 13:47:02 +00:00
|
|
|
if (spec.destType == MIGRATION_DEST_FD) {
|
|
|
|
VIR_FORCE_CLOSE(spec.dest.fd.qemu);
|
|
|
|
VIR_FORCE_CLOSE(spec.dest.fd.local);
|
|
|
|
} else {
|
2012-07-11 13:35:51 +00:00
|
|
|
virObjectUnref(sock);
|
2011-08-11 13:47:02 +00:00
|
|
|
VIR_FREE(spec.dest.unix_socket.file);
|
|
|
|
}
|
2011-04-20 12:12:43 +00:00
|
|
|
|
2013-01-10 21:03:14 +00:00
|
|
|
virObjectUnref(cfg);
|
2011-04-20 12:12:43 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-04-20 12:48:58 +00:00
|
|
|
/* This is essentially a re-impl of virDomainMigrateVersion2
|
|
|
|
* from libvirt.c, but running in source libvirtd context,
|
|
|
|
* instead of client app context & also adding in tunnel
|
|
|
|
* handling */
|
2012-11-28 16:43:10 +00:00
|
|
|
/**
 * doPeer2PeerMigrate2:
 *
 * Drive a peer-to-peer migration using the v2 protocol, running in the
 * source libvirtd: fetch the domain XML, call Prepare2 (or PrepareTunnel
 * for tunnelled migration) on the destination, run the Perform phase
 * locally, then call Finish2 on the destination with the Perform status.
 *
 * @driver: QEMU driver state
 * @sconn: source connection (only used for debug logging here)
 * @dconn: open connection to the destination libvirtd
 * @vm: domain being migrated
 * @dconnuri: URI of the destination libvirtd connection
 * @flags: VIR_MIGRATE_* flags
 * @dname: optional name for the domain on the destination
 * @resource: bandwidth limit passed to the destination
 *
 * Returns 0 if Finish2 produced a domain object on the destination,
 * -1 otherwise.
 */
static int doPeer2PeerMigrate2(virQEMUDriverPtr driver,
                               virConnectPtr sconn ATTRIBUTE_UNUSED,
                               virConnectPtr dconn,
                               virDomainObjPtr vm,
                               const char *dconnuri,
                               unsigned long flags,
                               const char *dname,
                               unsigned long resource)
{
    virDomainPtr ddomain = NULL;
    char *uri_out = NULL;
    char *cookie = NULL;
    char *dom_xml = NULL;
    int cookielen = 0, ret;
    virErrorPtr orig_err = NULL;
    bool cancelled;
    virStreamPtr st = NULL;
    unsigned long destflags;

    VIR_DEBUG("driver=%p, sconn=%p, dconn=%p, vm=%p, dconnuri=%s, "
              "flags=%lx, dname=%s, resource=%lu",
              driver, sconn, dconn, vm, NULLSTR(dconnuri),
              flags, NULLSTR(dname), resource);

    /* In version 2 of the protocol, the prepare step is slightly
     * different.  We fetch the domain XML of the source domain
     * and pass it to Prepare2.
     */
    if (!(dom_xml = qemuDomainFormatXML(driver, vm,
                                        QEMU_DOMAIN_FORMAT_LIVE_FLAGS |
                                        VIR_DOMAIN_XML_MIGRATABLE)))
        return -1;

    if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED)
        flags |= VIR_MIGRATE_PAUSED;

    /* VIR_MIGRATE_ABORT_ON_ERROR only affects the source side, so
     * strip it from the flags forwarded to the destination. */
    destflags = flags & ~VIR_MIGRATE_ABORT_ON_ERROR;

    VIR_DEBUG("Prepare2 %p", dconn);
    if (flags & VIR_MIGRATE_TUNNELLED) {
        /*
         * Tunnelled Migrate Version 2 does not support cookies
         * due to missing parameters in the prepareTunnel() API.
         */

        if (!(st = virStreamNew(dconn, 0)))
            goto cleanup;

        /* Enter/ExitRemote drop the domain lock around the remote RPC
         * so a slow destination cannot block other domain operations. */
        qemuDomainObjEnterRemote(vm);
        ret = dconn->driver->domainMigratePrepareTunnel
            (dconn, st, destflags, dname, resource, dom_xml);
        qemuDomainObjExitRemote(vm);
    } else {
        qemuDomainObjEnterRemote(vm);
        ret = dconn->driver->domainMigratePrepare2
            (dconn, &cookie, &cookielen, NULL, &uri_out,
             destflags, dname, resource, dom_xml);
        qemuDomainObjExitRemote(vm);
    }
    VIR_FREE(dom_xml);
    if (ret == -1)
        goto cleanup;

    /* the domain may have shutdown or crashed while we had the locks dropped
     * in qemuDomainObjEnterRemote, so check again
     */
    if (!virDomainObjIsActive(vm)) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("guest unexpectedly quit"));
        goto cleanup;
    }

    /* Non-tunnelled migration needs the destination-provided URI for
     * the Perform phase; treat a missing one as a destination bug but
     * still run Finish2 (with cancelled=true) so it can clean up. */
    if (!(flags & VIR_MIGRATE_TUNNELLED) &&
        (uri_out == NULL)) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("domainMigratePrepare2 did not set uri"));
        cancelled = true;
        goto finish;
    }

    /* Perform the migration. The driver isn't supposed to return
     * until the migration is complete.
     */
    VIR_DEBUG("Perform %p", sconn);
    qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_PERFORM2);
    if (flags & VIR_MIGRATE_TUNNELLED)
        ret = doTunnelMigrate(driver, vm, st,
                              NULL, 0, NULL, NULL,
                              flags, resource, dconn, NULL);
    else
        ret = doNativeMigrate(driver, vm, uri_out,
                              cookie, cookielen,
                              NULL, NULL, /* No out cookie with v2 migration */
                              flags, resource, dconn, NULL);

    /* Perform failed. Make sure Finish doesn't overwrite the error */
    if (ret < 0)
        orig_err = virSaveLastError();

    /* If Perform returns < 0, then we need to cancel the VM
     * startup on the destination
     */
    cancelled = ret < 0;

finish:
    /* In version 2 of the migration protocol, we pass the
     * status code from the sender to the destination host,
     * so it can do any cleanup if the migration failed.
     */
    dname = dname ? dname : vm->def->name;
    VIR_DEBUG("Finish2 %p ret=%d", dconn, ret);
    qemuDomainObjEnterRemote(vm);
    ddomain = dconn->driver->domainMigrateFinish2
        (dconn, dname, cookie, cookielen,
         uri_out ? uri_out : dconnuri, destflags, cancelled);
    qemuDomainObjExitRemote(vm);

cleanup:
    /* Overall success is defined by whether Finish2 handed back a
     * domain object, not by the intermediate Perform status. */
    if (ddomain) {
        virObjectUnref(ddomain);
        ret = 0;
    } else {
        ret = -1;
    }

    virObjectUnref(st);

    /* Restore any error saved before Finish2 so the caller sees the
     * original failure rather than a later, less relevant one. */
    if (orig_err) {
        virSetError(orig_err);
        virFreeError(orig_err);
    }
    VIR_FREE(uri_out);
    VIR_FREE(cookie);

    return ret;
}
|
|
|
|
|
|
|
|
|
2011-02-03 11:09:28 +00:00
|
|
|
/* This is essentially a re-impl of virDomainMigrateVersion3
|
|
|
|
* from libvirt.c, but running in source libvirtd context,
|
|
|
|
* instead of client app context & also adding in tunnel
|
|
|
|
* handling */
|
2013-06-25 13:49:21 +00:00
|
|
|
/**
 * doPeer2PeerMigrate3:
 *
 * Drive a peer-to-peer migration using the v3 protocol, running in the
 * source libvirtd: Begin (locally), Prepare3 on the destination, Perform
 * locally, Finish3 on the destination, and Confirm (locally).  Cookies
 * are rotated between phases: each phase's output cookie becomes the
 * next phase's input cookie.
 *
 * @driver: QEMU driver state
 * @sconn: source connection
 * @dconn: open connection to the destination libvirtd
 * @dconnuri: URI of the destination libvirtd connection
 * @vm: domain being migrated
 * @xmlin: optional updated XML to use on the destination
 * @dname: optional name for the domain on the destination
 * @uri: optional migration transport URI
 * @graphicsuri: optional URI for seamless graphics client relocation
 * @bandwidth: bandwidth limit in MiB/s (0 = unlimited)
 * @useParams: true to use the extensible *3Params RPC variants
 * @flags: VIR_MIGRATE_* flags
 *
 * Returns 0 if the domain ended up running on the destination,
 * -1 otherwise.
 */
static int
doPeer2PeerMigrate3(virQEMUDriverPtr driver,
                    virConnectPtr sconn,
                    virConnectPtr dconn,
                    const char *dconnuri,
                    virDomainObjPtr vm,
                    const char *xmlin,
                    const char *dname,
                    const char *uri,
                    const char *graphicsuri,
                    unsigned long long bandwidth,
                    bool useParams,
                    unsigned long flags)
{
    virDomainPtr ddomain = NULL;
    char *uri_out = NULL;
    char *cookiein = NULL;
    char *cookieout = NULL;
    char *dom_xml = NULL;
    int cookieinlen = 0;
    int cookieoutlen = 0;
    int ret = -1;
    virErrorPtr orig_err = NULL;
    /* Start pessimistic: until Perform succeeds, any jump to finish
     * must tell the destination to cancel the incoming domain. */
    bool cancelled = true;
    virStreamPtr st = NULL;
    unsigned long destflags;
    virTypedParameterPtr params = NULL;
    int nparams = 0;
    int maxparams = 0;

    VIR_DEBUG("driver=%p, sconn=%p, dconn=%p, dconnuri=%s, vm=%p, xmlin=%s, "
              "dname=%s, uri=%s, graphicsuri=%s, bandwidth=%llu, "
              "useParams=%d, flags=%lx",
              driver, sconn, dconn, NULLSTR(dconnuri), vm, NULLSTR(xmlin),
              NULLSTR(dname), NULLSTR(uri), NULLSTR(graphicsuri), bandwidth,
              useParams, flags);

    /* Unlike the virDomainMigrateVersion3 counterpart, we don't need
     * to worry about auto-setting the VIR_MIGRATE_CHANGE_PROTECTION
     * bit here, because we are already running inside the context of
     * a single job.  */

    dom_xml = qemuMigrationBeginPhase(driver, vm, xmlin, dname,
                                      &cookieout, &cookieoutlen, flags);
    if (!dom_xml)
        goto cleanup;

    /* For the extensible-parameters RPCs, marshal the optional
     * arguments into a typed-parameter list instead of fixed args. */
    if (useParams) {
        if (virTypedParamsAddString(&params, &nparams, &maxparams,
                                    VIR_MIGRATE_PARAM_DEST_XML, dom_xml) < 0)
            goto cleanup;

        if (dname &&
            virTypedParamsAddString(&params, &nparams, &maxparams,
                                    VIR_MIGRATE_PARAM_DEST_NAME, dname) < 0)
            goto cleanup;

        if (uri &&
            virTypedParamsAddString(&params, &nparams, &maxparams,
                                    VIR_MIGRATE_PARAM_URI, uri) < 0)
            goto cleanup;

        if (bandwidth &&
            virTypedParamsAddULLong(&params, &nparams, &maxparams,
                                    VIR_MIGRATE_PARAM_BANDWIDTH,
                                    bandwidth) < 0)
            goto cleanup;

        if (graphicsuri &&
            virTypedParamsAddString(&params, &nparams, &maxparams,
                                    VIR_MIGRATE_PARAM_GRAPHICS_URI,
                                    graphicsuri) < 0)
            goto cleanup;
    }

    if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED)
        flags |= VIR_MIGRATE_PAUSED;

    /* VIR_MIGRATE_ABORT_ON_ERROR only affects the source side, so
     * strip it from the flags forwarded to the destination. */
    destflags = flags & ~VIR_MIGRATE_ABORT_ON_ERROR;

    VIR_DEBUG("Prepare3 %p", dconn);
    /* Rotate cookies: Begin's output becomes Prepare3's input. */
    cookiein = cookieout;
    cookieinlen = cookieoutlen;
    cookieout = NULL;
    cookieoutlen = 0;
    if (flags & VIR_MIGRATE_TUNNELLED) {
        if (!(st = virStreamNew(dconn, 0)))
            goto cleanup;

        /* Enter/ExitRemote drop the domain lock around the remote RPC. */
        qemuDomainObjEnterRemote(vm);
        if (useParams) {
            ret = dconn->driver->domainMigratePrepareTunnel3Params
                (dconn, st, params, nparams, cookiein, cookieinlen,
                 &cookieout, &cookieoutlen, destflags);
        } else {
            ret = dconn->driver->domainMigratePrepareTunnel3
                (dconn, st, cookiein, cookieinlen, &cookieout, &cookieoutlen,
                 destflags, dname, bandwidth, dom_xml);
        }
        qemuDomainObjExitRemote(vm);
    } else {
        qemuDomainObjEnterRemote(vm);
        if (useParams) {
            ret = dconn->driver->domainMigratePrepare3Params
                (dconn, params, nparams, cookiein, cookieinlen,
                 &cookieout, &cookieoutlen, &uri_out, destflags);
        } else {
            ret = dconn->driver->domainMigratePrepare3
                (dconn, cookiein, cookieinlen, &cookieout, &cookieoutlen,
                 uri, &uri_out, destflags, dname, bandwidth, dom_xml);
        }
        qemuDomainObjExitRemote(vm);
    }
    VIR_FREE(dom_xml);
    if (ret == -1)
        goto cleanup;

    if (flags & VIR_MIGRATE_OFFLINE) {
        VIR_DEBUG("Offline migration, skipping Perform phase");
        VIR_FREE(cookieout);
        cookieoutlen = 0;
        cancelled = false;
        goto finish;
    }

    /* A destination-provided URI overrides the caller's; for the
     * params variant it must also be updated in the param list. */
    if (uri_out) {
        uri = uri_out;
        if (useParams &&
            virTypedParamsReplaceString(&params, &nparams,
                                        VIR_MIGRATE_PARAM_URI, uri_out) < 0)
            goto finish;
    } else if (!uri && !(flags & VIR_MIGRATE_TUNNELLED)) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("domainMigratePrepare3 did not set uri"));
        goto finish;
    }

    /* Perform the migration. The driver isn't supposed to return
     * until the migration is complete. The src VM should remain
     * running, but in paused state until the destination can
     * confirm migration completion.
     */
    VIR_DEBUG("Perform3 %p uri=%s", sconn, NULLSTR(uri));
    qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_PERFORM3);
    /* Rotate cookies: Prepare3's output becomes Perform's input. */
    VIR_FREE(cookiein);
    cookiein = cookieout;
    cookieinlen = cookieoutlen;
    cookieout = NULL;
    cookieoutlen = 0;
    if (flags & VIR_MIGRATE_TUNNELLED) {
        ret = doTunnelMigrate(driver, vm, st,
                              cookiein, cookieinlen,
                              &cookieout, &cookieoutlen,
                              flags, bandwidth, dconn, graphicsuri);
    } else {
        ret = doNativeMigrate(driver, vm, uri,
                              cookiein, cookieinlen,
                              &cookieout, &cookieoutlen,
                              flags, bandwidth, dconn, graphicsuri);
    }

    /* Perform failed. Make sure Finish doesn't overwrite the error */
    if (ret < 0) {
        orig_err = virSaveLastError();
    } else {
        qemuMigrationJobSetPhase(driver, vm,
                                 QEMU_MIGRATION_PHASE_PERFORM3_DONE);
    }

    /* If Perform returns < 0, then we need to cancel the VM
     * startup on the destination
     */
    cancelled = ret < 0;

finish:
    /*
     * The status code from the source is passed to the destination.
     * The dest can cleanup in the source indicated it failed to
     * send all migration data. Returns NULL for ddomain if
     * the dest was unable to complete migration.
     */
    VIR_DEBUG("Finish3 %p ret=%d", dconn, ret);
    /* Rotate cookies: Perform's output becomes Finish3's input. */
    VIR_FREE(cookiein);
    cookiein = cookieout;
    cookieinlen = cookieoutlen;
    cookieout = NULL;
    cookieoutlen = 0;

    if (useParams) {
        /* Finish3Params requires a destination name; fall back to the
         * source name if the caller didn't supply one. */
        if (virTypedParamsGetString(params, nparams,
                                    VIR_MIGRATE_PARAM_DEST_NAME, NULL) <= 0 &&
            virTypedParamsReplaceString(&params, &nparams,
                                        VIR_MIGRATE_PARAM_DEST_NAME,
                                        vm->def->name) < 0) {
            ddomain = NULL;
        } else {
            qemuDomainObjEnterRemote(vm);
            ddomain = dconn->driver->domainMigrateFinish3Params
                (dconn, params, nparams, cookiein, cookieinlen,
                 &cookieout, &cookieoutlen, destflags, cancelled);
            qemuDomainObjExitRemote(vm);
        }
    } else {
        dname = dname ? dname : vm->def->name;
        qemuDomainObjEnterRemote(vm);
        ddomain = dconn->driver->domainMigrateFinish3
            (dconn, dname, cookiein, cookieinlen, &cookieout, &cookieoutlen,
             dconnuri, uri, destflags, cancelled);
        qemuDomainObjExitRemote(vm);
    }

    /* If ddomain is NULL, then we were unable to start
     * the guest on the target, and must restart on the
     * source. There is a small chance that the ddomain
     * is NULL due to an RPC failure, in which case
     * ddomain could in fact be running on the dest.
     * The lock manager plugins should take care of
     * safety in this scenario.
     */
    cancelled = ddomain == NULL;

    /* If finish3 set an error, and we don't have an earlier
     * one we need to preserve it in case confirm3 overwrites
     */
    if (!orig_err)
        orig_err = virSaveLastError();

    /*
     * If cancelled, then src VM will be restarted, else
     * it will be killed
     */
    VIR_DEBUG("Confirm3 %p cancelled=%d vm=%p", sconn, cancelled, vm);
    /* Rotate cookies: Finish3's output becomes Confirm's input. */
    VIR_FREE(cookiein);
    cookiein = cookieout;
    cookieinlen = cookieoutlen;
    cookieout = NULL;
    cookieoutlen = 0;
    ret = qemuMigrationConfirmPhase(driver, sconn, vm,
                                    cookiein, cookieinlen,
                                    flags, cancelled);
    /* If Confirm3 returns -1, there's nothing more we can
     * do, but fortunately worst case is that there is a
     * domain left in 'paused' state on source.
     */
    if (ret < 0)
        VIR_WARN("Guest %s probably left in 'paused' state on source",
                 vm->def->name);

cleanup:
    /* Overall success is defined by whether Finish3 handed back a
     * domain object, not by the intermediate phase return codes. */
    if (ddomain) {
        virObjectUnref(ddomain);
        ret = 0;
    } else {
        ret = -1;
    }

    virObjectUnref(st);

    /* Restore the earliest saved error so the caller sees the root
     * cause rather than a later, less relevant one. */
    if (orig_err) {
        virSetError(orig_err);
        virFreeError(orig_err);
    }
    VIR_FREE(uri_out);
    VIR_FREE(cookiein);
    VIR_FREE(cookieout);
    virTypedParamsFree(params, nparams);
    return ret;
}
|
|
|
|
|
|
|
|
|
2012-11-28 16:43:10 +00:00
|
|
|
/**
 * doPeer2PeerMigrate:
 *
 * Entry point for peer-to-peer migration on the source side: open a
 * connection to the destination libvirtd, probe which migration
 * features it supports, validate the requested flags against those
 * features, and dispatch to the v2 or v3 protocol implementation.
 *
 * @driver: QEMU driver state
 * @sconn: source connection
 * @vm: domain being migrated
 * @xmlin: optional updated XML to use on the destination
 * @dconnuri: URI of the destination libvirtd to connect to
 * @uri: optional migration transport URI
 * @graphicsuri: optional URI for seamless graphics client relocation
 * @flags: VIR_MIGRATE_* flags
 * @dname: optional name for the domain on the destination
 * @resource: bandwidth limit
 * @v3proto: in/out; set to whether the v3 protocol was actually used,
 *           based on destination capability rather than caller's API
 *
 * Returns 0 on success, -1 on failure.
 */
static int doPeer2PeerMigrate(virQEMUDriverPtr driver,
                              virConnectPtr sconn,
                              virDomainObjPtr vm,
                              const char *xmlin,
                              const char *dconnuri,
                              const char *uri,
                              const char *graphicsuri,
                              unsigned long flags,
                              const char *dname,
                              unsigned long resource,
                              bool *v3proto)
{
    int ret = -1;
    virConnectPtr dconn = NULL;
    bool p2p;
    virErrorPtr orig_err = NULL;
    bool offline = false;
    virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
    bool useParams;

    VIR_DEBUG("driver=%p, sconn=%p, vm=%p, xmlin=%s, dconnuri=%s, "
              "uri=%s, graphicsuri=%s, flags=%lx, dname=%s, resource=%lu",
              driver, sconn, vm, NULLSTR(xmlin), NULLSTR(dconnuri),
              NULLSTR(uri), NULLSTR(graphicsuri), flags, NULLSTR(dname),
              resource);

    /* the order of operations is important here; we make sure the
     * destination side is completely setup before we touch the source
     */

    /* Enter/ExitRemote drop the domain lock while the potentially slow
     * remote connection is established. */
    qemuDomainObjEnterRemote(vm);
    dconn = virConnectOpen(dconnuri);
    qemuDomainObjExitRemote(vm);
    if (dconn == NULL) {
        virReportError(VIR_ERR_OPERATION_FAILED,
                       _("Failed to connect to remote libvirt URI %s: %s"),
                       dconnuri, virGetLastErrorMessage());
        virObjectUnref(cfg);
        return -1;
    }

    /* Keepalive lets us notice a dead destination daemon instead of
     * blocking the migration job forever. */
    if (virConnectSetKeepAlive(dconn, cfg->keepAliveInterval,
                               cfg->keepAliveCount) < 0)
        goto cleanup;

    qemuDomainObjEnterRemote(vm);
    p2p = VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
                                   VIR_DRV_FEATURE_MIGRATION_P2P);
    /* v3proto reflects whether the caller used Perform3, but with
     * p2p migrate, regardless of whether Perform2 or Perform3
     * were used, we decide protocol based on what target supports
     */
    *v3proto = VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
                                        VIR_DRV_FEATURE_MIGRATION_V3);
    useParams = VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
                                         VIR_DRV_FEATURE_MIGRATION_PARAMS);
    if (flags & VIR_MIGRATE_OFFLINE)
        offline = VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
                                           VIR_DRV_FEATURE_MIGRATION_OFFLINE);
    qemuDomainObjExitRemote(vm);

    if (!p2p) {
        virReportError(VIR_ERR_OPERATION_FAILED, "%s",
                       _("Destination libvirt does not support peer-to-peer migration protocol"));
        goto cleanup;
    }

    /* Only xmlin, dname, uri, and bandwidth parameters can be used with
     * old-style APIs. */
    if (!useParams && graphicsuri) {
        virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
                       _("Migration APIs with extensible parameters are not "
                         "supported but extended parameters were passed"));
        goto cleanup;
    }

    if (flags & VIR_MIGRATE_OFFLINE && !offline) {
        virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
                       _("offline migration is not supported by "
                         "the destination host"));
        goto cleanup;
    }

    /* domain may have been stopped while we were talking to remote daemon */
    if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("guest unexpectedly quit"));
        goto cleanup;
    }

    /* Change protection is only required on the source side (us), and
     * only for v3 migration when begin and perform are separate jobs.
     * But peer-2-peer is already a single job, and we still want to
     * talk to older destinations that would reject the flag.
     * Therefore it is safe to clear the bit here. */
    flags &= ~VIR_MIGRATE_CHANGE_PROTECTION;

    if (*v3proto) {
        ret = doPeer2PeerMigrate3(driver, sconn, dconn, dconnuri, vm, xmlin,
                                  dname, uri, graphicsuri, resource,
                                  useParams, flags);
    } else {
        ret = doPeer2PeerMigrate2(driver, sconn, dconn, vm,
                                  dconnuri, flags, dname, resource);
    }

cleanup:
    /* virConnectClose may itself report errors; save and restore the
     * migration error so it is what the caller ultimately sees. */
    orig_err = virSaveLastError();
    qemuDomainObjEnterRemote(vm);
    virConnectClose(dconn);
    qemuDomainObjExitRemote(vm);
    if (orig_err) {
        virSetError(orig_err);
        virFreeError(orig_err);
    }
    virObjectUnref(cfg);
    return ret;
}
|
|
|
|
|
|
|
|
|
2011-07-19 00:27:32 +00:00
|
|
|
/*
|
|
|
|
* This implements perform part of the migration protocol when migration job
|
|
|
|
* does not need to be active across several APIs, i.e., peer2peer migration or
|
|
|
|
* perform phase of v2 non-peer2peer migration.
|
|
|
|
*/
|
|
|
|
/*
 * Run the whole outgoing migration as one self-contained async job:
 * start the MIGRATION_OUT job, perform the migration (peer2peer or
 * native v2 perform phase), and finish the job before returning.
 *
 * Returns 0 on success, -1 on failure.  On failure of a live guest the
 * CPUs are resumed so the source domain keeps running.  The caller
 * passes in a locked @vm; this function unlocks it (and may dispose of
 * the object entirely via qemuDomainRemoveInactive).
 */
static int
qemuMigrationPerformJob(virQEMUDriverPtr driver,
                        virConnectPtr conn,
                        virDomainObjPtr vm,
                        const char *xmlin,
                        const char *dconnuri,
                        const char *uri,
                        const char *graphicsuri,
                        const char *cookiein,
                        int cookieinlen,
                        char **cookieout,
                        int *cookieoutlen,
                        unsigned long flags,
                        const char *dname,
                        unsigned long resource,
                        bool v3proto)
{
    virDomainEventPtr event = NULL;
    int ret = -1;
    /* nonzero while the guest may need its CPUs restarted on failure */
    int resume = 0;
    virErrorPtr orig_err = NULL;
    virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
    bool abort_on_error = !!(flags & VIR_MIGRATE_ABORT_ON_ERROR);

    if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
        goto cleanup;

    /* An inactive domain is only acceptable for offline migration. */
    if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) {
        virReportError(VIR_ERR_OPERATION_INVALID,
                       "%s", _("domain is not running"));
        goto endjob;
    }

    if (!qemuMigrationIsAllowed(driver, vm, NULL, true, abort_on_error))
        goto endjob;

    /* Refuse migrations with cache/storage setups known to be unsafe
     * unless the caller explicitly opted in with VIR_MIGRATE_UNSAFE. */
    if (!(flags & VIR_MIGRATE_UNSAFE) && !qemuMigrationIsSafe(vm->def))
        goto endjob;

    resume = virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING;

    if ((flags & (VIR_MIGRATE_TUNNELLED | VIR_MIGRATE_PEER2PEER))) {
        /* doPeer2PeerMigrate drives all phases itself and may downgrade
         * v3proto when talking to an older destination. */
        ret = doPeer2PeerMigrate(driver, conn, vm, xmlin,
                                 dconnuri, uri, graphicsuri, flags, dname,
                                 resource, &v3proto);
    } else {
        qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_PERFORM2);
        ret = doNativeMigrate(driver, vm, uri, cookiein, cookieinlen,
                              cookieout, cookieoutlen,
                              flags, resource, NULL, NULL);
    }
    if (ret < 0)
        goto endjob;

    /*
     * In v3 protocol, the source VM is not killed off until the
     * confirm step.
     */
    if (!v3proto) {
        qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_MIGRATED,
                        VIR_QEMU_PROCESS_STOP_MIGRATED);
        virDomainAuditStop(vm, "migrated");
        event = virDomainEventNewFromObj(vm,
                                         VIR_DOMAIN_EVENT_STOPPED,
                                         VIR_DOMAIN_EVENT_STOPPED_MIGRATED);
    }
    /* success: never restart CPUs below */
    resume = 0;

endjob:
    /* Preserve the failure cause across the cleanup calls below, which
     * may overwrite the thread-local error. */
    if (ret < 0)
        orig_err = virSaveLastError();

    if (resume && virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED) {
        /* we got here through some sort of failure; start the domain again */
        if (qemuProcessStartCPUs(driver, vm, conn,
                                 VIR_DOMAIN_RUNNING_MIGRATION_CANCELED,
                                 QEMU_ASYNC_JOB_MIGRATION_OUT) < 0) {
            /* Hm, we already know we are in error here. We don't want to
             * overwrite the previous error, though, so we just throw something
             * to the logs and hope for the best
             */
            VIR_ERROR(_("Failed to resume guest %s after failure"),
                      vm->def->name);
        }

        event = virDomainEventNewFromObj(vm,
                                         VIR_DOMAIN_EVENT_RESUMED,
                                         VIR_DOMAIN_EVENT_RESUMED_MIGRATED);
    }

    /* JobFinish returning false means the object lost its last reference;
     * in that case (and after RemoveInactive) vm is dead — NULL it so the
     * cleanup label does not touch it. */
    if (!qemuMigrationJobFinish(driver, vm)) {
        vm = NULL;
    } else if (!virDomainObjIsActive(vm) &&
               (!vm->persistent ||
                (ret == 0 && (flags & VIR_MIGRATE_UNDEFINE_SOURCE)))) {
        if (flags & VIR_MIGRATE_UNDEFINE_SOURCE)
            virDomainDeleteConfig(cfg->configDir, cfg->autostartDir, vm);
        qemuDomainRemoveInactive(driver, vm);
        vm = NULL;
    }

    /* Re-install the original error so the caller sees the real cause. */
    if (orig_err) {
        virSetError(orig_err);
        virFreeError(orig_err);
    }

cleanup:
    if (vm)
        virObjectUnlock(vm);
    if (event)
        qemuDomainEventQueue(driver, event);
    virObjectUnref(cfg);
    return ret;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This implements perform phase of v3 migration protocol.
|
|
|
|
*/
|
|
|
|
static int
qemuMigrationPerformPhase(virQEMUDriverPtr driver,
                          virConnectPtr conn,
                          virDomainObjPtr vm,
                          const char *uri,
                          const char *graphicsuri,
                          const char *cookiein,
                          int cookieinlen,
                          char **cookieout,
                          int *cookieoutlen,
                          unsigned long flags,
                          unsigned long resource)
{
    virDomainEventPtr event = NULL;
    int ret = -1;
    /* was the guest running before perform (so we must resume on error)? */
    bool resume;
    /* did the domain object keep references after the job bookkeeping? */
    bool hasrefs;

    /* If we didn't start the job in the begin phase, start it now. */
    if (!(flags & VIR_MIGRATE_CHANGE_PROTECTION)) {
        if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
            goto cleanup;
    } else if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_OUT)) {
        goto cleanup;
    }

    qemuMigrationJobStartPhase(driver, vm, QEMU_MIGRATION_PHASE_PERFORM3);
    /* While perform runs, the begin-phase close callback must not fire. */
    virCloseCallbacksUnset(driver->closeCallbacks, vm,
                           qemuMigrationCleanup);

    resume = virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING;
    ret = doNativeMigrate(driver, vm, uri, cookiein, cookieinlen,
                          cookieout, cookieoutlen,
                          flags, resource, NULL, graphicsuri);

    if (ret < 0 && resume &&
        virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED) {
        /* we got here through some sort of failure; start the domain again */
        if (qemuProcessStartCPUs(driver, vm, conn,
                                 VIR_DOMAIN_RUNNING_MIGRATION_CANCELED,
                                 QEMU_ASYNC_JOB_MIGRATION_OUT) < 0) {
            /* Hm, we already know we are in error here. We don't want to
             * overwrite the previous error, though, so we just throw something
             * to the logs and hope for the best
             */
            VIR_ERROR(_("Failed to resume guest %s after failure"),
                      vm->def->name);
        }

        event = virDomainEventNewFromObj(vm,
                                         VIR_DOMAIN_EVENT_RESUMED,
                                         VIR_DOMAIN_EVENT_RESUMED_MIGRATED);
    }

    if (ret < 0)
        goto endjob;

    qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_PERFORM3_DONE);

    /* Re-arm the close callback so a dropped client connection after a
     * successful perform still gets cleaned up before confirm. */
    if (virCloseCallbacksSet(driver->closeCallbacks, vm, conn,
                             qemuMigrationCleanup) < 0)
        goto endjob;

endjob:
    /* On failure the job ends here; on success it stays open for the
     * confirm phase (JobContinue). */
    if (ret < 0)
        hasrefs = qemuMigrationJobFinish(driver, vm);
    else
        hasrefs = qemuMigrationJobContinue(vm);
    if (!hasrefs) {
        /* last reference gone — vm is no longer ours to unlock */
        vm = NULL;
    } else if (!virDomainObjIsActive(vm) && !vm->persistent) {
        qemuDomainRemoveInactive(driver, vm);
        vm = NULL;
    }

cleanup:
    if (vm)
        virObjectUnlock(vm);
    if (event)
        qemuDomainEventQueue(driver, event);
    return ret;
}
|
|
|
|
|
2011-07-19 00:27:32 +00:00
|
|
|
int
|
2012-11-28 16:43:10 +00:00
|
|
|
qemuMigrationPerform(virQEMUDriverPtr driver,
|
2011-07-19 00:27:32 +00:00
|
|
|
virConnectPtr conn,
|
|
|
|
virDomainObjPtr vm,
|
|
|
|
const char *xmlin,
|
|
|
|
const char *dconnuri,
|
|
|
|
const char *uri,
|
2013-06-18 10:17:18 +00:00
|
|
|
const char *graphicsuri,
|
2011-07-19 00:27:32 +00:00
|
|
|
const char *cookiein,
|
|
|
|
int cookieinlen,
|
|
|
|
char **cookieout,
|
|
|
|
int *cookieoutlen,
|
|
|
|
unsigned long flags,
|
|
|
|
const char *dname,
|
|
|
|
unsigned long resource,
|
|
|
|
bool v3proto)
|
|
|
|
{
|
|
|
|
VIR_DEBUG("driver=%p, conn=%p, vm=%p, xmlin=%s, dconnuri=%s, "
|
2013-06-18 10:17:18 +00:00
|
|
|
"uri=%s, graphicsuri=%s, "
|
|
|
|
"cookiein=%s, cookieinlen=%d, cookieout=%p, cookieoutlen=%p, "
|
|
|
|
"flags=%lx, dname=%s, resource=%lu, v3proto=%d",
|
2011-07-19 00:27:32 +00:00
|
|
|
driver, conn, vm, NULLSTR(xmlin), NULLSTR(dconnuri),
|
2013-06-18 10:17:18 +00:00
|
|
|
NULLSTR(uri), NULLSTR(graphicsuri),
|
|
|
|
NULLSTR(cookiein), cookieinlen, cookieout, cookieoutlen,
|
|
|
|
flags, NULLSTR(dname), resource, v3proto);
|
2011-07-19 00:27:32 +00:00
|
|
|
|
|
|
|
if ((flags & (VIR_MIGRATE_TUNNELLED | VIR_MIGRATE_PEER2PEER))) {
|
|
|
|
if (cookieinlen) {
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_OPERATION_INVALID,
|
|
|
|
"%s", _("received unexpected cookie with P2P migration"));
|
2011-07-19 00:27:32 +00:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return qemuMigrationPerformJob(driver, conn, vm, xmlin, dconnuri, uri,
|
2013-06-18 10:17:18 +00:00
|
|
|
graphicsuri, cookiein, cookieinlen,
|
|
|
|
cookieout, cookieoutlen,
|
|
|
|
flags, dname, resource, v3proto);
|
2011-07-19 00:27:32 +00:00
|
|
|
} else {
|
|
|
|
if (dconnuri) {
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
"%s", _("Unexpected dconnuri parameter with non-peer2peer migration"));
|
2011-07-19 00:27:32 +00:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (v3proto) {
|
|
|
|
return qemuMigrationPerformPhase(driver, conn, vm, uri,
|
2013-06-18 10:17:18 +00:00
|
|
|
graphicsuri,
|
2011-07-19 00:27:32 +00:00
|
|
|
cookiein, cookieinlen,
|
|
|
|
cookieout, cookieoutlen,
|
2011-08-11 12:36:04 +00:00
|
|
|
flags, resource);
|
2011-07-19 00:27:32 +00:00
|
|
|
} else {
|
|
|
|
return qemuMigrationPerformJob(driver, conn, vm, xmlin, dconnuri,
|
2013-06-18 10:17:18 +00:00
|
|
|
uri, graphicsuri,
|
|
|
|
cookiein, cookieinlen,
|
2011-07-19 00:27:32 +00:00
|
|
|
cookieout, cookieoutlen, flags,
|
|
|
|
dname, resource, v3proto);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2011-01-31 10:47:03 +00:00
|
|
|
|
2012-03-27 20:00:01 +00:00
|
|
|
static int
|
2011-01-31 10:47:03 +00:00
|
|
|
qemuMigrationVPAssociatePortProfiles(virDomainDefPtr def) {
|
Convert 'int i' to 'size_t i' in src/qemu files
Convert the type of loop iterators named 'i', 'j', k',
'ii', 'jj', 'kk', to be 'size_t' instead of 'int' or
'unsigned int', also santizing 'ii', 'jj', 'kk' to use
the normal 'i', 'j', 'k' naming
Signed-off-by: Daniel P. Berrange <berrange@redhat.com>
2013-07-08 14:09:33 +00:00
|
|
|
size_t i;
|
2011-01-31 10:47:03 +00:00
|
|
|
int last_good_net = -1;
|
|
|
|
virDomainNetDefPtr net;
|
|
|
|
|
|
|
|
for (i = 0; i < def->nnets; i++) {
|
|
|
|
net = def->nets[i];
|
2011-07-04 01:57:45 +00:00
|
|
|
if (virDomainNetGetActualType(net) == VIR_DOMAIN_NET_TYPE_DIRECT) {
|
Rename Macvtap management APIs
In preparation for code re-organization, rename the Macvtap
management APIs to have the following patterns
virNetDevMacVLanXXXXX - macvlan/macvtap interface management
virNetDevVPortProfileXXXX - virtual port profile management
* src/util/macvtap.c, src/util/macvtap.h: Rename APIs
* src/conf/domain_conf.c, src/network/bridge_driver.c,
src/qemu/qemu_command.c, src/qemu/qemu_command.h,
src/qemu/qemu_driver.c, src/qemu/qemu_hotplug.c,
src/qemu/qemu_migration.c, src/qemu/qemu_process.c,
src/qemu/qemu_process.h: Update for renamed APIs
2011-11-02 16:51:01 +00:00
|
|
|
if (virNetDevVPortProfileAssociate(net->ifname,
|
2012-02-15 19:19:32 +00:00
|
|
|
virDomainNetGetActualVirtPortProfile(net),
|
2012-07-17 12:07:59 +00:00
|
|
|
&net->mac,
|
Rename Macvtap management APIs
In preparation for code re-organization, rename the Macvtap
management APIs to have the following patterns
virNetDevMacVLanXXXXX - macvlan/macvtap interface management
virNetDevVPortProfileXXXX - virtual port profile management
* src/util/macvtap.c, src/util/macvtap.h: Rename APIs
* src/conf/domain_conf.c, src/network/bridge_driver.c,
src/qemu/qemu_command.c, src/qemu/qemu_command.h,
src/qemu/qemu_driver.c, src/qemu/qemu_hotplug.c,
src/qemu/qemu_migration.c, src/qemu/qemu_process.c,
src/qemu/qemu_process.h: Update for renamed APIs
2011-11-02 16:51:01 +00:00
|
|
|
virDomainNetGetActualDirectDev(net),
|
2012-03-06 01:12:39 +00:00
|
|
|
-1,
|
Rename Macvtap management APIs
In preparation for code re-organization, rename the Macvtap
management APIs to have the following patterns
virNetDevMacVLanXXXXX - macvlan/macvtap interface management
virNetDevVPortProfileXXXX - virtual port profile management
* src/util/macvtap.c, src/util/macvtap.h: Rename APIs
* src/conf/domain_conf.c, src/network/bridge_driver.c,
src/qemu/qemu_command.c, src/qemu/qemu_command.h,
src/qemu/qemu_driver.c, src/qemu/qemu_hotplug.c,
src/qemu/qemu_migration.c, src/qemu/qemu_process.c,
src/qemu/qemu_process.h: Update for renamed APIs
2011-11-02 16:51:01 +00:00
|
|
|
def->uuid,
|
2012-03-27 20:00:01 +00:00
|
|
|
VIR_NETDEV_VPORT_PROFILE_OP_MIGRATE_IN_FINISH,
|
|
|
|
false) < 0) {
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_OPERATION_FAILED,
|
|
|
|
_("Port profile Associate failed for %s"),
|
|
|
|
net->ifname);
|
2011-01-31 10:47:03 +00:00
|
|
|
goto err_exit;
|
2012-03-27 20:00:01 +00:00
|
|
|
}
|
|
|
|
VIR_DEBUG("Port profile Associate succeeded for %s", net->ifname);
|
2012-03-29 11:15:00 +00:00
|
|
|
|
2012-07-17 12:07:59 +00:00
|
|
|
if (virNetDevMacVLanVPortProfileRegisterCallback(net->ifname, &net->mac,
|
2012-03-29 11:15:00 +00:00
|
|
|
virDomainNetGetActualDirectDev(net), def->uuid,
|
|
|
|
virDomainNetGetActualVirtPortProfile(net),
|
|
|
|
VIR_NETDEV_VPORT_PROFILE_OP_CREATE))
|
|
|
|
goto err_exit;
|
2011-01-31 10:47:03 +00:00
|
|
|
}
|
|
|
|
last_good_net = i;
|
|
|
|
}
|
|
|
|
|
2012-03-27 20:00:01 +00:00
|
|
|
return 0;
|
2011-01-31 10:47:03 +00:00
|
|
|
|
|
|
|
err_exit:
|
Convert 'int i' to 'size_t i' in src/qemu files
Convert the type of loop iterators named 'i', 'j', k',
'ii', 'jj', 'kk', to be 'size_t' instead of 'int' or
'unsigned int', also santizing 'ii', 'jj', 'kk' to use
the normal 'i', 'j', 'k' naming
Signed-off-by: Daniel P. Berrange <berrange@redhat.com>
2013-07-08 14:09:33 +00:00
|
|
|
for (i = 0; last_good_net != -1 && i < last_good_net; i++) {
|
2011-01-31 10:47:03 +00:00
|
|
|
net = def->nets[i];
|
2011-07-04 01:57:45 +00:00
|
|
|
if (virDomainNetGetActualType(net) == VIR_DOMAIN_NET_TYPE_DIRECT) {
|
Rename Macvtap management APIs
In preparation for code re-organization, rename the Macvtap
management APIs to have the following patterns
virNetDevMacVLanXXXXX - macvlan/macvtap interface management
virNetDevVPortProfileXXXX - virtual port profile management
* src/util/macvtap.c, src/util/macvtap.h: Rename APIs
* src/conf/domain_conf.c, src/network/bridge_driver.c,
src/qemu/qemu_command.c, src/qemu/qemu_command.h,
src/qemu/qemu_driver.c, src/qemu/qemu_hotplug.c,
src/qemu/qemu_migration.c, src/qemu/qemu_process.c,
src/qemu/qemu_process.h: Update for renamed APIs
2011-11-02 16:51:01 +00:00
|
|
|
ignore_value(virNetDevVPortProfileDisassociate(net->ifname,
|
2012-02-15 19:19:32 +00:00
|
|
|
virDomainNetGetActualVirtPortProfile(net),
|
2012-07-17 12:07:59 +00:00
|
|
|
&net->mac,
|
Rename Macvtap management APIs
In preparation for code re-organization, rename the Macvtap
management APIs to have the following patterns
virNetDevMacVLanXXXXX - macvlan/macvtap interface management
virNetDevVPortProfileXXXX - virtual port profile management
* src/util/macvtap.c, src/util/macvtap.h: Rename APIs
* src/conf/domain_conf.c, src/network/bridge_driver.c,
src/qemu/qemu_command.c, src/qemu/qemu_command.h,
src/qemu/qemu_driver.c, src/qemu/qemu_hotplug.c,
src/qemu/qemu_migration.c, src/qemu/qemu_process.c,
src/qemu/qemu_process.h: Update for renamed APIs
2011-11-02 16:51:01 +00:00
|
|
|
virDomainNetGetActualDirectDev(net),
|
2012-03-06 01:12:39 +00:00
|
|
|
-1,
|
Rename Macvtap management APIs
In preparation for code re-organization, rename the Macvtap
management APIs to have the following patterns
virNetDevMacVLanXXXXX - macvlan/macvtap interface management
virNetDevVPortProfileXXXX - virtual port profile management
* src/util/macvtap.c, src/util/macvtap.h: Rename APIs
* src/conf/domain_conf.c, src/network/bridge_driver.c,
src/qemu/qemu_command.c, src/qemu/qemu_command.h,
src/qemu/qemu_driver.c, src/qemu/qemu_hotplug.c,
src/qemu/qemu_migration.c, src/qemu/qemu_process.c,
src/qemu/qemu_process.h: Update for renamed APIs
2011-11-02 16:51:01 +00:00
|
|
|
VIR_NETDEV_VPORT_PROFILE_OP_MIGRATE_IN_FINISH));
|
2011-01-31 10:47:03 +00:00
|
|
|
}
|
|
|
|
}
|
2012-03-27 20:00:01 +00:00
|
|
|
return -1;
|
2011-01-31 10:47:03 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
virDomainPtr
|
2012-11-28 16:43:10 +00:00
|
|
|
qemuMigrationFinish(virQEMUDriverPtr driver,
|
2011-01-31 10:47:03 +00:00
|
|
|
virConnectPtr dconn,
|
|
|
|
virDomainObjPtr vm,
|
2011-01-24 18:06:16 +00:00
|
|
|
const char *cookiein,
|
|
|
|
int cookieinlen,
|
|
|
|
char **cookieout,
|
|
|
|
int *cookieoutlen,
|
2011-01-31 10:47:03 +00:00
|
|
|
unsigned long flags,
|
2011-05-23 12:50:11 +00:00
|
|
|
int retcode,
|
|
|
|
bool v3proto)
|
2011-01-31 10:47:03 +00:00
|
|
|
{
|
|
|
|
virDomainPtr dom = NULL;
|
|
|
|
virDomainEventPtr event = NULL;
|
2013-05-24 10:14:02 +00:00
|
|
|
bool newVM = true;
|
2011-01-24 18:06:16 +00:00
|
|
|
qemuMigrationCookiePtr mig = NULL;
|
2011-07-19 00:27:31 +00:00
|
|
|
virErrorPtr orig_err = NULL;
|
2011-09-15 13:13:11 +00:00
|
|
|
int cookie_flags = 0;
|
2011-10-04 07:11:35 +00:00
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
2013-01-10 21:03:14 +00:00
|
|
|
virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
|
2013-02-01 17:04:15 +00:00
|
|
|
virCapsPtr caps = NULL;
|
2011-07-19 00:27:31 +00:00
|
|
|
|
2011-05-20 10:03:04 +00:00
|
|
|
VIR_DEBUG("driver=%p, dconn=%p, vm=%p, cookiein=%s, cookieinlen=%d, "
|
2011-07-08 15:18:48 +00:00
|
|
|
"cookieout=%p, cookieoutlen=%p, flags=%lx, retcode=%d",
|
2011-05-20 10:03:04 +00:00
|
|
|
driver, dconn, vm, NULLSTR(cookiein), cookieinlen,
|
|
|
|
cookieout, cookieoutlen, flags, retcode);
|
2011-01-31 10:47:03 +00:00
|
|
|
|
2013-02-01 17:04:15 +00:00
|
|
|
if (!(caps = virQEMUDriverGetCapabilities(driver, false)))
|
|
|
|
goto cleanup;
|
|
|
|
|
2011-07-19 00:27:31 +00:00
|
|
|
if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_IN))
|
2011-01-31 10:47:03 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
2011-07-19 00:27:31 +00:00
|
|
|
qemuMigrationJobStartPhase(driver, vm,
|
|
|
|
v3proto ? QEMU_MIGRATION_PHASE_FINISH3
|
|
|
|
: QEMU_MIGRATION_PHASE_FINISH2);
|
2011-01-24 18:06:16 +00:00
|
|
|
|
2012-03-16 06:56:19 +00:00
|
|
|
qemuDomainCleanupRemove(vm, qemuMigrationPrepareCleanup);
|
|
|
|
|
2012-10-01 15:18:20 +00:00
|
|
|
cookie_flags = QEMU_MIGRATION_COOKIE_NETWORK;
|
2011-09-15 13:13:11 +00:00
|
|
|
if (flags & VIR_MIGRATE_PERSIST_DEST)
|
|
|
|
cookie_flags |= QEMU_MIGRATION_COOKIE_PERSISTENT;
|
|
|
|
|
|
|
|
if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein,
|
|
|
|
cookieinlen, cookie_flags)))
|
2011-07-19 00:27:31 +00:00
|
|
|
goto endjob;
|
2011-01-31 10:47:03 +00:00
|
|
|
|
|
|
|
/* Did the migration go as planned? If yes, return the domain
|
|
|
|
* object, but if no, clean up the empty qemu process.
|
|
|
|
*/
|
|
|
|
if (retcode == 0) {
|
2012-11-21 08:28:49 +00:00
|
|
|
if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) {
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
|
|
|
|
_("guest unexpectedly quit"));
|
2011-07-01 10:05:43 +00:00
|
|
|
goto endjob;
|
2011-01-31 10:47:03 +00:00
|
|
|
}
|
|
|
|
|
2012-11-21 08:28:49 +00:00
|
|
|
if (!(flags & VIR_MIGRATE_OFFLINE)) {
|
|
|
|
if (qemuMigrationVPAssociatePortProfiles(vm->def) < 0) {
|
|
|
|
qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
|
|
|
|
VIR_QEMU_PROCESS_STOP_MIGRATED);
|
|
|
|
virDomainAuditStop(vm, "failed");
|
|
|
|
event = virDomainEventNewFromObj(vm,
|
|
|
|
VIR_DOMAIN_EVENT_STOPPED,
|
|
|
|
VIR_DOMAIN_EVENT_STOPPED_FAILED);
|
|
|
|
goto endjob;
|
|
|
|
}
|
|
|
|
if (mig->network)
|
|
|
|
if (qemuDomainMigrateOPDRelocate(driver, vm, mig) < 0)
|
|
|
|
VIR_WARN("unable to provide network data for relocation");
|
2012-03-27 20:00:01 +00:00
|
|
|
}
|
2011-01-31 10:47:03 +00:00
|
|
|
|
2012-11-27 15:34:24 +00:00
|
|
|
qemuMigrationStopNBDServer(driver, vm, mig);
|
|
|
|
|
2011-01-31 10:47:03 +00:00
|
|
|
if (flags & VIR_MIGRATE_PERSIST_DEST) {
|
2011-08-26 03:08:11 +00:00
|
|
|
virDomainDefPtr vmdef;
|
2011-01-31 10:47:03 +00:00
|
|
|
if (vm->persistent)
|
2013-05-24 10:14:02 +00:00
|
|
|
newVM = false;
|
2011-01-31 10:47:03 +00:00
|
|
|
vm->persistent = 1;
|
2011-09-15 13:13:11 +00:00
|
|
|
if (mig->persistent)
|
|
|
|
vm->newDef = vmdef = mig->persistent;
|
|
|
|
else
|
2013-03-31 18:03:42 +00:00
|
|
|
vmdef = virDomainObjGetPersistentDef(caps, driver->xmlopt, vm);
|
2013-01-10 21:03:14 +00:00
|
|
|
if (!vmdef || virDomainSaveConfig(cfg->configDir, vmdef) < 0) {
|
2011-01-31 10:47:03 +00:00
|
|
|
/* Hmpf. Migration was successful, but making it persistent
|
|
|
|
* was not. If we report successful, then when this domain
|
|
|
|
* shuts down, management tools are in for a surprise. On the
|
|
|
|
* other hand, if we report failure, then the management tools
|
|
|
|
* might try to restart the domain on the source side, even
|
|
|
|
* though the domain is actually running on the destination.
|
|
|
|
* Return a NULL dom pointer, and hope that this is a rare
|
|
|
|
* situation and management tools are smart.
|
|
|
|
*/
|
2011-07-01 10:05:43 +00:00
|
|
|
|
|
|
|
/*
|
2011-07-19 00:27:31 +00:00
|
|
|
* However, in v3 protocol, the source VM is still available
|
|
|
|
* to restart during confirm() step, so we kill it off now.
|
2011-07-01 10:05:43 +00:00
|
|
|
*/
|
|
|
|
if (v3proto) {
|
2012-11-21 08:28:49 +00:00
|
|
|
if (!(flags & VIR_MIGRATE_OFFLINE)) {
|
|
|
|
qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
|
|
|
|
VIR_QEMU_PROCESS_STOP_MIGRATED);
|
|
|
|
virDomainAuditStop(vm, "failed");
|
|
|
|
}
|
2011-07-19 00:27:31 +00:00
|
|
|
if (newVM)
|
|
|
|
vm->persistent = 0;
|
2011-07-01 10:05:43 +00:00
|
|
|
}
|
2011-09-28 08:00:49 +00:00
|
|
|
if (!vmdef)
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
|
|
|
|
_("can't get vmdef"));
|
2011-01-31 10:47:03 +00:00
|
|
|
goto endjob;
|
|
|
|
}
|
|
|
|
|
|
|
|
event = virDomainEventNewFromObj(vm,
|
|
|
|
VIR_DOMAIN_EVENT_DEFINED,
|
|
|
|
newVM ?
|
|
|
|
VIR_DOMAIN_EVENT_DEFINED_ADDED :
|
|
|
|
VIR_DOMAIN_EVENT_DEFINED_UPDATED);
|
|
|
|
if (event)
|
|
|
|
qemuDomainEventQueue(driver, event);
|
|
|
|
event = NULL;
|
|
|
|
}
|
|
|
|
|
2012-11-21 08:28:49 +00:00
|
|
|
if (!(flags & VIR_MIGRATE_PAUSED) && !(flags & VIR_MIGRATE_OFFLINE)) {
|
2011-01-31 10:47:03 +00:00
|
|
|
/* run 'cont' on the destination, which allows migration on qemu
|
|
|
|
* >= 0.10.6 to work properly. This isn't strictly necessary on
|
|
|
|
* older qemu's, but it also doesn't hurt anything there
|
|
|
|
*/
|
2011-05-04 09:07:01 +00:00
|
|
|
if (qemuProcessStartCPUs(driver, vm, dconn,
|
qemu: fix crash when mixing sync and async monitor jobs
Currently, we attempt to run sync job and async job at the same time. It
means that the monitor commands for two jobs can be run in any order.
In the function qemuDomainObjEnterMonitorInternal():
if (priv->job.active == QEMU_JOB_NONE && priv->job.asyncJob) {
if (qemuDomainObjBeginNestedJob(driver, obj) < 0)
We check whether the caller is an async job by priv->job.active and
priv->job.asynJob. But when an async job is running, and a sync job is
also running at the time of the check, then priv->job.active is not
QEMU_JOB_NONE. So we cannot check whether the caller is an async job
in the function qemuDomainObjEnterMonitorInternal(), and must instead
put the burden on the caller to tell us when an async command wants
to do a nested job.
Once the burden is on the caller, then only async monitor enters need
to worry about whether the VM is still running; for sync monitor enter,
the internal return is always 0, so lots of ignore_value can be dropped.
* src/qemu/THREADS.txt: Reflect new rules.
* src/qemu/qemu_domain.h (qemuDomainObjEnterMonitorAsync): New
prototype.
* src/qemu/qemu_process.h (qemuProcessStartCPUs)
(qemuProcessStopCPUs): Add parameter.
* src/qemu/qemu_migration.h (qemuMigrationToFile): Likewise.
(qemuMigrationWaitForCompletion): Make static.
* src/qemu/qemu_domain.c (qemuDomainObjEnterMonitorInternal): Add
parameter.
(qemuDomainObjEnterMonitorAsync): New function.
(qemuDomainObjEnterMonitor, qemuDomainObjEnterMonitorWithDriver):
Update callers.
* src/qemu/qemu_driver.c (qemuDomainSaveInternal)
(qemudDomainCoreDump, doCoreDump, processWatchdogEvent)
(qemudDomainSuspend, qemudDomainResume, qemuDomainSaveImageStartVM)
(qemuDomainSnapshotCreateActive, qemuDomainRevertToSnapshot):
Likewise.
* src/qemu/qemu_process.c (qemuProcessStopCPUs)
(qemuProcessFakeReboot, qemuProcessRecoverMigration)
(qemuProcessRecoverJob, qemuProcessStart): Likewise.
* src/qemu/qemu_migration.c (qemuMigrationToFile)
(qemuMigrationWaitForCompletion, qemuMigrationUpdateJobStatus)
(qemuMigrationJobStart, qemuDomainMigrateGraphicsRelocate)
(doNativeMigrate, doTunnelMigrate, qemuMigrationPerformJob)
(qemuMigrationPerformPhase, qemuMigrationFinish)
(qemuMigrationConfirm): Likewise.
* src/qemu/qemu_hotplug.c: Drop unneeded ignore_value.
2011-07-28 23:18:24 +00:00
|
|
|
VIR_DOMAIN_RUNNING_MIGRATED,
|
|
|
|
QEMU_ASYNC_JOB_MIGRATION_IN) < 0) {
|
2011-05-23 16:48:36 +00:00
|
|
|
if (virGetLastError() == NULL)
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
"%s", _("resume operation failed"));
|
2011-05-23 16:48:36 +00:00
|
|
|
/* Need to save the current error, in case shutting
|
|
|
|
* down the process overwrites it
|
|
|
|
*/
|
|
|
|
orig_err = virSaveLastError();
|
|
|
|
|
2011-05-23 12:50:11 +00:00
|
|
|
/*
|
|
|
|
* In v3 protocol, the source VM is still available to
|
|
|
|
* restart during confirm() step, so we kill it off
|
|
|
|
* now.
|
|
|
|
* In v2 protocol, the source is dead, so we leave
|
|
|
|
* target in paused state, in case admin can fix
|
|
|
|
* things up
|
|
|
|
*/
|
|
|
|
if (v3proto) {
|
2012-06-11 13:20:44 +00:00
|
|
|
qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
|
|
|
|
VIR_QEMU_PROCESS_STOP_MIGRATED);
|
Move qemu_audit.h helpers into shared code
The LXC and UML drivers can both make use of auditing. Move
the qemu_audit.{c,h} files to src/conf/domain_audit.{c,h}
* src/conf/domain_audit.c: Rename from src/qemu/qemu_audit.c
* src/conf/domain_audit.h: Rename from src/qemu/qemu_audit.h
* src/Makefile.am: Remove qemu_audit.{c,h}, add domain_audit.{c,h}
* src/qemu/qemu_audit.h, src/qemu/qemu_cgroup.c,
src/qemu/qemu_command.c, src/qemu/qemu_driver.c,
src/qemu/qemu_hotplug.c, src/qemu/qemu_migration.c,
src/qemu/qemu_process.c: Update for changed audit API names
2011-07-04 10:56:13 +00:00
|
|
|
virDomainAuditStop(vm, "failed");
|
2011-05-23 12:50:11 +00:00
|
|
|
event = virDomainEventNewFromObj(vm,
|
|
|
|
VIR_DOMAIN_EVENT_STOPPED,
|
|
|
|
VIR_DOMAIN_EVENT_STOPPED_FAILED);
|
|
|
|
}
|
2011-01-31 10:47:03 +00:00
|
|
|
goto endjob;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-10-17 09:23:12 +00:00
|
|
|
dom = virGetDomain(dconn, vm->def->name, vm->def->uuid);
|
2011-05-23 12:50:11 +00:00
|
|
|
|
2012-11-21 08:28:49 +00:00
|
|
|
if (!(flags & VIR_MIGRATE_OFFLINE)) {
|
2011-01-31 10:47:03 +00:00
|
|
|
event = virDomainEventNewFromObj(vm,
|
2012-11-21 08:28:49 +00:00
|
|
|
VIR_DOMAIN_EVENT_RESUMED,
|
|
|
|
VIR_DOMAIN_EVENT_RESUMED_MIGRATED);
|
|
|
|
if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED) {
|
|
|
|
virDomainObjSetState(vm, VIR_DOMAIN_PAUSED,
|
|
|
|
VIR_DOMAIN_PAUSED_USER);
|
|
|
|
if (event)
|
|
|
|
qemuDomainEventQueue(driver, event);
|
|
|
|
event = virDomainEventNewFromObj(vm,
|
|
|
|
VIR_DOMAIN_EVENT_SUSPENDED,
|
|
|
|
VIR_DOMAIN_EVENT_SUSPENDED_PAUSED);
|
|
|
|
}
|
2011-01-31 10:47:03 +00:00
|
|
|
}
|
2012-11-21 08:28:49 +00:00
|
|
|
|
|
|
|
if (virDomainObjIsActive(vm) &&
|
2013-03-31 18:03:42 +00:00
|
|
|
virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm) < 0) {
|
2011-01-31 10:47:03 +00:00
|
|
|
VIR_WARN("Failed to save status on vm %s", vm->def->name);
|
|
|
|
goto endjob;
|
|
|
|
}
|
2011-06-23 10:03:57 +00:00
|
|
|
|
|
|
|
/* Guest is successfully running, so cancel previous auto destroy */
|
|
|
|
qemuProcessAutoDestroyRemove(driver, vm);
|
2012-11-21 08:28:49 +00:00
|
|
|
} else if (!(flags & VIR_MIGRATE_OFFLINE)) {
|
2012-06-11 13:20:44 +00:00
|
|
|
qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
|
|
|
|
VIR_QEMU_PROCESS_STOP_MIGRATED);
|
Move qemu_audit.h helpers into shared code
The LXC and UML drivers can both make use of auditing. Move
the qemu_audit.{c,h} files to src/conf/domain_audit.{c,h}
* src/conf/domain_audit.c: Rename from src/qemu/qemu_audit.c
* src/conf/domain_audit.h: Rename from src/qemu/qemu_audit.h
* src/Makefile.am: Remove qemu_audit.{c,h}, add domain_audit.{c,h}
* src/qemu/qemu_audit.h, src/qemu/qemu_cgroup.c,
src/qemu/qemu_command.c, src/qemu/qemu_driver.c,
src/qemu/qemu_hotplug.c, src/qemu/qemu_migration.c,
src/qemu/qemu_process.c: Update for changed audit API names
2011-07-04 10:56:13 +00:00
|
|
|
virDomainAuditStop(vm, "failed");
|
2011-01-31 10:47:03 +00:00
|
|
|
event = virDomainEventNewFromObj(vm,
|
|
|
|
VIR_DOMAIN_EVENT_STOPPED,
|
|
|
|
VIR_DOMAIN_EVENT_STOPPED_FAILED);
|
|
|
|
}
|
|
|
|
|
2011-01-24 18:06:16 +00:00
|
|
|
if (qemuMigrationBakeCookie(mig, driver, vm, cookieout, cookieoutlen, 0) < 0)
|
|
|
|
VIR_WARN("Unable to encode migration cookie");
|
|
|
|
|
2011-01-31 10:47:03 +00:00
|
|
|
endjob:
|
2011-08-02 20:18:01 +00:00
|
|
|
if (qemuMigrationJobFinish(driver, vm) == 0) {
|
|
|
|
vm = NULL;
|
|
|
|
} else if (!vm->persistent && !virDomainObjIsActive(vm)) {
|
2011-09-22 06:02:03 +00:00
|
|
|
qemuDomainRemoveInactive(driver, vm);
|
2011-08-02 20:18:01 +00:00
|
|
|
vm = NULL;
|
2011-07-19 00:27:31 +00:00
|
|
|
}
|
2011-01-31 10:47:03 +00:00
|
|
|
|
|
|
|
cleanup:
|
2011-10-04 07:11:35 +00:00
|
|
|
if (vm) {
|
|
|
|
VIR_FREE(priv->origname);
|
2013-01-09 21:00:32 +00:00
|
|
|
virObjectUnlock(vm);
|
2011-10-04 07:11:35 +00:00
|
|
|
}
|
2011-01-31 10:47:03 +00:00
|
|
|
if (event)
|
|
|
|
qemuDomainEventQueue(driver, event);
|
2011-01-24 18:06:16 +00:00
|
|
|
qemuMigrationCookieFree(mig);
|
2011-05-23 16:48:36 +00:00
|
|
|
if (orig_err) {
|
|
|
|
virSetError(orig_err);
|
|
|
|
virFreeError(orig_err);
|
|
|
|
}
|
2013-02-01 17:04:15 +00:00
|
|
|
virObjectUnref(caps);
|
2013-01-10 21:03:14 +00:00
|
|
|
virObjectUnref(cfg);
|
2011-01-31 10:47:03 +00:00
|
|
|
return dom;
|
|
|
|
}
|
2011-03-10 00:35:13 +00:00
|
|
|
|
2011-02-03 11:09:28 +00:00
|
|
|
|
2013-02-06 18:17:20 +00:00
|
|
|
/* Helper function called while vm is active.
 *
 * qemuMigrationToFile: stream the domain's migration data into a local
 * file descriptor/path (used by the save-to-file and core-dump paths).
 *
 * @driver: qemu driver state
 * @vm: active domain whose state is written out
 * @fd: destination file descriptor (already open for writing)
 * @offset: offset within @path at which qemu should start writing when
 *          the exec/file fallback is used
 * @path: filesystem path of the destination (needed for the exec
 *        fallback, cgroup ACLs and security relabeling)
 * @compressor: optional compression program name; when set, qemu's
 *              output is piped through "<compressor> -c" before @fd
 * @bypassSecurityDriver: caller's hint that no save-state relabel of
 *                        @path is needed; may be promoted to true below
 *                        when fd migration avoids touching @path at all
 * @asyncJob: async job this runs inside (nested monitor jobs)
 *
 * Returns 0 on success, -1 on failure (error reported).
 */
int
qemuMigrationToFile(virQEMUDriverPtr driver, virDomainObjPtr vm,
                    int fd, off_t offset, const char *path,
                    const char *compressor,
                    bool bypassSecurityDriver,
                    enum qemuDomainAsyncJob asyncJob)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    int rc;
    int ret = -1;
    bool restoreLabel = false;
    virCommandPtr cmd = NULL;
    int pipeFD[2] = { -1, -1 };
    unsigned long saveMigBandwidth = priv->migMaxBandwidth;
    char *errbuf = NULL;

    /* Increase migration bandwidth to unlimited since target is a file.
     * Failure to change migration speed is not fatal.  The original
     * bandwidth is restored in the cleanup section below. */
    if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) == 0) {
        qemuMonitorSetMigrationSpeed(priv->mon,
                                     QEMU_DOMAIN_MIG_BANDWIDTH_MAX);
        priv->migMaxBandwidth = QEMU_DOMAIN_MIG_BANDWIDTH_MAX;
        qemuDomainObjExitMonitor(driver, vm);
    }

    /* Note: when a compressor is requested, pipe() is evaluated here as a
     * side effect of the condition; pipeFD stays {-1, -1} on failure and
     * we fall through to the exec-based path. */
    if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_QEMU_FD) &&
        (!compressor || pipe(pipeFD) == 0)) {
        /* All right! We can use fd migration, which means that qemu
         * doesn't have to open() the file, so while we still have to
         * grant SELinux access, we can do it on fd and avoid cleanup
         * later, as well as skip futzing with cgroup.  */
        if (virSecurityManagerSetImageFDLabel(driver->securityManager, vm->def,
                                              compressor ? pipeFD[1] : fd) < 0)
            goto cleanup;
        bypassSecurityDriver = true;
    } else {
        /* Phooey - we have to fall back on exec migration, where qemu
         * has to popen() the file by name, and block devices have to be
         * given cgroup ACL permission.  We might also stumble on
         * a race present in some qemu versions where it does a wait()
         * that botches pclose.  */
        if (virCgroupHasController(priv->cgroup,
                                   VIR_CGROUP_CONTROLLER_DEVICES)) {
            int rv = virCgroupAllowDevicePath(priv->cgroup, path,
                                              VIR_CGROUP_DEVICE_RW);
            virDomainAuditCgroupPath(vm, priv->cgroup, "allow", path, "rw", rv == 0);
            if (rv == 1) {
                /* path was not a device, no further need for cgroup */
            } else if (rv < 0) {
                goto cleanup;
            }
        }
        if ((!bypassSecurityDriver) &&
            virSecurityManagerSetSavedStateLabel(driver->securityManager,
                                                 vm->def, path) < 0)
            goto cleanup;
        restoreLabel = true;
    }

    if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
        goto cleanup;

    if (!compressor) {
        const char *args[] = { "cat", NULL };

        /* fd migration needs a UNIX-socket monitor so the fd can be
         * passed to qemu via SCM_RIGHTS; otherwise fall back to
         * "exec:cat >> file" style migration. */
        if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_QEMU_FD) &&
            priv->monConfig->type == VIR_DOMAIN_CHR_TYPE_UNIX) {
            rc = qemuMonitorMigrateToFd(priv->mon,
                                        QEMU_MONITOR_MIGRATE_BACKGROUND,
                                        fd);
        } else {
            rc = qemuMonitorMigrateToFile(priv->mon,
                                          QEMU_MONITOR_MIGRATE_BACKGROUND,
                                          args, path, offset);
        }
    } else {
        const char *prog = compressor;
        const char *args[] = {
            prog,
            "-c",
            NULL
        };
        if (pipeFD[0] != -1) {
            /* fd path with compression: qemu writes into pipeFD[1];
             * the compressor reads pipeFD[0] and writes to @fd. */
            cmd = virCommandNewArgs(args);
            virCommandSetInputFD(cmd, pipeFD[0]);
            virCommandSetOutputFD(cmd, &fd);
            virCommandSetErrorBuffer(cmd, &errbuf);
            virCommandDoAsyncIO(cmd);
            /* Keep qemu's end of the pipe from leaking into the
             * compressor child. */
            if (virSetCloseExec(pipeFD[1]) < 0) {
                virReportSystemError(errno, "%s",
                                     _("Unable to set cloexec flag"));
                qemuDomainObjExitMonitor(driver, vm);
                goto cleanup;
            }
            if (virCommandRunAsync(cmd, NULL) < 0) {
                qemuDomainObjExitMonitor(driver, vm);
                goto cleanup;
            }
            rc = qemuMonitorMigrateToFd(priv->mon,
                                        QEMU_MONITOR_MIGRATE_BACKGROUND,
                                        pipeFD[1]);
            /* Our copies are no longer needed: qemu and the compressor
             * hold their own references to the pipe ends. */
            if (VIR_CLOSE(pipeFD[0]) < 0 ||
                VIR_CLOSE(pipeFD[1]) < 0)
                VIR_WARN("failed to close intermediate pipe");
        } else {
            /* exec path with compression: qemu pipes directly into the
             * compressor command by name. */
            rc = qemuMonitorMigrateToFile(priv->mon,
                                          QEMU_MONITOR_MIGRATE_BACKGROUND,
                                          args, path, offset);
        }
    }
    qemuDomainObjExitMonitor(driver, vm);

    if (rc < 0)
        goto cleanup;

    /* The migrate command only starts the transfer; block here until
     * qemu reports completion or failure. */
    rc = qemuMigrationWaitForCompletion(driver, vm, asyncJob, NULL, false);

    if (rc < 0)
        goto cleanup;

    /* Reap the compressor and propagate its exit status. */
    if (cmd && virCommandWait(cmd, NULL) < 0)
        goto cleanup;

    ret = 0;

cleanup:
    /* Restore max migration bandwidth */
    if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) == 0) {
        qemuMonitorSetMigrationSpeed(priv->mon, saveMigBandwidth);
        priv->migMaxBandwidth = saveMigBandwidth;
        qemuDomainObjExitMonitor(driver, vm);
    }

    VIR_FORCE_CLOSE(pipeFD[0]);
    VIR_FORCE_CLOSE(pipeFD[1]);
    if (cmd) {
        VIR_DEBUG("Compression binary stderr: %s", NULLSTR(errbuf));
        VIR_FREE(errbuf);
        virCommandFree(cmd);
    }
    if (restoreLabel && (!bypassSecurityDriver) &&
        virSecurityManagerRestoreSavedStateLabel(driver->securityManager,
                                                 vm->def, path) < 0)
        VIR_WARN("failed to restore save state label on %s", path);

    /* Revoke the cgroup device ACL granted above; harmless if the allow
     * branch never ran, since deny of an unlisted path is a no-op. */
    if (virCgroupHasController(priv->cgroup,
                               VIR_CGROUP_CONTROLLER_DEVICES)) {
        int rv = virCgroupDenyDevicePath(priv->cgroup, path,
                                         VIR_CGROUP_DEVICE_RWM);
        virDomainAuditCgroupPath(vm, priv->cgroup, "deny", path, "rwm", rv == 0);
    }
    return ret;
}
|
2011-07-19 00:27:30 +00:00
|
|
|
|
|
|
|
int
|
2012-11-28 16:43:10 +00:00
|
|
|
qemuMigrationJobStart(virQEMUDriverPtr driver,
|
2011-07-19 00:27:30 +00:00
|
|
|
virDomainObjPtr vm,
|
|
|
|
enum qemuDomainAsyncJob job)
|
|
|
|
{
|
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
|
|
|
|
2013-02-06 18:17:20 +00:00
|
|
|
if (qemuDomainObjBeginAsyncJob(driver, vm, job) < 0)
|
2011-07-19 00:27:30 +00:00
|
|
|
return -1;
|
|
|
|
|
2011-07-19 00:27:36 +00:00
|
|
|
if (job == QEMU_ASYNC_JOB_MIGRATION_IN) {
|
2011-07-19 00:27:30 +00:00
|
|
|
qemuDomainObjSetAsyncJobMask(vm, QEMU_JOB_NONE);
|
2011-07-19 00:27:36 +00:00
|
|
|
} else {
|
|
|
|
qemuDomainObjSetAsyncJobMask(vm, DEFAULT_JOB_MASK |
|
2011-07-19 00:27:38 +00:00
|
|
|
JOB_MASK(QEMU_JOB_SUSPEND) |
|
2011-07-19 00:27:36 +00:00
|
|
|
JOB_MASK(QEMU_JOB_MIGRATION_OP));
|
|
|
|
}
|
2011-07-19 00:27:30 +00:00
|
|
|
|
|
|
|
priv->job.info.type = VIR_DOMAIN_JOB_UNBOUNDED;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2012-11-28 16:43:10 +00:00
|
|
|
qemuMigrationJobSetPhase(virQEMUDriverPtr driver,
|
2011-07-19 00:27:30 +00:00
|
|
|
virDomainObjPtr vm,
|
|
|
|
enum qemuMigrationJobPhase phase)
|
|
|
|
{
|
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
|
|
|
|
|
|
|
if (phase < priv->job.phase) {
|
|
|
|
VIR_ERROR(_("migration protocol going backwards %s => %s"),
|
|
|
|
qemuMigrationJobPhaseTypeToString(priv->job.phase),
|
|
|
|
qemuMigrationJobPhaseTypeToString(phase));
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
qemuDomainObjSetJobPhase(driver, vm, phase);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* qemuMigrationJobStartPhase: enter @phase of an already started
 * migration job, taking an extra reference on @vm for the duration of
 * the phase (dropped later, e.g. by qemuMigrationJobContinue). */
void
qemuMigrationJobStartPhase(virQEMUDriverPtr driver,
                           virDomainObjPtr vm,
                           enum qemuMigrationJobPhase phase)
{
    virObjectRef(vm);
    qemuMigrationJobSetPhase(driver, vm, phase);
}
|
|
|
|
|
2012-07-11 13:35:46 +00:00
|
|
|
bool
|
2011-07-19 00:27:30 +00:00
|
|
|
qemuMigrationJobContinue(virDomainObjPtr vm)
|
|
|
|
{
|
2012-04-06 16:55:46 +00:00
|
|
|
qemuDomainObjReleaseAsyncJob(vm);
|
2012-07-11 13:35:46 +00:00
|
|
|
return virObjectUnref(vm);
|
2011-07-19 00:27:30 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
bool
|
|
|
|
qemuMigrationJobIsActive(virDomainObjPtr vm,
|
|
|
|
enum qemuDomainAsyncJob job)
|
|
|
|
{
|
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
|
|
|
|
|
|
|
if (priv->job.asyncJob != job) {
|
|
|
|
const char *msg;
|
|
|
|
|
|
|
|
if (job == QEMU_ASYNC_JOB_MIGRATION_IN)
|
|
|
|
msg = _("domain '%s' is not processing incoming migration");
|
|
|
|
else
|
|
|
|
msg = _("domain '%s' is not being migrated");
|
|
|
|
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_OPERATION_INVALID, msg, vm->def->name);
|
2011-07-19 00:27:30 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2012-07-11 13:35:46 +00:00
|
|
|
/* qemuMigrationJobFinish: end the migration async job on @vm.
 *
 * Thin wrapper over qemuDomainObjEndAsyncJob(); returns its result. */
bool
qemuMigrationJobFinish(virQEMUDriverPtr driver, virDomainObjPtr vm)
{
    return qemuDomainObjEndAsyncJob(driver, vm);
}
|