/*
 * qemu_domain.c: QEMU domain private state
 *
 * Copyright (C) 2006-2012 Red Hat, Inc.
 * Copyright (C) 2006 Daniel P. Berrange
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Author: Daniel P. Berrange <berrange@redhat.com>
 */
#include <config.h>

#include "qemu_domain.h"
#include "qemu_command.h"
#include "qemu_capabilities.h"
#include "qemu_migration.h"
#include "viralloc.h"
#include "virlog.h"
#include "virerror.h"
#include "c-ctype.h"
#include "cpu/cpu.h"
#include "viruuid.h"
#include "virfile.h"
#include "domain_event.h"
#include "virtime.h"
#include "virstoragefile.h"

#include <sys/time.h>
#include <fcntl.h>

#include <libxml/xpathInternals.h>

#define VIR_FROM_THIS VIR_FROM_QEMU

#define QEMU_NAMESPACE_HREF "http://libvirt.org/schemas/domain/qemu/1.0"

VIR_ENUM_IMPL(qemuDomainJob, QEMU_JOB_LAST,
              "none",
              "query",
              "destroy",
              "suspend",
              "modify",
              "abort",
              "migration operation",
              "none",   /* async job is never stored in job.active */
              "async nested",
);

VIR_ENUM_IMPL(qemuDomainAsyncJob, QEMU_ASYNC_JOB_LAST,
              "none",
              "migration out",
              "migration in",
              "save",
              "dump",
              "snapshot",
);

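/*
 * Note: the VIR_ENUM_IMPL() blocks above pair with VIR_ENUM_DECL() in
 * qemu_domain.h to generate the qemuDomain[Async]JobTypeToString() and
 * ...FromString() converters used throughout this file. Illustrative
 * sketch of the round trip (hypothetical caller):
 *
 *     const char *name = qemuDomainJobTypeToString(QEMU_JOB_MODIFY);
 *     // name == "modify"
 *     int job = qemuDomainJobTypeFromString("modify");
 *     // job == QEMU_JOB_MODIFY; FromString() returns -1 for unknown names
 */
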
const char *
qemuDomainAsyncJobPhaseToString(enum qemuDomainAsyncJob job,
                                int phase ATTRIBUTE_UNUSED)
{
    switch (job) {
    case QEMU_ASYNC_JOB_MIGRATION_OUT:
    case QEMU_ASYNC_JOB_MIGRATION_IN:
        return qemuMigrationJobPhaseTypeToString(phase);

    case QEMU_ASYNC_JOB_SAVE:
    case QEMU_ASYNC_JOB_DUMP:
    case QEMU_ASYNC_JOB_SNAPSHOT:
    case QEMU_ASYNC_JOB_NONE:
    case QEMU_ASYNC_JOB_LAST:
        ; /* fall through */
    }

    return "none";
}

int
qemuDomainAsyncJobPhaseFromString(enum qemuDomainAsyncJob job,
                                  const char *phase)
{
    if (!phase)
        return 0;

    switch (job) {
    case QEMU_ASYNC_JOB_MIGRATION_OUT:
    case QEMU_ASYNC_JOB_MIGRATION_IN:
        return qemuMigrationJobPhaseTypeFromString(phase);

    case QEMU_ASYNC_JOB_SAVE:
    case QEMU_ASYNC_JOB_DUMP:
    case QEMU_ASYNC_JOB_SNAPSHOT:
    case QEMU_ASYNC_JOB_NONE:
    case QEMU_ASYNC_JOB_LAST:
        ; /* fall through */
    }

    if (STREQ(phase, "none"))
        return 0;
    else
        return -1;
}

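/*
 * Illustrative sketch of the phase round trip above. Migration is the
 * only async job with named phases here; the phase constant shown is an
 * assumption taken from qemu_migration.h:
 *
 *     const char *s = qemuDomainAsyncJobPhaseToString(
 *         QEMU_ASYNC_JOB_MIGRATION_OUT, QEMU_MIGRATION_PHASE_PERFORM3);
 *     int p = qemuDomainAsyncJobPhaseFromString(QEMU_ASYNC_JOB_MIGRATION_OUT, s);
 *     // p == QEMU_MIGRATION_PHASE_PERFORM3; for the other async job
 *     // types everything maps to "none"/0, or -1 for unknown strings
 */
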
/* driver must be locked before calling */
void qemuDomainEventQueue(virQEMUDriverPtr driver,
                          virDomainEventPtr event)
{
    virDomainEventStateQueue(driver->domainEventState, event);
}

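/*
 * Illustrative sketch of a typical caller (hypothetical code path; the
 * event constructor is the one declared in domain_event.h):
 *
 *     virDomainEventPtr event =
 *         virDomainEventNewFromObj(vm, VIR_DOMAIN_EVENT_STOPPED,
 *                                  VIR_DOMAIN_EVENT_STOPPED_SHUTDOWN);
 *     if (event)
 *         qemuDomainEventQueue(driver, event);
 */
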
static int
qemuDomainObjInitJob(qemuDomainObjPrivatePtr priv)
{
    memset(&priv->job, 0, sizeof(priv->job));

    if (virCondInit(&priv->job.cond) < 0)
        return -1;

    if (virCondInit(&priv->job.asyncCond) < 0) {
        ignore_value(virCondDestroy(&priv->job.cond));
        return -1;
    }

    return 0;
}

static void
qemuDomainObjResetJob(qemuDomainObjPrivatePtr priv)
{
    struct qemuDomainJobObj *job = &priv->job;

    job->active = QEMU_JOB_NONE;
    job->owner = 0;
}

static void
qemuDomainObjResetAsyncJob(qemuDomainObjPrivatePtr priv)
{
    struct qemuDomainJobObj *job = &priv->job;

    job->asyncJob = QEMU_ASYNC_JOB_NONE;
    job->asyncOwner = 0;
    job->phase = 0;
    job->mask = DEFAULT_JOB_MASK;
    job->start = 0;
    job->dump_memory_only = false;
    job->asyncAbort = false;
    memset(&job->info, 0, sizeof(job->info));
}

void
qemuDomainObjRestoreJob(virDomainObjPtr obj,
                        struct qemuDomainJobObj *job)
{
    qemuDomainObjPrivatePtr priv = obj->privateData;

    memset(job, 0, sizeof(*job));
    job->active = priv->job.active;
    job->owner = priv->job.owner;
    job->asyncJob = priv->job.asyncJob;
    job->asyncOwner = priv->job.asyncOwner;
    job->phase = priv->job.phase;

    qemuDomainObjResetJob(priv);
    qemuDomainObjResetAsyncJob(priv);
}

void
qemuDomainObjTransferJob(virDomainObjPtr obj)
{
    qemuDomainObjPrivatePtr priv = obj->privateData;

    VIR_DEBUG("Changing job owner from %d to %d",
              priv->job.owner, virThreadSelfID());
    priv->job.owner = virThreadSelfID();
}

static void
qemuDomainObjFreeJob(qemuDomainObjPrivatePtr priv)
{
    ignore_value(virCondDestroy(&priv->job.cond));
    ignore_value(virCondDestroy(&priv->job.asyncCond));
}

static bool
qemuDomainTrackJob(enum qemuDomainJob job)
{
    return (QEMU_DOMAIN_TRACK_JOBS & JOB_MASK(job)) != 0;
}

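/*
 * JOB_MASK() and QEMU_DOMAIN_TRACK_JOBS come from qemu_domain.h, so the
 * check above is plain bit arithmetic. A minimal sketch, assuming
 * JOB_MASK(job) is defined as (1 << (job - 1)):
 *
 *     // JOB_MASK(QEMU_JOB_DESTROY) == 1 << (QEMU_JOB_DESTROY - 1)
 *     // qemuDomainTrackJob(job) is true iff that bit is set in
 *     // QEMU_DOMAIN_TRACK_JOBS
 */
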
static void *qemuDomainObjPrivateAlloc(void)
{
    qemuDomainObjPrivatePtr priv;

    if (VIR_ALLOC(priv) < 0)
        return NULL;

    if (qemuDomainObjInitJob(priv) < 0)
        goto error;

    if (!(priv->cons = virConsoleAlloc()))
        goto error;

    priv->migMaxBandwidth = QEMU_DOMAIN_MIG_BANDWIDTH_MAX;

    return priv;

error:
    VIR_FREE(priv);
    return NULL;
}

static void qemuDomainObjPrivateFree(void *data)
{
    qemuDomainObjPrivatePtr priv = data;

    virObjectUnref(priv->caps);

    qemuDomainPCIAddressSetFree(priv->pciaddrs);
    virDomainChrSourceDefFree(priv->monConfig);
    qemuDomainObjFreeJob(priv);
    VIR_FREE(priv->vcpupids);
    VIR_FREE(priv->lockState);
    VIR_FREE(priv->origname);

    virConsoleFree(priv->cons);

    /* This should never be non-NULL if we get here, but just in case... */
    if (priv->mon) {
        VIR_ERROR(_("Unexpected QEMU monitor still active during domain deletion"));
        qemuMonitorClose(priv->mon);
    }
    if (priv->agent) {
        VIR_ERROR(_("Unexpected QEMU agent still active during domain deletion"));
        qemuAgentClose(priv->agent);
    }
    VIR_FREE(priv->cleanupCallbacks);
    VIR_FREE(priv);
}

static int qemuDomainObjPrivateXMLFormat(virBufferPtr buf, void *data)
{
    qemuDomainObjPrivatePtr priv = data;
    const char *monitorpath;
    enum qemuDomainJob job;

    /* priv->monitor_chr is set only for qemu */
    if (priv->monConfig) {
        switch (priv->monConfig->type) {
        case VIR_DOMAIN_CHR_TYPE_UNIX:
            monitorpath = priv->monConfig->data.nix.path;
            break;
        default:
        case VIR_DOMAIN_CHR_TYPE_PTY:
            monitorpath = priv->monConfig->data.file.path;
            break;
        }

        virBufferEscapeString(buf, "  <monitor path='%s'", monitorpath);
        if (priv->monJSON)
            virBufferAddLit(buf, " json='1'");
        virBufferAsprintf(buf, " type='%s'/>\n",
                          virDomainChrTypeToString(priv->monConfig->type));
    }

    if (priv->nvcpupids) {
        int i;
        virBufferAddLit(buf, "  <vcpus>\n");
        for (i = 0 ; i < priv->nvcpupids ; i++) {
            virBufferAsprintf(buf, "    <vcpu pid='%d'/>\n", priv->vcpupids[i]);
        }
        virBufferAddLit(buf, "  </vcpus>\n");
    }

    if (priv->caps) {
        int i;
        virBufferAddLit(buf, "  <qemuCaps>\n");
        for (i = 0 ; i < QEMU_CAPS_LAST ; i++) {
            if (qemuCapsGet(priv->caps, i)) {
                virBufferAsprintf(buf, "    <flag name='%s'/>\n",
                                  qemuCapsTypeToString(i));
            }
        }
        virBufferAddLit(buf, "  </qemuCaps>\n");
    }

    if (priv->lockState)
        virBufferAsprintf(buf, "  <lockstate>%s</lockstate>\n", priv->lockState);

    job = priv->job.active;
    if (!qemuDomainTrackJob(job))
        priv->job.active = QEMU_JOB_NONE;

    if (priv->job.active || priv->job.asyncJob) {
        virBufferAsprintf(buf, "  <job type='%s' async='%s'",
                          qemuDomainJobTypeToString(priv->job.active),
                          qemuDomainAsyncJobTypeToString(priv->job.asyncJob));
        if (priv->job.phase) {
            virBufferAsprintf(buf, " phase='%s'",
                              qemuDomainAsyncJobPhaseToString(
                                    priv->job.asyncJob, priv->job.phase));
        }
        virBufferAddLit(buf, "/>\n");
    }
    priv->job.active = job;

    if (priv->fakeReboot)
        virBufferAsprintf(buf, "  <fakereboot/>\n");

    return 0;
}

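/*
 * For reference, a sketch of the private state XML emitted by the
 * formatter above (all field values hypothetical):
 *
 *     <monitor path='/var/lib/libvirt/qemu/guest.monitor' json='1' type='unix'/>
 *     <vcpus>
 *       <vcpu pid='12345'/>
 *     </vcpus>
 *     <qemuCaps>
 *       <flag name='drive'/>
 *     </qemuCaps>
 *     <lockstate>...</lockstate>
 *     <job type='none' async='migration out' phase='perform3'/>
 *     <fakereboot/>
 *
 * qemuDomainObjPrivateXMLParse() below is the inverse of this format.
 */
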
static int qemuDomainObjPrivateXMLParse(xmlXPathContextPtr ctxt, void *data)
{
    qemuDomainObjPrivatePtr priv = data;
    char *monitorpath;
    char *tmp;
    int n, i;
    xmlNodePtr *nodes = NULL;
    qemuCapsPtr caps = NULL;

    if (VIR_ALLOC(priv->monConfig) < 0) {
        virReportOOMError();
        goto error;
    }

    if (!(monitorpath =
          virXPathString("string(./monitor[1]/@path)", ctxt))) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       "%s", _("no monitor path"));
        goto error;
    }

    tmp = virXPathString("string(./monitor[1]/@type)", ctxt);
    if (tmp)
        priv->monConfig->type = virDomainChrTypeFromString(tmp);
    else
        priv->monConfig->type = VIR_DOMAIN_CHR_TYPE_PTY;
    VIR_FREE(tmp);

    if (virXPathBoolean("count(./monitor[@json = '1']) > 0", ctxt)) {
        priv->monJSON = 1;
    } else {
        priv->monJSON = 0;
    }

    switch (priv->monConfig->type) {
    case VIR_DOMAIN_CHR_TYPE_PTY:
        priv->monConfig->data.file.path = monitorpath;
        break;
    case VIR_DOMAIN_CHR_TYPE_UNIX:
        priv->monConfig->data.nix.path = monitorpath;
        break;
    default:
        VIR_FREE(monitorpath);
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       _("unsupported monitor type '%s'"),
                       virDomainChrTypeToString(priv->monConfig->type));
        goto error;
    }

    n = virXPathNodeSet("./vcpus/vcpu", ctxt, &nodes);
    if (n < 0)
        goto error;
    if (n) {
        priv->nvcpupids = n;
        if (VIR_REALLOC_N(priv->vcpupids, priv->nvcpupids) < 0) {
            virReportOOMError();
            goto error;
        }

        for (i = 0 ; i < n ; i++) {
            char *pidstr = virXMLPropString(nodes[i], "pid");
            if (!pidstr)
                goto error;

            if (virStrToLong_i(pidstr, NULL, 10, &(priv->vcpupids[i])) < 0) {
                VIR_FREE(pidstr);
                goto error;
            }
            VIR_FREE(pidstr);
        }
        VIR_FREE(nodes);
    }

    if ((n = virXPathNodeSet("./qemuCaps/flag", ctxt, &nodes)) < 0) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       "%s", _("failed to parse qemu capabilities flags"));
        goto error;
    }
    if (n > 0) {
        if (!(caps = qemuCapsNew()))
            goto error;

        for (i = 0 ; i < n ; i++) {
            char *str = virXMLPropString(nodes[i], "name");
            if (str) {
                int flag = qemuCapsTypeFromString(str);
                if (flag < 0) {
                    virReportError(VIR_ERR_INTERNAL_ERROR,
                                   _("Unknown qemu capabilities flag %s"), str);
                    VIR_FREE(str);
                    goto error;
                }
                VIR_FREE(str);
                qemuCapsSet(caps, flag);
            }
        }

        priv->caps = caps;
    }
    VIR_FREE(nodes);

    priv->lockState = virXPathString("string(./lockstate)", ctxt);

    if ((tmp = virXPathString("string(./job[1]/@type)", ctxt))) {
        int type;

        if ((type = qemuDomainJobTypeFromString(tmp)) < 0) {
            virReportError(VIR_ERR_INTERNAL_ERROR,
                           _("Unknown job type %s"), tmp);
            VIR_FREE(tmp);
            goto error;
        }
        VIR_FREE(tmp);
        priv->job.active = type;
    }

    if ((tmp = virXPathString("string(./job[1]/@async)", ctxt))) {
        int async;

        if ((async = qemuDomainAsyncJobTypeFromString(tmp)) < 0) {
            virReportError(VIR_ERR_INTERNAL_ERROR,
                           _("Unknown async job type %s"), tmp);
            VIR_FREE(tmp);
            goto error;
        }
        VIR_FREE(tmp);
        priv->job.asyncJob = async;

        if ((tmp = virXPathString("string(./job[1]/@phase)", ctxt))) {
            priv->job.phase = qemuDomainAsyncJobPhaseFromString(async, tmp);
            if (priv->job.phase < 0) {
                virReportError(VIR_ERR_INTERNAL_ERROR,
                               _("Unknown job phase %s"), tmp);
                VIR_FREE(tmp);
                goto error;
            }
            VIR_FREE(tmp);
        }
    }

    priv->fakeReboot = virXPathBoolean("boolean(./fakereboot)", ctxt) == 1;

    return 0;

error:
    virDomainChrSourceDefFree(priv->monConfig);
    priv->monConfig = NULL;
    VIR_FREE(nodes);
    virObjectUnref(caps);
    return -1;
}

static void
qemuDomainDefNamespaceFree(void *nsdata)
{
    qemuDomainCmdlineDefPtr cmd = nsdata;
    unsigned int i;

    if (!cmd)
        return;

    for (i = 0; i < cmd->num_args; i++)
        VIR_FREE(cmd->args[i]);
    for (i = 0; i < cmd->num_env; i++) {
        VIR_FREE(cmd->env_name[i]);
        VIR_FREE(cmd->env_value[i]);
    }
    VIR_FREE(cmd->args);
    VIR_FREE(cmd->env_name);
    VIR_FREE(cmd->env_value);
    VIR_FREE(cmd);
}

static int
qemuDomainDefNamespaceParse(xmlDocPtr xml ATTRIBUTE_UNUSED,
                            xmlNodePtr root ATTRIBUTE_UNUSED,
                            xmlXPathContextPtr ctxt,
                            void **data)
{
    qemuDomainCmdlineDefPtr cmd = NULL;
    bool uses_qemu_ns = false;
    xmlNodePtr *nodes = NULL;
    int n, i;

    if (xmlXPathRegisterNs(ctxt, BAD_CAST "qemu", BAD_CAST QEMU_NAMESPACE_HREF) < 0) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       _("Failed to register xml namespace '%s'"),
                       QEMU_NAMESPACE_HREF);
        return -1;
    }

    if (VIR_ALLOC(cmd) < 0) {
        virReportOOMError();
        return -1;
    }

    /* first handle the extra command-line arguments */
    n = virXPathNodeSet("./qemu:commandline/qemu:arg", ctxt, &nodes);
    if (n < 0)
        goto error;
    uses_qemu_ns |= n > 0;

    if (n && VIR_ALLOC_N(cmd->args, n) < 0)
        goto no_memory;

    for (i = 0; i < n; i++) {
        cmd->args[cmd->num_args] = virXMLPropString(nodes[i], "value");
        if (cmd->args[cmd->num_args] == NULL) {
            virReportError(VIR_ERR_INTERNAL_ERROR,
                           "%s", _("No qemu command-line argument specified"));
            goto error;
        }
        cmd->num_args++;
    }

    VIR_FREE(nodes);

    /* now handle the extra environment variables */
    n = virXPathNodeSet("./qemu:commandline/qemu:env", ctxt, &nodes);
    if (n < 0)
        goto error;
    uses_qemu_ns |= n > 0;

    if (n && VIR_ALLOC_N(cmd->env_name, n) < 0)
        goto no_memory;

    if (n && VIR_ALLOC_N(cmd->env_value, n) < 0)
        goto no_memory;

    for (i = 0; i < n; i++) {
        char *tmp;

        tmp = virXMLPropString(nodes[i], "name");
        if (tmp == NULL) {
            virReportError(VIR_ERR_INTERNAL_ERROR,
                           "%s", _("No qemu environment name specified"));
            goto error;
        }
        if (tmp[0] == '\0') {
            virReportError(VIR_ERR_INTERNAL_ERROR,
                           "%s", _("Empty qemu environment name specified"));
            goto error;
        }
        if (!c_isalpha(tmp[0]) && tmp[0] != '_') {
            virReportError(VIR_ERR_INTERNAL_ERROR,
                           "%s", _("Invalid environment name, it must begin with a letter or underscore"));
            goto error;
        }
        if (strspn(tmp, "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_") != strlen(tmp)) {
            virReportError(VIR_ERR_INTERNAL_ERROR,
                           "%s", _("Invalid environment name, it must contain only alphanumerics and underscore"));
            goto error;
        }

        cmd->env_name[cmd->num_env] = tmp;

        cmd->env_value[cmd->num_env] = virXMLPropString(nodes[i], "value");
        /* a NULL value for command is allowed, since it might be empty */
        cmd->num_env++;
    }

    VIR_FREE(nodes);

    if (uses_qemu_ns)
        *data = cmd;
    else
        VIR_FREE(cmd);

    return 0;

no_memory:
    virReportOOMError();

error:
    VIR_FREE(nodes);
    qemuDomainDefNamespaceFree(cmd);
    return -1;
}

static int
qemuDomainDefNamespaceFormatXML(virBufferPtr buf,
                                void *nsdata)
{
    qemuDomainCmdlineDefPtr cmd = nsdata;
    unsigned int i;

    if (!cmd->num_args && !cmd->num_env)
        return 0;

    virBufferAddLit(buf, "  <qemu:commandline>\n");
    for (i = 0; i < cmd->num_args; i++)
        virBufferEscapeString(buf, "    <qemu:arg value='%s'/>\n",
                              cmd->args[i]);
    for (i = 0; i < cmd->num_env; i++) {
        virBufferAsprintf(buf, "    <qemu:env name='%s'", cmd->env_name[i]);
        if (cmd->env_value[i])
            virBufferEscapeString(buf, " value='%s'", cmd->env_value[i]);
        virBufferAddLit(buf, "/>\n");
    }
    virBufferAddLit(buf, "  </qemu:commandline>\n");

    return 0;
}

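/*
 * The parse/format pair above corresponds to domain XML like the
 * following (argument and variable values hypothetical):
 *
 *     <domain type='kvm' xmlns:qemu='http://libvirt.org/schemas/domain/qemu/1.0'>
 *       ...
 *       <qemu:commandline>
 *         <qemu:arg value='-newarg'/>
 *         <qemu:env name='QEMU_ENV' value='VAL'/>
 *       </qemu:commandline>
 *     </domain>
 */
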
static const char *
qemuDomainDefNamespaceHref(void)
{
    return "xmlns:qemu='" QEMU_NAMESPACE_HREF "'";
}

void qemuDomainSetPrivateDataHooks(virCapsPtr caps)
{
    /* Domain XML parser hooks */
    caps->privateDataAllocFunc = qemuDomainObjPrivateAlloc;
    caps->privateDataFreeFunc = qemuDomainObjPrivateFree;
    caps->privateDataXMLFormat = qemuDomainObjPrivateXMLFormat;
    caps->privateDataXMLParse = qemuDomainObjPrivateXMLParse;
}

void qemuDomainSetNamespaceHooks(virCapsPtr caps)
{
    /* Domain Namespace XML parser hooks */
    caps->ns.parse = qemuDomainDefNamespaceParse;
    caps->ns.free = qemuDomainDefNamespaceFree;
    caps->ns.format = qemuDomainDefNamespaceFormatXML;
    caps->ns.href = qemuDomainDefNamespaceHref;
}

static void
qemuDomainObjSaveJob(virQEMUDriverPtr driver, virDomainObjPtr obj)
{
    if (!virDomainObjIsActive(obj)) {
        /* don't write the state file yet, it will be written once the domain
         * gets activated */
        return;
    }

    if (virDomainSaveStatus(driver->caps, driver->stateDir, obj) < 0)
        VIR_WARN("Failed to save status on vm %s", obj->def->name);
}

void
qemuDomainObjSetJobPhase(virQEMUDriverPtr driver,
                         virDomainObjPtr obj,
                         int phase)
{
    qemuDomainObjPrivatePtr priv = obj->privateData;
    int me = virThreadSelfID();

    if (!priv->job.asyncJob)
        return;

    VIR_DEBUG("Setting '%s' phase to '%s'",
              qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
              qemuDomainAsyncJobPhaseToString(priv->job.asyncJob, phase));

    if (priv->job.asyncOwner && me != priv->job.asyncOwner) {
        VIR_WARN("'%s' async job is owned by thread %d",
                 qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
                 priv->job.asyncOwner);
    }

    priv->job.phase = phase;
    priv->job.asyncOwner = me;
    qemuDomainObjSaveJob(driver, obj);
}

void
qemuDomainObjSetAsyncJobMask(virDomainObjPtr obj,
                             unsigned long long allowedJobs)
{
    qemuDomainObjPrivatePtr priv = obj->privateData;

    if (!priv->job.asyncJob)
        return;

    priv->job.mask = allowedJobs | JOB_MASK(QEMU_JOB_DESTROY);
}

void
qemuDomainObjDiscardAsyncJob(virQEMUDriverPtr driver, virDomainObjPtr obj)
{
    qemuDomainObjPrivatePtr priv = obj->privateData;

    if (priv->job.active == QEMU_JOB_ASYNC_NESTED)
        qemuDomainObjResetJob(priv);
    qemuDomainObjResetAsyncJob(priv);
    qemuDomainObjSaveJob(driver, obj);
}

void
qemuDomainObjReleaseAsyncJob(virDomainObjPtr obj)
{
    qemuDomainObjPrivatePtr priv = obj->privateData;

    VIR_DEBUG("Releasing ownership of '%s' async job",
              qemuDomainAsyncJobTypeToString(priv->job.asyncJob));

    if (priv->job.asyncOwner != virThreadSelfID()) {
        VIR_WARN("'%s' async job is owned by thread %d",
                 qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
                 priv->job.asyncOwner);
    }
    priv->job.asyncOwner = 0;
}

static bool
qemuDomainNestedJobAllowed(qemuDomainObjPrivatePtr priv, enum qemuDomainJob job)
{
    return !priv->job.asyncJob || (priv->job.mask & JOB_MASK(job)) != 0;
}

bool
qemuDomainJobAllowed(qemuDomainObjPrivatePtr priv, enum qemuDomainJob job)
{
    return !priv->job.active && qemuDomainNestedJobAllowed(priv, job);
}

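/*
 * Illustrative sketch of how the job mask interacts with the checks
 * above (hypothetical caller that has started an async job):
 *
 *     // while the async job runs, allow only query jobs (plus the
 *     // always-permitted destroy job added by SetAsyncJobMask itself)
 *     qemuDomainObjSetAsyncJobMask(vm, JOB_MASK(QEMU_JOB_QUERY));
 *     // qemuDomainJobAllowed(priv, QEMU_JOB_QUERY)  -> true
 *     // qemuDomainJobAllowed(priv, QEMU_JOB_MODIFY) -> false
 */
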
/* Give up waiting for mutex after 30 seconds */
#define QEMU_JOB_WAIT_TIME (1000ull * 30)

/*
 * obj must be locked before calling; driver_locked says if qemu_driver is
 * locked or not.
 */
static int ATTRIBUTE_NONNULL(1)
qemuDomainObjBeginJobInternal(virQEMUDriverPtr driver,
                              bool driver_locked,
                              virDomainObjPtr obj,
                              enum qemuDomainJob job,
                              enum qemuDomainAsyncJob asyncJob)
{
    qemuDomainObjPrivatePtr priv = obj->privateData;
    unsigned long long now;
    unsigned long long then;
    bool nested = job == QEMU_JOB_ASYNC_NESTED;

    priv->jobs_queued++;

    if (virTimeMillisNow(&now) < 0)
        return -1;
    then = now + QEMU_JOB_WAIT_TIME;

    virObjectRef(obj);
    if (driver_locked)
        qemuDriverUnlock(driver);

retry:
    if (driver->max_queued &&
        priv->jobs_queued > driver->max_queued) {
        goto error;
    }

    while (!nested && !qemuDomainNestedJobAllowed(priv, job)) {
        if (virCondWaitUntil(&priv->job.asyncCond, &obj->lock, then) < 0)
            goto error;
    }

    while (priv->job.active) {
        if (virCondWaitUntil(&priv->job.cond, &obj->lock, then) < 0)
            goto error;
    }

    /* No job is active but a new async job could have been started while obj
     * was unlocked, so we need to recheck it. */
    if (!nested && !qemuDomainNestedJobAllowed(priv, job))
        goto retry;

    qemuDomainObjResetJob(priv);

    if (job != QEMU_JOB_ASYNC) {
        VIR_DEBUG("Starting job: %s (async=%s)",
                  qemuDomainJobTypeToString(job),
                  qemuDomainAsyncJobTypeToString(priv->job.asyncJob));
        priv->job.active = job;
        priv->job.owner = virThreadSelfID();
    } else {
        VIR_DEBUG("Starting async job: %s",
                  qemuDomainAsyncJobTypeToString(asyncJob));
        qemuDomainObjResetAsyncJob(priv);
        priv->job.asyncJob = asyncJob;
        priv->job.asyncOwner = virThreadSelfID();
        priv->job.start = now;
    }

    if (driver_locked) {
        virDomainObjUnlock(obj);
        qemuDriverLock(driver);
        virDomainObjLock(obj);
    }

    if (qemuDomainTrackJob(job))
        qemuDomainObjSaveJob(driver, obj);

    return 0;

error:
    VIR_WARN("Cannot start job (%s, %s) for domain %s;"
             " current job is (%s, %s) owned by (%d, %d)",
             qemuDomainJobTypeToString(job),
             qemuDomainAsyncJobTypeToString(asyncJob),
             obj->def->name,
             qemuDomainJobTypeToString(priv->job.active),
             qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
             priv->job.owner, priv->job.asyncOwner);

    if (errno == ETIMEDOUT)
        virReportError(VIR_ERR_OPERATION_TIMEOUT,
                       "%s", _("cannot acquire state change lock"));
    else if (driver->max_queued &&
             priv->jobs_queued > driver->max_queued)
        virReportError(VIR_ERR_OPERATION_FAILED,
                       "%s", _("cannot acquire state change lock "
                               "due to max_queued limit"));
    else
        virReportSystemError(errno,
                             "%s", _("cannot acquire job mutex"));
    priv->jobs_queued--;
    if (driver_locked) {
        virDomainObjUnlock(obj);
        qemuDriverLock(driver);
        virDomainObjLock(obj);
    }
    virObjectUnref(obj);
    return -1;
}

/*
 * obj must be locked before calling, driver must NOT be locked
 *
 * This must be called by anything that will change the VM state
 * in any way, or anything that will use the QEMU monitor.
 *
 * Upon successful return, the object will have its ref count increased,
 * successful calls must be followed by EndJob eventually
 */
int qemuDomainObjBeginJob(virQEMUDriverPtr driver,
                          virDomainObjPtr obj,
                          enum qemuDomainJob job)
{
    return qemuDomainObjBeginJobInternal(driver, false, obj, job,
                                         QEMU_ASYNC_JOB_NONE);
}

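/*
 * Illustrative sketch of the Begin/End pairing described above
 * (hypothetical caller):
 *
 *     if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
 *         goto cleanup;
 *     ... change VM state, possibly entering the monitor ...
 *     if (!qemuDomainObjEndJob(driver, vm))
 *         vm = NULL;  // EndJob dropped the last reference
 */
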
int qemuDomainObjBeginAsyncJob(virQEMUDriverPtr driver,
                               virDomainObjPtr obj,
                               enum qemuDomainAsyncJob asyncJob)
{
    return qemuDomainObjBeginJobInternal(driver, false, obj, QEMU_JOB_ASYNC,
                                         asyncJob);
}

/*
 * obj and driver must be locked before calling.
 *
 * This must be called by anything that will change the VM state
 * in any way, or anything that will use the QEMU monitor.
 *
 * Upon successful return, the object will have its ref count increased,
 * successful calls must be followed by EndJob eventually
 */
int qemuDomainObjBeginJobWithDriver(virQEMUDriverPtr driver,
                                    virDomainObjPtr obj,
                                    enum qemuDomainJob job)
{
    if (job <= QEMU_JOB_NONE || job >= QEMU_JOB_ASYNC) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("Attempt to start invalid job"));
        return -1;
    }

    return qemuDomainObjBeginJobInternal(driver, true, obj, job,
                                         QEMU_ASYNC_JOB_NONE);
}

int qemuDomainObjBeginAsyncJobWithDriver(virQEMUDriverPtr driver,
                                         virDomainObjPtr obj,
                                         enum qemuDomainAsyncJob asyncJob)
{
    return qemuDomainObjBeginJobInternal(driver, true, obj, QEMU_JOB_ASYNC,
                                         asyncJob);
}

/*
 * obj must be locked before calling, driver does not matter
 *
 * To be called after completing the work associated with the
 * earlier qemuDomainBeginJob() call
 *
 * Returns true if @obj was still referenced, false if it was
 * disposed of.
 */
bool qemuDomainObjEndJob(virQEMUDriverPtr driver, virDomainObjPtr obj)
{
    qemuDomainObjPrivatePtr priv = obj->privateData;
    enum qemuDomainJob job = priv->job.active;

    priv->jobs_queued--;

    VIR_DEBUG("Stopping job: %s (async=%s)",
              qemuDomainJobTypeToString(job),
              qemuDomainAsyncJobTypeToString(priv->job.asyncJob));

    qemuDomainObjResetJob(priv);
    if (qemuDomainTrackJob(job))
        qemuDomainObjSaveJob(driver, obj);
    virCondSignal(&priv->job.cond);

    return virObjectUnref(obj);
}

bool
qemuDomainObjEndAsyncJob(virQEMUDriverPtr driver, virDomainObjPtr obj)
{
    qemuDomainObjPrivatePtr priv = obj->privateData;

    priv->jobs_queued--;

    VIR_DEBUG("Stopping async job: %s",
              qemuDomainAsyncJobTypeToString(priv->job.asyncJob));

    qemuDomainObjResetAsyncJob(priv);
    qemuDomainObjSaveJob(driver, obj);
    virCondBroadcast(&priv->job.asyncCond);

    return virObjectUnref(obj);
}

void
qemuDomainObjAbortAsyncJob(virDomainObjPtr obj)
{
    qemuDomainObjPrivatePtr priv = obj->privateData;

    VIR_DEBUG("Requesting abort of async job: %s",
              qemuDomainAsyncJobTypeToString(priv->job.asyncJob));

    priv->job.asyncAbort = true;
}

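/*
 * The flag set above is only a request: the async job is expected to
 * poll it between steps and unwind cooperatively. Illustrative sketch
 * of such a loop (hypothetical code path):
 *
 *     while (more work remains) {
 *         if (priv->job.asyncAbort)
 *             goto abort;  // e.g. report VIR_ERR_OPERATION_ABORTED
 *         ... perform the next step ...
 *     }
 */
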
static int
qemuDomainObjEnterMonitorInternal(virQEMUDriverPtr driver,
                                  bool driver_locked,
                                  virDomainObjPtr obj,
                                  enum qemuDomainAsyncJob asyncJob)
{
    qemuDomainObjPrivatePtr priv = obj->privateData;

    if (asyncJob != QEMU_ASYNC_JOB_NONE) {
        if (asyncJob != priv->job.asyncJob) {
            virReportError(VIR_ERR_INTERNAL_ERROR,
                           _("unexpected async job %d"), asyncJob);
            return -1;
        }
        if (priv->job.asyncOwner != virThreadSelfID())
            VIR_WARN("This thread doesn't seem to be the async job owner: %d",
                     priv->job.asyncOwner);
        if (qemuDomainObjBeginJobInternal(driver, driver_locked, obj,
                                          QEMU_JOB_ASYNC_NESTED,
                                          QEMU_ASYNC_JOB_NONE) < 0)
            return -1;
        if (!virDomainObjIsActive(obj)) {
            virReportError(VIR_ERR_OPERATION_FAILED, "%s",
                           _("domain is no longer running"));
            /* Still referenced by the containing async job. */
            ignore_value(qemuDomainObjEndJob(driver, obj));
            return -1;
        }
    } else if (priv->job.asyncOwner == virThreadSelfID()) {
        VIR_WARN("This thread seems to be the async job owner; entering"
                 " monitor without asking for a nested job is dangerous");
    }

    qemuMonitorLock(priv->mon);
    virObjectRef(priv->mon);
    ignore_value(virTimeMillisNow(&priv->monStart));
    virDomainObjUnlock(obj);
    if (driver_locked)
        qemuDriverUnlock(driver);

    return 0;
}

static void ATTRIBUTE_NONNULL(1)
qemuDomainObjExitMonitorInternal(virQEMUDriverPtr driver,
                                 bool driver_locked,
                                 virDomainObjPtr obj)
{
    qemuDomainObjPrivatePtr priv = obj->privateData;
    bool hasRefs;

    hasRefs = virObjectUnref(priv->mon);

    if (hasRefs)
        qemuMonitorUnlock(priv->mon);

    if (driver_locked)
        qemuDriverLock(driver);
    virDomainObjLock(obj);

    priv->monStart = 0;
    if (!hasRefs)
        priv->mon = NULL;

    if (priv->job.active == QEMU_JOB_ASYNC_NESTED) {
        qemuDomainObjResetJob(priv);
        qemuDomainObjSaveJob(driver, obj);
        virCondSignal(&priv->job.cond);

        virObjectUnref(obj);
    }
}

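/*
 * Illustrative sketch of the enter/exit pairing implemented above
 * (hypothetical caller already holding a normal job; the monitor
 * command shown is one example from qemu_monitor.h):
 *
 *     qemuDomainObjEnterMonitor(driver, vm);
 *     ret = qemuMonitorSystemPowerdown(priv->mon);
 *     qemuDomainObjExitMonitor(driver, vm);
 */
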
/*
 * obj must be locked before calling, driver must be unlocked
 *
 * To be called immediately before any QEMU monitor API call.
 * Must have already called qemuDomainObjBeginJob() and checked
 * that the VM is still active; may not be used for nested async jobs.
 *
 * To be followed with qemuDomainObjExitMonitor() once complete
 */
void qemuDomainObjEnterMonitor(virQEMUDriverPtr driver,
                               virDomainObjPtr obj)
{
|
qemu: fix crash when mixing sync and async monitor jobs
Currently, we attempt to run sync job and async job at the same time. It
means that the monitor commands for two jobs can be run in any order.
In the function qemuDomainObjEnterMonitorInternal():
if (priv->job.active == QEMU_JOB_NONE && priv->job.asyncJob) {
if (qemuDomainObjBeginNestedJob(driver, obj) < 0)
We check whether the caller is an async job by priv->job.active and
priv->job.asynJob. But when an async job is running, and a sync job is
also running at the time of the check, then priv->job.active is not
QEMU_JOB_NONE. So we cannot check whether the caller is an async job
in the function qemuDomainObjEnterMonitorInternal(), and must instead
put the burden on the caller to tell us when an async command wants
to do a nested job.
Once the burden is on the caller, then only async monitor enters need
to worry about whether the VM is still running; for sync monitor enter,
the internal return is always 0, so lots of ignore_value can be dropped.
* src/qemu/THREADS.txt: Reflect new rules.
* src/qemu/qemu_domain.h (qemuDomainObjEnterMonitorAsync): New
prototype.
* src/qemu/qemu_process.h (qemuProcessStartCPUs)
(qemuProcessStopCPUs): Add parameter.
* src/qemu/qemu_migration.h (qemuMigrationToFile): Likewise.
(qemuMigrationWaitForCompletion): Make static.
* src/qemu/qemu_domain.c (qemuDomainObjEnterMonitorInternal): Add
parameter.
(qemuDomainObjEnterMonitorAsync): New function.
(qemuDomainObjEnterMonitor, qemuDomainObjEnterMonitorWithDriver):
Update callers.
* src/qemu/qemu_driver.c (qemuDomainSaveInternal)
(qemudDomainCoreDump, doCoreDump, processWatchdogEvent)
(qemudDomainSuspend, qemudDomainResume, qemuDomainSaveImageStartVM)
(qemuDomainSnapshotCreateActive, qemuDomainRevertToSnapshot):
Likewise.
* src/qemu/qemu_process.c (qemuProcessStopCPUs)
(qemuProcessFakeReboot, qemuProcessRecoverMigration)
(qemuProcessRecoverJob, qemuProcessStart): Likewise.
* src/qemu/qemu_migration.c (qemuMigrationToFile)
(qemuMigrationWaitForCompletion, qemuMigrationUpdateJobStatus)
(qemuMigrationJobStart, qemuDomainMigrateGraphicsRelocate)
(doNativeMigrate, doTunnelMigrate, qemuMigrationPerformJob)
(qemuMigrationPerformPhase, qemuMigrationFinish)
(qemuMigrationConfirm): Likewise.
* src/qemu/qemu_hotplug.c: Drop unneeded ignore_value.
2011-07-28 23:18:24 +00:00
|
|
|
ignore_value(qemuDomainObjEnterMonitorInternal(driver, false, obj,
|
|
|
|
QEMU_ASYNC_JOB_NONE));
|
2011-07-03 21:55:47 +00:00
|
|
|
}
|
|
|
|
|
2012-11-28 16:43:10 +00:00
|
|
|
/* obj must NOT be locked before calling, driver must be unlocked
|
2011-07-03 21:55:47 +00:00
|
|
|
*
|
|
|
|
* Should be paired with an earlier qemuDomainObjEnterMonitor() call
|
|
|
|
*/
|
2012-11-28 16:43:10 +00:00
|
|
|
void qemuDomainObjExitMonitor(virQEMUDriverPtr driver,
|
2011-06-06 08:28:38 +00:00
|
|
|
virDomainObjPtr obj)
|
2011-07-03 21:55:47 +00:00
|
|
|
{
|
2011-06-06 08:28:38 +00:00
|
|
|
qemuDomainObjExitMonitorInternal(driver, false, obj);
|
2011-07-03 21:55:47 +00:00
|
|
|
}
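

/*
 * Illustrative sketch (added note, not part of the original source): the
 * typical monitor call sequence for a sync job, following the rules in
 * src/qemu/THREADS.txt.  qemuMonitorSystemPowerdown() stands in for any
 * monitor API:
 *
 *     if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
 *         goto cleanup;
 *
 *     if (virDomainObjIsActive(vm)) {
 *         qemuDomainObjEnterMonitor(driver, vm);
 *         qemuMonitorSystemPowerdown(priv->mon);
 *         qemuDomainObjExitMonitor(driver, vm);
 *     }
 *
 *     if (qemuDomainObjEndJob(driver, vm) == 0)
 *         vm = NULL;
 */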


/*
 * obj must be locked before calling, driver must be locked
 *
 * To be called immediately before any QEMU monitor API call
 * Must have already called qemuDomainObjBeginJobWithDriver() and
 * checked that the VM is still active; may not be used for nested async jobs.
 *
 * To be followed with qemuDomainObjExitMonitorWithDriver() once complete
 */
void qemuDomainObjEnterMonitorWithDriver(virQEMUDriverPtr driver,
                                         virDomainObjPtr obj)
{
    ignore_value(qemuDomainObjEnterMonitorInternal(driver, true, obj,
                                                   QEMU_ASYNC_JOB_NONE));
}


/*
 * obj and driver must be locked before calling
 *
 * To be called immediately before any QEMU monitor API call.
 * Must have already either called qemuDomainObjBeginJobWithDriver()
 * and checked that the VM is still active, with asyncJob of
 * QEMU_ASYNC_JOB_NONE; or already called qemuDomainObjBeginAsyncJob(),
 * with the same asyncJob.
 *
 * Returns 0 if job was started, in which case this must be followed with
 * qemuDomainObjExitMonitorWithDriver(); or -1 if the job could not be
 * started (probably because the vm exited in the meantime).
 */
int
qemuDomainObjEnterMonitorAsync(virQEMUDriverPtr driver,
                               virDomainObjPtr obj,
                               enum qemuDomainAsyncJob asyncJob)
{
    return qemuDomainObjEnterMonitorInternal(driver, true, obj, asyncJob);
}
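

/*
 * Illustrative sketch (added note, not part of the original source): an
 * async job worker entering the monitor; a negative return means the
 * domain went away in the meantime and the caller must bail out:
 *
 *     if (qemuDomainObjEnterMonitorAsync(driver, vm,
 *                                        QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
 *         return -1;
 *     ret = qemuMonitorGetMigrationStatus(priv->mon, ...);
 *     qemuDomainObjExitMonitorWithDriver(driver, vm);
 */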


/* obj must NOT be locked before calling, driver must be unlocked,
 * and will be locked after returning
 *
 * Should be paired with an earlier qemuDomainObjEnterMonitorWithDriver() call
 */
void qemuDomainObjExitMonitorWithDriver(virQEMUDriverPtr driver,
                                        virDomainObjPtr obj)
{
    qemuDomainObjExitMonitorInternal(driver, true, obj);
}


static int
qemuDomainObjEnterAgentInternal(virQEMUDriverPtr driver,
                                bool driver_locked,
                                virDomainObjPtr obj)
{
    qemuDomainObjPrivatePtr priv = obj->privateData;

    qemuAgentLock(priv->agent);
    virObjectRef(priv->agent);
    ignore_value(virTimeMillisNow(&priv->agentStart));
    virDomainObjUnlock(obj);
    if (driver_locked)
        qemuDriverUnlock(driver);

    return 0;
}


static void ATTRIBUTE_NONNULL(1)
qemuDomainObjExitAgentInternal(virQEMUDriverPtr driver,
                               bool driver_locked,
                               virDomainObjPtr obj)
{
    qemuDomainObjPrivatePtr priv = obj->privateData;
    bool hasRefs;

    hasRefs = virObjectUnref(priv->agent);

    if (hasRefs)
        qemuAgentUnlock(priv->agent);

    if (driver_locked)
        qemuDriverLock(driver);
    virDomainObjLock(obj);

    priv->agentStart = 0;
    if (!hasRefs)
        priv->agent = NULL;
}


/*
 * obj must be locked before calling, driver must be unlocked
 *
 * To be called immediately before any QEMU agent API call.
 * Must have already called qemuDomainObjBeginJob() and checked
 * that the VM is still active.
 *
 * To be followed with qemuDomainObjExitAgent() once complete
 */
void qemuDomainObjEnterAgent(virQEMUDriverPtr driver,
                             virDomainObjPtr obj)
{
    ignore_value(qemuDomainObjEnterAgentInternal(driver, false, obj));
}


/* obj must NOT be locked before calling, driver must be unlocked
 *
 * Should be paired with an earlier qemuDomainObjEnterAgent() call
 */
void qemuDomainObjExitAgent(virQEMUDriverPtr driver,
                            virDomainObjPtr obj)
{
    qemuDomainObjExitAgentInternal(driver, false, obj);
}
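

/*
 * Illustrative sketch (added note, not part of the original source): an
 * agent call under a sync job; qemuAgentShutdown() stands in for any
 * agent API:
 *
 *     qemuDomainObjEnterAgent(driver, vm);
 *     ret = qemuAgentShutdown(priv->agent, QEMU_AGENT_SHUTDOWN_POWERDOWN);
 *     qemuDomainObjExitAgent(driver, vm);
 */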


/*
 * obj must be locked before calling, driver must be locked
 *
 * To be called immediately before any QEMU agent API call.
 * Must have already called qemuDomainObjBeginJobWithDriver() and
 * checked that the VM is still active; may not be used for nested async jobs.
 *
 * To be followed with qemuDomainObjExitAgentWithDriver() once complete
 */
void qemuDomainObjEnterAgentWithDriver(virQEMUDriverPtr driver,
                                       virDomainObjPtr obj)
{
    ignore_value(qemuDomainObjEnterAgentInternal(driver, true, obj));
}


/* obj must NOT be locked before calling, driver must be unlocked,
 * and will be locked after returning
 *
 * Should be paired with an earlier qemuDomainObjEnterAgentWithDriver() call
 */
void qemuDomainObjExitAgentWithDriver(virQEMUDriverPtr driver,
                                      virDomainObjPtr obj)
{
    qemuDomainObjExitAgentInternal(driver, true, obj);
}


void qemuDomainObjEnterRemoteWithDriver(virQEMUDriverPtr driver,
                                        virDomainObjPtr obj)
{
    virObjectRef(obj);
    virDomainObjUnlock(obj);
    qemuDriverUnlock(driver);
}


void qemuDomainObjExitRemoteWithDriver(virQEMUDriverPtr driver,
                                       virDomainObjPtr obj)
{
    qemuDriverLock(driver);
    virDomainObjLock(obj);
    virObjectUnref(obj);
}
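

/*
 * Illustrative sketch (added note, not part of the original source): the
 * remote enter/exit pair brackets a call out to a remote libvirtd, e.g.
 * during peer-to-peer migration, so that neither lock is held across a
 * potentially slow network round trip:
 *
 *     qemuDomainObjEnterRemoteWithDriver(driver, vm);
 *     ...call the prepare step on the destination connection...
 *     qemuDomainObjExitRemoteWithDriver(driver, vm);
 */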


int
qemuDomainDefFormatBuf(virQEMUDriverPtr driver,
                       virDomainDefPtr def,
                       unsigned int flags,
                       virBuffer *buf)
{
    int ret = -1;
    virCPUDefPtr cpu = NULL;
    virCPUDefPtr def_cpu = def->cpu;
    virDomainControllerDefPtr *controllers = NULL;
    int ncontrollers = 0;

    /* Update guest CPU requirements according to host CPU */
    if ((flags & VIR_DOMAIN_XML_UPDATE_CPU) &&
        def_cpu &&
        (def_cpu->mode != VIR_CPU_MODE_CUSTOM || def_cpu->model)) {
        if (!driver->caps ||
            !driver->caps->host.cpu ||
            !driver->caps->host.cpu->model) {
            virReportError(VIR_ERR_OPERATION_FAILED,
                           "%s", _("cannot get host CPU capabilities"));
            goto cleanup;
        }

        if (!(cpu = virCPUDefCopy(def_cpu)) ||
            cpuUpdate(cpu, driver->caps->host.cpu) < 0)
            goto cleanup;
        def->cpu = cpu;
    }

    if ((flags & VIR_DOMAIN_XML_MIGRATABLE)) {
        int i;
        virDomainControllerDefPtr usb = NULL;

        /* If only the default USB controller is present, we can remove it
         * and make the XML compatible with older versions of libvirt which
         * didn't support USB controllers in the XML but always added the
         * default one to qemu anyway.
         */
        for (i = 0; i < def->ncontrollers; i++) {
            if (def->controllers[i]->type == VIR_DOMAIN_CONTROLLER_TYPE_USB) {
                if (usb) {
                    usb = NULL;
                    break;
                }
                usb = def->controllers[i];
            }
        }
        if (usb && usb->idx == 0 && usb->model == -1) {
            VIR_DEBUG("Removing default USB controller from domain '%s'"
                      " for migration compatibility", def->name);
            controllers = def->controllers;
            ncontrollers = def->ncontrollers;
            if (VIR_ALLOC_N(def->controllers, ncontrollers - 1) < 0) {
                controllers = NULL;
                virReportOOMError();
                goto cleanup;
            }

            def->ncontrollers = 0;
            for (i = 0; i < ncontrollers; i++) {
                if (controllers[i] != usb)
                    def->controllers[def->ncontrollers++] = controllers[i];
            }
        }
    }

    ret = virDomainDefFormatInternal(def, flags, buf);

cleanup:
    def->cpu = def_cpu;
    virCPUDefFree(cpu);
    if (controllers) {
        VIR_FREE(def->controllers);
        def->controllers = controllers;
        def->ncontrollers = ncontrollers;
    }
    return ret;
}
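

/*
 * For reference (added note): the element qemuDomainDefFormatBuf() strips
 * above is the implicit default controller,
 *
 *     <controller type='usb' index='0'/>
 *
 * which older libvirt never wrote to the XML even though qemu always
 * created the device.
 */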


char *qemuDomainDefFormatXML(virQEMUDriverPtr driver,
                             virDomainDefPtr def,
                             unsigned int flags)
{
    virBuffer buf = VIR_BUFFER_INITIALIZER;

    if (qemuDomainDefFormatBuf(driver, def, flags, &buf) < 0) {
        virBufferFreeAndReset(&buf);
        return NULL;
    }

    if (virBufferError(&buf)) {
        virReportOOMError();
        virBufferFreeAndReset(&buf);
        return NULL;
    }

    return virBufferContentAndReset(&buf);
}


char *qemuDomainFormatXML(virQEMUDriverPtr driver,
                          virDomainObjPtr vm,
                          unsigned int flags)
{
    virDomainDefPtr def;

    if ((flags & VIR_DOMAIN_XML_INACTIVE) && vm->newDef)
        def = vm->newDef;
    else
        def = vm->def;

    return qemuDomainDefFormatXML(driver, def, flags);
}


char *
qemuDomainDefFormatLive(virQEMUDriverPtr driver,
                        virDomainDefPtr def,
                        bool inactive,
                        bool compatible)
{
    unsigned int flags = QEMU_DOMAIN_FORMAT_LIVE_FLAGS;

    if (inactive)
        flags |= VIR_DOMAIN_XML_INACTIVE;
    if (compatible)
        flags |= VIR_DOMAIN_XML_MIGRATABLE;

    return qemuDomainDefFormatXML(driver, def, flags);
}
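

/*
 * Illustrative sketch (added note, not part of the original source):
 * migration code typically wants the live definition in a form older
 * libvirt can parse, i.e. inactive=false, compatible=true:
 *
 *     char *xml = qemuDomainDefFormatLive(driver, vm->def, false, true);
 */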


void qemuDomainObjTaint(virQEMUDriverPtr driver,
                        virDomainObjPtr obj,
                        enum virDomainTaintFlags taint,
                        int logFD)
{
    virErrorPtr orig_err = NULL;

    if (virDomainObjTaint(obj, taint)) {
        char uuidstr[VIR_UUID_STRING_BUFLEN];
        virUUIDFormat(obj->def->uuid, uuidstr);

        VIR_WARN("Domain id=%d name='%s' uuid=%s is tainted: %s",
                 obj->def->id,
                 obj->def->name,
                 uuidstr,
                 virDomainTaintTypeToString(taint));

        /* We don't care about errors logging taint info, so
         * preserve original error, and clear any error that
         * is raised */
        orig_err = virSaveLastError();
        if (qemuDomainAppendLog(driver, obj, logFD,
                                "Domain id=%d is tainted: %s\n",
                                obj->def->id,
                                virDomainTaintTypeToString(taint)) < 0)
            virResetLastError();
        if (orig_err) {
            virSetError(orig_err);
            virFreeError(orig_err);
        }
    }
}


void qemuDomainObjCheckTaint(virQEMUDriverPtr driver,
                             virDomainObjPtr obj,
                             int logFD)
{
    int i;

    if (driver->privileged &&
        (!driver->clearEmulatorCapabilities ||
         driver->user == 0 ||
         driver->group == 0))
        qemuDomainObjTaint(driver, obj, VIR_DOMAIN_TAINT_HIGH_PRIVILEGES, logFD);

    if (obj->def->namespaceData) {
        qemuDomainCmdlineDefPtr qemucmd = obj->def->namespaceData;
        if (qemucmd->num_args || qemucmd->num_env)
            qemuDomainObjTaint(driver, obj, VIR_DOMAIN_TAINT_CUSTOM_ARGV, logFD);
    }

    if (obj->def->cpu && obj->def->cpu->mode == VIR_CPU_MODE_HOST_PASSTHROUGH)
        qemuDomainObjTaint(driver, obj, VIR_DOMAIN_TAINT_HOST_CPU, logFD);

    for (i = 0 ; i < obj->def->ndisks ; i++)
        qemuDomainObjCheckDiskTaint(driver, obj, obj->def->disks[i], logFD);

    for (i = 0 ; i < obj->def->nnets ; i++)
        qemuDomainObjCheckNetTaint(driver, obj, obj->def->nets[i], logFD);
}
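

/*
 * Illustrative sketch (added note, not part of the original source):
 * process startup runs the full taint check once the domain log file is
 * open, so every taint reason is recorded there:
 *
 *     if ((logfile = qemuDomainCreateLog(driver, vm, false)) < 0)
 *         goto cleanup;
 *     qemuDomainObjCheckTaint(driver, vm, logfile);
 */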


void qemuDomainObjCheckDiskTaint(virQEMUDriverPtr driver,
                                 virDomainObjPtr obj,
                                 virDomainDiskDefPtr disk,
                                 int logFD)
{
    if ((!disk->format || disk->format == VIR_STORAGE_FILE_AUTO) &&
        driver->allowDiskFormatProbing)
        qemuDomainObjTaint(driver, obj, VIR_DOMAIN_TAINT_DISK_PROBING, logFD);

    if (disk->rawio == 1)
        qemuDomainObjTaint(driver, obj, VIR_DOMAIN_TAINT_HIGH_PRIVILEGES, logFD);
}


void qemuDomainObjCheckNetTaint(virQEMUDriverPtr driver,
                                virDomainObjPtr obj,
                                virDomainNetDefPtr net,
                                int logFD)
{
    /* script is only useful for NET_TYPE_ETHERNET (qemu) and
     * NET_TYPE_BRIDGE (xen), but could be (incorrectly) specified for
     * any interface type. In any case, it's adding user sauce into
     * the soup, so it should taint the domain.
     */
    if (net->script != NULL)
        qemuDomainObjTaint(driver, obj, VIR_DOMAIN_TAINT_SHELL_SCRIPTS, logFD);
}


static int
qemuDomainOpenLogHelper(virQEMUDriverPtr driver,
                        virDomainObjPtr vm,
                        int oflags,
                        mode_t mode)
{
    char *logfile;
    int fd = -1;
    bool trunc = false;

    if (virAsprintf(&logfile, "%s/%s.log", driver->logDir, vm->def->name) < 0) {
        virReportOOMError();
        return -1;
    }

    /* To make SELinux happy we always need to open in append mode.
     * So we fake O_TRUNC by calling ftruncate after open instead
     */
    if (oflags & O_TRUNC) {
        oflags &= ~O_TRUNC;
        oflags |= O_APPEND;
        trunc = true;
    }

    if ((fd = open(logfile, oflags, mode)) < 0) {
        virReportSystemError(errno, _("failed to create logfile %s"),
                             logfile);
        goto cleanup;
    }
    if (virSetCloseExec(fd) < 0) {
        virReportSystemError(errno, _("failed to set close-on-exec flag on %s"),
                             logfile);
        VIR_FORCE_CLOSE(fd);
        goto cleanup;
    }
    if (trunc &&
        ftruncate(fd, 0) < 0) {
        virReportSystemError(errno, _("failed to truncate %s"),
                             logfile);
        VIR_FORCE_CLOSE(fd);
        goto cleanup;
    }

cleanup:
    VIR_FREE(logfile);
    return fd;
}


int
qemuDomainCreateLog(virQEMUDriverPtr driver, virDomainObjPtr vm,
                    bool append)
{
    int oflags;

    oflags = O_CREAT | O_WRONLY;
    /* Only logrotate files in /var/log, so only append if running privileged */
    if (driver->privileged || append)
        oflags |= O_APPEND;
    else
        oflags |= O_TRUNC;

    return qemuDomainOpenLogHelper(driver, vm, oflags, S_IRUSR | S_IWUSR);
}


int
qemuDomainOpenLog(virQEMUDriverPtr driver, virDomainObjPtr vm, off_t pos)
{
    int fd;
    off_t off;
    int whence;

    if ((fd = qemuDomainOpenLogHelper(driver, vm, O_RDONLY, 0)) < 0)
        return -1;

    if (pos < 0) {
        off = 0;
        whence = SEEK_END;
    } else {
        off = pos;
        whence = SEEK_SET;
    }

    if (lseek(fd, off, whence) < 0) {
        if (whence == SEEK_END)
            virReportSystemError(errno,
                                 _("unable to seek to end of log for %s"),
                                 vm->def->name);
        else
            virReportSystemError(errno,
                                 _("unable to seek to %lld from start for %s"),
                                 (long long)off, vm->def->name);
        VIR_FORCE_CLOSE(fd);
    }

    return fd;
}


int qemuDomainAppendLog(virQEMUDriverPtr driver,
                        virDomainObjPtr obj,
                        int logFD,
                        const char *fmt, ...)
{
    int fd = logFD;
    va_list argptr;
    char *message = NULL;
    int ret = -1;

    va_start(argptr, fmt);

    if ((fd == -1) &&
        (fd = qemuDomainCreateLog(driver, obj, true)) < 0)
        goto cleanup;

    if (virVasprintf(&message, fmt, argptr) < 0) {
        virReportOOMError();
        goto cleanup;
    }
    if (safewrite(fd, message, strlen(message)) < 0) {
        virReportSystemError(errno, _("Unable to write to domain logfile %s"),
                             obj->def->name);
        goto cleanup;
    }

    ret = 0;

cleanup:
    va_end(argptr);

    if (fd != logFD)
        VIR_FORCE_CLOSE(fd);

    VIR_FREE(message);
    return ret;
}
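

/*
 * Illustrative sketch (added note, not part of the original source; the
 * "cmd" variable is hypothetical): passing logFD == -1 makes
 * qemuDomainAppendLog() open and close the log itself, which suits
 * one-off messages:
 *
 *     ignore_value(qemuDomainAppendLog(driver, vm, -1,
 *                                      "custom monitor command: %s\n",
 *                                      cmd));
 */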


/* Locate an appropriate 'qemu-img' binary.  */
const char *
qemuFindQemuImgBinary(virQEMUDriverPtr driver)
{
    if (!driver->qemuImgBinary) {
        driver->qemuImgBinary = virFindFileInPath("kvm-img");
        if (!driver->qemuImgBinary)
            driver->qemuImgBinary = virFindFileInPath("qemu-img");
        if (!driver->qemuImgBinary)
            virReportError(VIR_ERR_INTERNAL_ERROR,
                           "%s", _("unable to find kvm-img or qemu-img"));
    }

    return driver->qemuImgBinary;
}


int
qemuDomainSnapshotWriteMetadata(virDomainObjPtr vm,
                                virDomainSnapshotObjPtr snapshot,
                                char *snapshotDir)
{
    char *newxml = NULL;
    int ret = -1;
    char *snapDir = NULL;
    char *snapFile = NULL;
    char uuidstr[VIR_UUID_STRING_BUFLEN];

    virUUIDFormat(vm->def->uuid, uuidstr);
    newxml = virDomainSnapshotDefFormat(uuidstr, snapshot->def,
                                        QEMU_DOMAIN_FORMAT_LIVE_FLAGS, 1);
    if (newxml == NULL)
        return -1;

    if (virAsprintf(&snapDir, "%s/%s", snapshotDir, vm->def->name) < 0) {
        virReportOOMError();
        goto cleanup;
    }
    if (virFileMakePath(snapDir) < 0) {
        virReportSystemError(errno, _("cannot create snapshot directory '%s'"),
                             snapDir);
        goto cleanup;
    }

    if (virAsprintf(&snapFile, "%s/%s.xml", snapDir, snapshot->def->name) < 0) {
        virReportOOMError();
        goto cleanup;
    }

    ret = virXMLSaveFile(snapFile, NULL, "snapshot-edit", newxml);

cleanup:
    VIR_FREE(snapFile);
    VIR_FREE(snapDir);
    VIR_FREE(newxml);
    return ret;
}
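

/*
 * Added note: the metadata written above lands in
 *
 *     <snapshotDir>/<domain name>/<snapshot name>.xml
 *
 * which is the same file qemuDomainSnapshotDiscard() unlinks when the
 * snapshot is deleted.
 */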


/* The domain is expected to be locked and inactive. Return -1 on normal
 * failure, 1 if we skipped a disk due to try_all.  */
static int
qemuDomainSnapshotForEachQcow2Raw(virQEMUDriverPtr driver,
                                  virDomainDefPtr def,
                                  const char *name,
                                  const char *op,
                                  bool try_all,
                                  int ndisks)
{
    const char *qemuimgarg[] = { NULL, "snapshot", NULL, NULL, NULL, NULL };
    int i;
    bool skipped = false;

    qemuimgarg[0] = qemuFindQemuImgBinary(driver);
    if (qemuimgarg[0] == NULL) {
        /* qemuFindQemuImgBinary set the error */
        return -1;
    }

    qemuimgarg[2] = op;
    qemuimgarg[3] = name;

    for (i = 0; i < ndisks; i++) {
        /* FIXME: we also need to handle LVM here */
        if (def->disks[i]->device == VIR_DOMAIN_DISK_DEVICE_DISK) {
            if (def->disks[i]->format > 0 &&
                def->disks[i]->format != VIR_STORAGE_FILE_QCOW2) {
                if (try_all) {
                    /* Continue on even in the face of error, since other
                     * disks in this VM may have the same snapshot name.
                     */
                    VIR_WARN("skipping snapshot action on %s",
                             def->disks[i]->dst);
                    skipped = true;
                    continue;
                } else if (STREQ(op, "-c") && i) {
                    /* We must roll back partial creation by deleting
                     * all earlier snapshots.  */
                    qemuDomainSnapshotForEachQcow2Raw(driver, def, name,
                                                      "-d", false, i);
                }
                virReportError(VIR_ERR_OPERATION_INVALID,
                               _("Disk device '%s' does not support"
                                 " snapshotting"),
                               def->disks[i]->dst);
                return -1;
            }

            qemuimgarg[4] = def->disks[i]->src;

            if (virRun(qemuimgarg, NULL) < 0) {
                if (try_all) {
                    VIR_WARN("skipping snapshot action on %s",
                             def->disks[i]->dst);
                    skipped = true;
                    continue;
                } else if (STREQ(op, "-c") && i) {
                    /* We must roll back partial creation by deleting
                     * all earlier snapshots.  */
                    qemuDomainSnapshotForEachQcow2Raw(driver, def, name,
                                                      "-d", false, i);
                }
                return -1;
            }
        }
    }

    return skipped ? 1 : 0;
}


/* The domain is expected to be locked and inactive. Return -1 on normal
 * failure, 1 if we skipped a disk due to try_all.  */
int
qemuDomainSnapshotForEachQcow2(virQEMUDriverPtr driver,
                               virDomainObjPtr vm,
                               virDomainSnapshotObjPtr snap,
                               const char *op,
                               bool try_all)
{
    /* Prefer action on the disks in use at the time the snapshot was
     * created; but fall back to current definition if dealing with a
     * snapshot created prior to libvirt 0.9.5.  */
    virDomainDefPtr def = snap->def->dom;

    if (!def)
        def = vm->def;
    return qemuDomainSnapshotForEachQcow2Raw(driver, def, snap->def->name,
                                             op, try_all, def->ndisks);
}
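

/*
 * Added note: for each qcow2 disk, the helpers above end up running a
 * command line of the form
 *
 *     qemu-img snapshot <op> <snapshot name> <disk source>
 *
 * where <op> is "-c" to create the internal snapshot or "-d" to delete
 * it, matching the qemuimgarg[] vector built in the raw helper.
 */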


/* Discard one snapshot (or its metadata), without reparenting any children.  */
int
qemuDomainSnapshotDiscard(virQEMUDriverPtr driver,
                          virDomainObjPtr vm,
                          virDomainSnapshotObjPtr snap,
                          bool update_current,
                          bool metadata_only)
{
    char *snapFile = NULL;
    int ret = -1;
    qemuDomainObjPrivatePtr priv;
    virDomainSnapshotObjPtr parentsnap = NULL;

    if (!metadata_only) {
        if (!virDomainObjIsActive(vm)) {
            /* Ignore any skipped disks */
            if (qemuDomainSnapshotForEachQcow2(driver, vm, snap, "-d",
                                               true) < 0)
                goto cleanup;
        } else {
            priv = vm->privateData;
            qemuDomainObjEnterMonitorWithDriver(driver, vm);
            /* we continue on even in the face of error */
            qemuMonitorDeleteSnapshot(priv->mon, snap->def->name);
            qemuDomainObjExitMonitorWithDriver(driver, vm);
        }
    }

    if (virAsprintf(&snapFile, "%s/%s/%s.xml", driver->snapshotDir,
                    vm->def->name, snap->def->name) < 0) {
        virReportOOMError();
        goto cleanup;
    }

    if (snap == vm->current_snapshot) {
        if (update_current && snap->def->parent) {
            parentsnap = virDomainSnapshotFindByName(vm->snapshots,
                                                     snap->def->parent);
            if (!parentsnap) {
                VIR_WARN("missing parent snapshot matching name '%s'",
                         snap->def->parent);
            } else {
                parentsnap->def->current = true;
                if (qemuDomainSnapshotWriteMetadata(vm, parentsnap,
                                                    driver->snapshotDir) < 0) {
                    VIR_WARN("failed to set parent snapshot '%s' as current",
                             snap->def->parent);
                    parentsnap->def->current = false;
                    parentsnap = NULL;
                }
            }
        }
        vm->current_snapshot = parentsnap;
    }

    if (unlink(snapFile) < 0)
        VIR_WARN("Failed to unlink %s", snapFile);
    virDomainSnapshotObjListRemove(vm->snapshots, snap);

    ret = 0;

cleanup:
    VIR_FREE(snapFile);

    return ret;
}


/* Hash iterator callback to discard multiple snapshots.  */
void qemuDomainSnapshotDiscardAll(void *payload,
                                  const void *name ATTRIBUTE_UNUSED,
                                  void *data)
{
    virDomainSnapshotObjPtr snap = payload;
    virQEMUSnapRemovePtr curr = data;
    int err;

    if (snap->def->current)
        curr->current = true;
    err = qemuDomainSnapshotDiscard(curr->driver, curr->vm, snap, false,
                                    curr->metadata_only);
    if (err && !curr->err)
        curr->err = err;
}


int
qemuDomainSnapshotDiscardAllMetadata(virQEMUDriverPtr driver,
                                     virDomainObjPtr vm)
{
    virQEMUSnapRemove rem;

    rem.driver = driver;
    rem.vm = vm;
    rem.metadata_only = true;
    rem.err = 0;
    virDomainSnapshotForEach(vm->snapshots, qemuDomainSnapshotDiscardAll,
                             &rem);

    return rem.err;
}


/*
 * The caller must hold a lock on both driver and vm, and there must
 * be no remaining references to vm.
 */
void
qemuDomainRemoveInactive(virQEMUDriverPtr driver,
                         virDomainObjPtr vm)
{
    char *snapDir;

    /* Remove any snapshot metadata prior to removing the domain */
    if (qemuDomainSnapshotDiscardAllMetadata(driver, vm) < 0) {
        VIR_WARN("unable to remove all snapshots for domain %s",
                 vm->def->name);
    }
    else if (virAsprintf(&snapDir, "%s/%s", driver->snapshotDir,
                         vm->def->name) < 0) {
        VIR_WARN("unable to remove snapshot directory %s/%s",
                 driver->snapshotDir, vm->def->name);
    } else {
        if (rmdir(snapDir) < 0 && errno != ENOENT)
            VIR_WARN("unable to remove snapshot directory %s", snapDir);
        VIR_FREE(snapDir);
    }
    virDomainRemoveInactive(&driver->domains, vm);
}


void
qemuDomainSetFakeReboot(virQEMUDriverPtr driver,
                        virDomainObjPtr vm,
                        bool value)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;

    if (priv->fakeReboot == value)
        return;

    priv->fakeReboot = value;

    if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0)
        VIR_WARN("Failed to save status on vm %s", vm->def->name);
}

int
qemuDomainCheckDiskPresence(virQEMUDriverPtr driver,
                            virDomainObjPtr vm,
                            bool cold_boot)
{
    int ret = -1;
    int i;
    virDomainDiskDefPtr disk;
    char uuid[VIR_UUID_STRING_BUFLEN];
    virDomainEventPtr event = NULL;

    virUUIDFormat(vm->def->uuid, uuid);

    for (i = 0; i < vm->def->ndisks; i++) {
        disk = vm->def->disks[i];

        if (!disk->startupPolicy || !disk->src)
            continue;

        if (virFileAccessibleAs(disk->src, F_OK,
                                driver->user,
                                driver->group) >= 0) {
            /* disk accessible */
            continue;
        }

        switch ((enum virDomainStartupPolicy) disk->startupPolicy) {
        case VIR_DOMAIN_STARTUP_POLICY_OPTIONAL:
            break;

        case VIR_DOMAIN_STARTUP_POLICY_MANDATORY:
            virReportSystemError(errno,
                                 _("cannot access file '%s'"),
                                 disk->src);
            goto cleanup;
            break;

        case VIR_DOMAIN_STARTUP_POLICY_REQUISITE:
            if (cold_boot) {
                virReportSystemError(errno,
                                     _("cannot access file '%s'"),
                                     disk->src);
                goto cleanup;
            }
            break;

        case VIR_DOMAIN_STARTUP_POLICY_DEFAULT:
        case VIR_DOMAIN_STARTUP_POLICY_LAST:
            /* this should never happen */
            break;
        }

        VIR_DEBUG("Dropping disk '%s' on domain '%s' (UUID '%s') "
                  "due to inaccessible source '%s'",
                  disk->dst, vm->def->name, uuid, disk->src);

        event = virDomainEventDiskChangeNewFromObj(vm, disk->src, NULL,
                                                   disk->info.alias,
                                                   VIR_DOMAIN_EVENT_DISK_CHANGE_MISSING_ON_START);
        if (event)
            qemuDomainEventQueue(driver, event);

        VIR_FREE(disk->src);
    }

    ret = 0;

cleanup:
    return ret;
}
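
The switch above implements the startupPolicy attribute of a disk's
<source> element: 'optional' sources are dropped silently, 'mandatory'
ones always abort the start, and 'requisite' ones abort only on cold
boot. A hedged sketch of the intended call from process startup
(variable names assumed, not quoted from qemu_process.c):

/* cold_boot would be false when restoring from a managed save image,
 * letting 'requisite' media stay absent across the restore */
if (qemuDomainCheckDiskPresence(driver, vm, cold_boot) < 0)
    goto cleanup;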

/*
 * The vm must be locked when any of the following cleanup functions is
 * called.
 */
int
qemuDomainCleanupAdd(virDomainObjPtr vm,
                     qemuDomainCleanupCallback cb)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    int i;

    VIR_DEBUG("vm=%s, cb=%p", vm->def->name, cb);

    for (i = 0; i < priv->ncleanupCallbacks; i++) {
        if (priv->cleanupCallbacks[i] == cb)
            return 0;
    }

    if (VIR_RESIZE_N(priv->cleanupCallbacks,
                     priv->ncleanupCallbacks_max,
                     priv->ncleanupCallbacks, 1) < 0) {
        virReportOOMError();
        return -1;
    }

    priv->cleanupCallbacks[priv->ncleanupCallbacks++] = cb;
    return 0;
}

void
qemuDomainCleanupRemove(virDomainObjPtr vm,
                        qemuDomainCleanupCallback cb)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    int i;

    VIR_DEBUG("vm=%s, cb=%p", vm->def->name, cb);

    for (i = 0; i < priv->ncleanupCallbacks; i++) {
        if (priv->cleanupCallbacks[i] == cb) {
            /* memmove counts bytes, not elements, so scale by the
             * element size when closing the gap */
            memmove(priv->cleanupCallbacks + i,
                    priv->cleanupCallbacks + i + 1,
                    sizeof(*priv->cleanupCallbacks) *
                    (priv->ncleanupCallbacks - i - 1));
            priv->ncleanupCallbacks--;
        }
    }

    VIR_SHRINK_N(priv->cleanupCallbacks,
                 priv->ncleanupCallbacks_max,
                 priv->ncleanupCallbacks_max - priv->ncleanupCallbacks);
}

void
qemuDomainCleanupRun(virQEMUDriverPtr driver,
                     virDomainObjPtr vm)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    int i;

    VIR_DEBUG("driver=%p, vm=%s", driver, vm->def->name);

    /* run cleanup callbacks in reverse order */
    for (i = priv->ncleanupCallbacks - 1; i >= 0; i--) {
        if (priv->cleanupCallbacks[i])
            priv->cleanupCallbacks[i](driver, vm);
    }

    VIR_FREE(priv->cleanupCallbacks);
    priv->ncleanupCallbacks = 0;
    priv->ncleanupCallbacks_max = 0;
}
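
Taken together, Add/Remove/Run form a small LIFO undo stack keyed by
function pointer. A hedged usage sketch; exampleReleaseResource is a
hypothetical callback, not a symbol from the tree:

static void
exampleReleaseResource(virQEMUDriverPtr driver ATTRIBUTE_UNUSED,
                       virDomainObjPtr vm)
{
    VIR_DEBUG("undoing setup for %s", vm->def->name);
    /* release whatever the paired setup step acquired */
}

/* register when the resource is acquired; the scan in
 * qemuDomainCleanupAdd makes double registration harmless */
if (qemuDomainCleanupAdd(vm, exampleReleaseResource) < 0)
    goto error;
/* unregister on early release; otherwise qemuDomainCleanupRun()
 * invokes it when the domain stops */
qemuDomainCleanupRemove(vm, exampleReleaseResource);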
storage: cache backing chain while qemu domain is live
Technically, we should not be re-probing any file that qemu might
be currently writing to. As such, we should cache the backing
file chain prior to starting qemu. This patch adds the cache,
but does not use it until the next patch.
Ultimately, we want to also store the chain in domain XML, so that
it is remembered across libvirtd restarts, and so that the only
kosher way to modify the backing chain of an offline domain will be
through libvirt API calls, but we aren't there yet. So for now, we
merely invalidate the cache any time we do a live operation that
alters the chain (block-pull, block-commit, external disk snapshot),
as well as tear down the cache when the domain is not running.
* src/conf/domain_conf.h (_virDomainDiskDef): New field.
* src/conf/domain_conf.c (virDomainDiskDefFree): Clean new field.
* src/qemu/qemu_domain.h (qemuDomainDetermineDiskChain): New
prototype.
* src/qemu/qemu_domain.c (qemuDomainDetermineDiskChain): New
function.
* src/qemu/qemu_driver.c (qemuDomainAttachDeviceDiskLive)
(qemuDomainChangeDiskMediaLive): Pre-populate chain.
(qemuDomainSnapshotCreateSingleDiskActive): Uncache chain before
snapshot.
* src/qemu/qemu_process.c (qemuProcessHandleBlockJob): Update
chain after block pull.

int
qemuDomainDetermineDiskChain(virQEMUDriverPtr driver,
                             virDomainDiskDefPtr disk,
                             bool force)
{
    bool probe = driver->allowDiskFormatProbing;

    if (!disk->src || disk->type == VIR_DOMAIN_DISK_TYPE_NETWORK)
        return 0;

    if (disk->backingChain) {
        if (force) {
            virStorageFileFreeMetadata(disk->backingChain);
            disk->backingChain = NULL;
        } else {
            return 0;
        }
    }

    disk->backingChain = virStorageFileGetMetadata(disk->src, disk->format,
                                                   driver->user, driver->group,
                                                   probe);
    if (!disk->backingChain)
        return -1;
    return 0;
}
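
A hedged note on the force flag: callers that know the on-disk chain
just changed (a completed block-pull, an external snapshot) should pass
force=true so the cached metadata is dropped and re-read, while
ordinary lookups pass false and reuse the cache. Illustrative call,
not quoted from qemu_process.c:

/* re-probe after a block job rewrote the chain; the stale cache
 * would otherwise survive until the domain stops */
if (qemuDomainDetermineDiskChain(driver, disk, true) < 0)
    VIR_WARN("unable to refresh backing chain for disk %s", disk->dst);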