libvirt/src/qemu/qemu_domain.c

/*
* qemu_domain.c: QEMU domain private state
*
* Copyright (C) 2006-2014 Red Hat, Inc.
* Copyright (C) 2006 Daniel P. Berrange
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library. If not, see
* <http://www.gnu.org/licenses/>.
*
* Author: Daniel P. Berrange <berrange@redhat.com>
*/
#include <config.h>
#include "qemu_domain.h"
#include "qemu_command.h"
#include "qemu_capabilities.h"
#include "qemu_migration.h"
#include "viralloc.h"
#include "virlog.h"
#include "virerror.h"
#include "c-ctype.h"
#include "cpu/cpu.h"
#include "viruuid.h"
#include "virfile.h"
#include "domain_addr.h"
#include "domain_event.h"
#include "virtime.h"
#include "virstoragefile.h"
#include "virstring.h"
#include "storage/storage_driver.h"
#include <sys/time.h>
#include <fcntl.h>
#include <libxml/xpathInternals.h>
#define VIR_FROM_THIS VIR_FROM_QEMU
VIR_LOG_INIT("qemu.qemu_domain");
#define QEMU_NAMESPACE_HREF "http://libvirt.org/schemas/domain/qemu/1.0"
VIR_ENUM_IMPL(qemuDomainJob, QEMU_JOB_LAST,
"none",
"query",
"destroy",
"suspend",
"modify",
"abort",
"migration operation",
"none", /* async job is never stored in job.active */
"async nested",
);
VIR_ENUM_IMPL(qemuDomainAsyncJob, QEMU_ASYNC_JOB_LAST,
"none",
"migration out",
"migration in",
"save",
"dump",
"snapshot",
);
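/* Only migration async jobs have named phases; any other async job
 * formats its phase as "none" and parses it back as 0. */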
const char *
qemuDomainAsyncJobPhaseToString(enum qemuDomainAsyncJob job,
int phase ATTRIBUTE_UNUSED)
{
switch (job) {
case QEMU_ASYNC_JOB_MIGRATION_OUT:
case QEMU_ASYNC_JOB_MIGRATION_IN:
return qemuMigrationJobPhaseTypeToString(phase);
case QEMU_ASYNC_JOB_SAVE:
case QEMU_ASYNC_JOB_DUMP:
case QEMU_ASYNC_JOB_SNAPSHOT:
case QEMU_ASYNC_JOB_NONE:
case QEMU_ASYNC_JOB_LAST:
; /* fall through */
}
return "none";
}
int
qemuDomainAsyncJobPhaseFromString(enum qemuDomainAsyncJob job,
const char *phase)
{
if (!phase)
return 0;
switch (job) {
case QEMU_ASYNC_JOB_MIGRATION_OUT:
case QEMU_ASYNC_JOB_MIGRATION_IN:
return qemuMigrationJobPhaseTypeFromString(phase);
case QEMU_ASYNC_JOB_SAVE:
case QEMU_ASYNC_JOB_DUMP:
case QEMU_ASYNC_JOB_SNAPSHOT:
case QEMU_ASYNC_JOB_NONE:
case QEMU_ASYNC_JOB_LAST:
; /* fall through */
}
if (STREQ(phase, "none"))
return 0;
else
return -1;
}
void qemuDomainEventQueue(virQEMUDriverPtr driver,
virObjectEventPtr event)
{
virObjectEventStateQueue(driver->domainEventState, event);
}
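/* Job handling uses two condition variables: job.cond serializes
 * ordinary synchronous jobs, while job.asyncCond tracks long-running
 * asynchronous jobs (e.g. migration) during which the qemu monitor
 * remains usable for a restricted set of other commands. While an
 * async job is active, only job types permitted by job.mask may
 * start. An async job protects its own monitor calls by entering a
 * nested job, which acquires job.cond and sets job.active without
 * checking asyncJob. */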
static int
qemuDomainObjInitJob(qemuDomainObjPrivatePtr priv)
{
memset(&priv->job, 0, sizeof(priv->job));
if (virCondInit(&priv->job.cond) < 0)
return -1;
if (virCondInit(&priv->job.asyncCond) < 0) {
virCondDestroy(&priv->job.cond);
return -1;
}
return 0;
}
static void
qemuDomainObjResetJob(qemuDomainObjPrivatePtr priv)
{
struct qemuDomainJobObj *job = &priv->job;
job->active = QEMU_JOB_NONE;
job->owner = 0;
}
static void
qemuDomainObjResetAsyncJob(qemuDomainObjPrivatePtr priv)
{
struct qemuDomainJobObj *job = &priv->job;
job->asyncJob = QEMU_ASYNC_JOB_NONE;
job->asyncOwner = 0;
job->phase = 0;
job->mask = DEFAULT_JOB_MASK;
job->start = 0;
job->dump_memory_only = false;
job->asyncAbort = false;
memset(&job->status, 0, sizeof(job->status));
memset(&job->info, 0, sizeof(job->info));
}
void
qemuDomainObjRestoreJob(virDomainObjPtr obj,
struct qemuDomainJobObj *job)
{
qemuDomainObjPrivatePtr priv = obj->privateData;
memset(job, 0, sizeof(*job));
job->active = priv->job.active;
job->owner = priv->job.owner;
job->asyncJob = priv->job.asyncJob;
job->asyncOwner = priv->job.asyncOwner;
job->phase = priv->job.phase;
qemuDomainObjResetJob(priv);
qemuDomainObjResetAsyncJob(priv);
}
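/* Hand over an active job to the calling thread. Owners are recorded
 * as 64-bit debug IDs from virThreadSelfID() because pthread_t is
 * opaque and cannot be portably cast or printed. */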
void
qemuDomainObjTransferJob(virDomainObjPtr obj)
{
qemuDomainObjPrivatePtr priv = obj->privateData;
VIR_DEBUG("Changing job owner from %llu to %llu",
priv->job.owner, virThreadSelfID());
priv->job.owner = virThreadSelfID();
}
static void
qemuDomainObjFreeJob(qemuDomainObjPrivatePtr priv)
{
virCondDestroy(&priv->job.cond);
virCondDestroy(&priv->job.asyncCond);
}
static bool
qemuDomainTrackJob(enum qemuDomainJob job)
{
return (QEMU_DOMAIN_TRACK_JOBS & JOB_MASK(job)) != 0;
}
static void *
qemuDomainObjPrivateAlloc(void)
{
qemuDomainObjPrivatePtr priv;
if (VIR_ALLOC(priv) < 0)
return NULL;
if (qemuDomainObjInitJob(priv) < 0) {
virReportSystemError(errno, "%s",
_("Unable to init qemu driver mutexes"));
goto error;
}
if (virCondInit(&priv->unplugFinished) < 0)
goto error;
if (!(priv->devs = virChrdevAlloc()))
goto error;
priv->migMaxBandwidth = QEMU_DOMAIN_MIG_BANDWIDTH_MAX;
return priv;
error:
VIR_FREE(priv);
return NULL;
}
static void
qemuDomainObjPrivateFree(void *data)
{
qemuDomainObjPrivatePtr priv = data;
virObjectUnref(priv->qemuCaps);
virCgroupFree(&priv->cgroup);
virDomainPCIAddressSetFree(priv->pciaddrs);
virDomainCCWAddressSetFree(priv->ccwaddrs);
virDomainChrSourceDefFree(priv->monConfig);
qemuDomainObjFreeJob(priv);
VIR_FREE(priv->vcpupids);
VIR_FREE(priv->lockState);
VIR_FREE(priv->origname);
virCondDestroy(&priv->unplugFinished);
virChrdevFree(priv->devs);
/* This should never be non-NULL if we get here, but just in case... */
if (priv->mon) {
VIR_ERROR(_("Unexpected QEMU monitor still active during domain deletion"));
qemuMonitorClose(priv->mon);
}
if (priv->agent) {
VIR_ERROR(_("Unexpected QEMU agent still active during domain deletion"));
qemuAgentClose(priv->agent);
}
VIR_FREE(priv->cleanupCallbacks);
VIR_FREE(priv);
}
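/* Format driver-private state into the per-domain status XML.
 * Illustrative output (element values are examples, not exhaustive):
 *   <monitor path='...' json='1' type='unix'/>
 *   <vcpus><vcpu pid='...'/></vcpus>
 *   <qemuCaps><flag name='...'/></qemuCaps>
 *   <lockstate>...</lockstate>
 *   <job type='...' async='...' phase='...'/>
 *   <devices><device alias='...'/></devices>
 */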
static int
qemuDomainObjPrivateXMLFormat(virBufferPtr buf, void *data)
{
qemuDomainObjPrivatePtr priv = data;
const char *monitorpath;
enum qemuDomainJob job;
/* priv->monitor_chr is set only for qemu */
if (priv->monConfig) {
switch (priv->monConfig->type) {
case VIR_DOMAIN_CHR_TYPE_UNIX:
monitorpath = priv->monConfig->data.nix.path;
break;
default:
case VIR_DOMAIN_CHR_TYPE_PTY:
monitorpath = priv->monConfig->data.file.path;
break;
}
virBufferEscapeString(buf, "<monitor path='%s'", monitorpath);
if (priv->monJSON)
virBufferAddLit(buf, " json='1'");
virBufferAsprintf(buf, " type='%s'/>\n",
virDomainChrTypeToString(priv->monConfig->type));
}
if (priv->nvcpupids) {
size_t i;
virBufferAddLit(buf, "<vcpus>\n");
virBufferAdjustIndent(buf, 2);
for (i = 0; i < priv->nvcpupids; i++) {
virBufferAsprintf(buf, "<vcpu pid='%d'/>\n", priv->vcpupids[i]);
}
virBufferAdjustIndent(buf, -2);
virBufferAddLit(buf, "</vcpus>\n");
}
if (priv->qemuCaps) {
size_t i;
virBufferAddLit(buf, "<qemuCaps>\n");
virBufferAdjustIndent(buf, 2);
for (i = 0; i < QEMU_CAPS_LAST; i++) {
if (virQEMUCapsGet(priv->qemuCaps, i)) {
virBufferAsprintf(buf, "<flag name='%s'/>\n",
virQEMUCapsTypeToString(i));
}
}
virBufferAdjustIndent(buf, -2);
virBufferAddLit(buf, "</qemuCaps>\n");
}
if (priv->lockState)
virBufferAsprintf(buf, "<lockstate>%s</lockstate>\n", priv->lockState);
job = priv->job.active;
if (!qemuDomainTrackJob(job))
priv->job.active = QEMU_JOB_NONE;
if (priv->job.active || priv->job.asyncJob) {
virBufferAsprintf(buf, "<job type='%s' async='%s'",
qemuDomainJobTypeToString(priv->job.active),
qemuDomainAsyncJobTypeToString(priv->job.asyncJob));
if (priv->job.phase) {
virBufferAsprintf(buf, " phase='%s'",
qemuDomainAsyncJobPhaseToString(
priv->job.asyncJob, priv->job.phase));
}
virBufferAddLit(buf, "/>\n");
}
priv->job.active = job;
if (priv->fakeReboot)
virBufferAddLit(buf, "<fakereboot/>\n");
if (priv->qemuDevices && *priv->qemuDevices) {
char **tmp = priv->qemuDevices;
virBufferAddLit(buf, "<devices>\n");
virBufferAdjustIndent(buf, 2);
while (*tmp) {
virBufferAsprintf(buf, "<device alias='%s'/>\n", *tmp);
tmp++;
}
virBufferAdjustIndent(buf, -2);
virBufferAddLit(buf, "</devices>\n");
}
if (priv->quiesced)
virBufferAddLit(buf, "<quiesced/>\n");
return 0;
}
static int
qemuDomainObjPrivateXMLParse(xmlXPathContextPtr ctxt, void *data)
{
qemuDomainObjPrivatePtr priv = data;
char *monitorpath;
char *tmp;
int n;
size_t i;
xmlNodePtr *nodes = NULL;
virQEMUCapsPtr qemuCaps = NULL;
if (VIR_ALLOC(priv->monConfig) < 0)
goto error;
if (!(monitorpath =
virXPathString("string(./monitor[1]/@path)", ctxt))) {
virReportError(VIR_ERR_INTERNAL_ERROR,
"%s", _("no monitor path"));
goto error;
}
tmp = virXPathString("string(./monitor[1]/@type)", ctxt);
if (tmp)
priv->monConfig->type = virDomainChrTypeFromString(tmp);
else
priv->monConfig->type = VIR_DOMAIN_CHR_TYPE_PTY;
VIR_FREE(tmp);
priv->monJSON = virXPathBoolean("count(./monitor[@json = '1']) > 0",
ctxt) > 0;
switch (priv->monConfig->type) {
case VIR_DOMAIN_CHR_TYPE_PTY:
priv->monConfig->data.file.path = monitorpath;
break;
case VIR_DOMAIN_CHR_TYPE_UNIX:
priv->monConfig->data.nix.path = monitorpath;
break;
default:
VIR_FREE(monitorpath);
virReportError(VIR_ERR_INTERNAL_ERROR,
_("unsupported monitor type '%s'"),
virDomainChrTypeToString(priv->monConfig->type));
goto error;
}
n = virXPathNodeSet("./vcpus/vcpu", ctxt, &nodes);
if (n < 0)
goto error;
if (n) {
priv->nvcpupids = n;
if (VIR_REALLOC_N(priv->vcpupids, priv->nvcpupids) < 0)
goto error;
for (i = 0; i < n; i++) {
char *pidstr = virXMLPropString(nodes[i], "pid");
if (!pidstr)
goto error;
if (virStrToLong_i(pidstr, NULL, 10, &(priv->vcpupids[i])) < 0) {
VIR_FREE(pidstr);
goto error;
}
VIR_FREE(pidstr);
}
VIR_FREE(nodes);
}
if ((n = virXPathNodeSet("./qemuCaps/flag", ctxt, &nodes)) < 0) {
virReportError(VIR_ERR_INTERNAL_ERROR,
"%s", _("failed to parse qemu capabilities flags"));
goto error;
}
if (n > 0) {
if (!(qemuCaps = virQEMUCapsNew()))
goto error;
for (i = 0; i < n; i++) {
char *str = virXMLPropString(nodes[i], "name");
if (str) {
int flag = virQEMUCapsTypeFromString(str);
if (flag < 0) {
virReportError(VIR_ERR_INTERNAL_ERROR,
_("Unknown qemu capabilities flag %s"), str);
VIR_FREE(str);
goto error;
}
VIR_FREE(str);
virQEMUCapsSet(qemuCaps, flag);
}
}
priv->qemuCaps = qemuCaps;
}
VIR_FREE(nodes);
priv->lockState = virXPathString("string(./lockstate)", ctxt);
if ((tmp = virXPathString("string(./job[1]/@type)", ctxt))) {
int type;
if ((type = qemuDomainJobTypeFromString(tmp)) < 0) {
virReportError(VIR_ERR_INTERNAL_ERROR,
_("Unknown job type %s"), tmp);
VIR_FREE(tmp);
goto error;
}
VIR_FREE(tmp);
priv->job.active = type;
}
if ((tmp = virXPathString("string(./job[1]/@async)", ctxt))) {
int async;
if ((async = qemuDomainAsyncJobTypeFromString(tmp)) < 0) {
virReportError(VIR_ERR_INTERNAL_ERROR,
_("Unknown async job type %s"), tmp);
VIR_FREE(tmp);
goto error;
}
VIR_FREE(tmp);
priv->job.asyncJob = async;
if ((tmp = virXPathString("string(./job[1]/@phase)", ctxt))) {
priv->job.phase = qemuDomainAsyncJobPhaseFromString(async, tmp);
if (priv->job.phase < 0) {
virReportError(VIR_ERR_INTERNAL_ERROR,
_("Unknown job phase %s"), tmp);
VIR_FREE(tmp);
goto error;
}
VIR_FREE(tmp);
}
}
priv->fakeReboot = virXPathBoolean("boolean(./fakereboot)", ctxt) == 1;
if ((n = virXPathNodeSet("./devices/device", ctxt, &nodes)) < 0) {
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
_("failed to parse qemu device list"));
goto error;
}
if (n > 0) {
/* NULL-terminated list */
if (VIR_ALLOC_N(priv->qemuDevices, n + 1) < 0)
goto error;
for (i = 0; i < n; i++) {
priv->qemuDevices[i] = virXMLPropString(nodes[i], "alias");
if (!priv->qemuDevices[i]) {
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
_("failed to parse qemu device list"));
goto error;
}
}
}
VIR_FREE(nodes);
priv->quiesced = virXPathBoolean("boolean(./quiesced)", ctxt) == 1;
return 0;
error:
virDomainChrSourceDefFree(priv->monConfig);
priv->monConfig = NULL;
VIR_FREE(nodes);
virStringFreeList(priv->qemuDevices);
priv->qemuDevices = NULL;
virObjectUnref(qemuCaps);
return -1;
}
virDomainXMLPrivateDataCallbacks virQEMUDriverPrivateDataCallbacks = {
.alloc = qemuDomainObjPrivateAlloc,
.free = qemuDomainObjPrivateFree,
.parse = qemuDomainObjPrivateXMLParse,
.format = qemuDomainObjPrivateXMLFormat,
};
static void
qemuDomainDefNamespaceFree(void *nsdata)
{
qemuDomainCmdlineDefPtr cmd = nsdata;
qemuDomainCmdlineDefFree(cmd);
}
static int
qemuDomainDefNamespaceParse(xmlDocPtr xml ATTRIBUTE_UNUSED,
xmlNodePtr root ATTRIBUTE_UNUSED,
xmlXPathContextPtr ctxt,
void **data)
{
qemuDomainCmdlineDefPtr cmd = NULL;
bool uses_qemu_ns = false;
xmlNodePtr *nodes = NULL;
int n;
size_t i;
if (xmlXPathRegisterNs(ctxt, BAD_CAST "qemu", BAD_CAST QEMU_NAMESPACE_HREF) < 0) {
virReportError(VIR_ERR_INTERNAL_ERROR,
_("Failed to register xml namespace '%s'"),
QEMU_NAMESPACE_HREF);
return -1;
}
if (VIR_ALLOC(cmd) < 0)
return -1;
/* first handle the extra command-line arguments */
n = virXPathNodeSet("./qemu:commandline/qemu:arg", ctxt, &nodes);
if (n < 0)
goto error;
uses_qemu_ns |= n > 0;
if (n && VIR_ALLOC_N(cmd->args, n) < 0)
goto error;
for (i = 0; i < n; i++) {
cmd->args[cmd->num_args] = virXMLPropString(nodes[i], "value");
if (cmd->args[cmd->num_args] == NULL) {
virReportError(VIR_ERR_INTERNAL_ERROR,
"%s", _("No qemu command-line argument specified"));
goto error;
}
cmd->num_args++;
}
VIR_FREE(nodes);
/* now handle the extra environment variables */
n = virXPathNodeSet("./qemu:commandline/qemu:env", ctxt, &nodes);
if (n < 0)
goto error;
uses_qemu_ns |= n > 0;
if (n && VIR_ALLOC_N(cmd->env_name, n) < 0)
goto error;
if (n && VIR_ALLOC_N(cmd->env_value, n) < 0)
goto error;
for (i = 0; i < n; i++) {
char *tmp;
tmp = virXMLPropString(nodes[i], "name");
if (tmp == NULL) {
virReportError(VIR_ERR_INTERNAL_ERROR,
"%s", _("No qemu environment name specified"));
goto error;
}
if (tmp[0] == '\0') {
virReportError(VIR_ERR_INTERNAL_ERROR,
"%s", _("Empty qemu environment name specified"));
goto error;
}
if (!c_isalpha(tmp[0]) && tmp[0] != '_') {
virReportError(VIR_ERR_INTERNAL_ERROR,
"%s", _("Invalid environment name, it must begin with a letter or underscore"));
goto error;
}
if (strspn(tmp, "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_") != strlen(tmp)) {
virReportError(VIR_ERR_INTERNAL_ERROR,
"%s", _("Invalid environment name, it must contain only alphanumerics and underscore"));
goto error;
}
cmd->env_name[cmd->num_env] = tmp;
cmd->env_value[cmd->num_env] = virXMLPropString(nodes[i], "value");
/* a NULL value for command is allowed, since it might be empty */
cmd->num_env++;
}
VIR_FREE(nodes);
if (uses_qemu_ns)
*data = cmd;
else
VIR_FREE(cmd);
return 0;
error:
VIR_FREE(nodes);
qemuDomainDefNamespaceFree(cmd);
return -1;
}
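/* Format the qemu XML namespace extensions, e.g.:
 *   <qemu:commandline>
 *     <qemu:arg value='-newarg'/>
 *     <qemu:env name='QEMU_ENV' value='VAL'/>
 *   </qemu:commandline>
 */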
static int
qemuDomainDefNamespaceFormatXML(virBufferPtr buf,
void *nsdata)
{
qemuDomainCmdlineDefPtr cmd = nsdata;
size_t i;
if (!cmd->num_args && !cmd->num_env)
return 0;
virBufferAddLit(buf, "<qemu:commandline>\n");
virBufferAdjustIndent(buf, 2);
for (i = 0; i < cmd->num_args; i++)
virBufferEscapeString(buf, "<qemu:arg value='%s'/>\n",
cmd->args[i]);
for (i = 0; i < cmd->num_env; i++) {
virBufferAsprintf(buf, "<qemu:env name='%s'", cmd->env_name[i]);
if (cmd->env_value[i])
virBufferEscapeString(buf, " value='%s'", cmd->env_value[i]);
virBufferAddLit(buf, "/>\n");
}
virBufferAdjustIndent(buf, -2);
virBufferAddLit(buf, "</qemu:commandline>\n");
return 0;
}
static const char *
qemuDomainDefNamespaceHref(void)
{
return "xmlns:qemu='" QEMU_NAMESPACE_HREF "'";
}
virDomainXMLNamespace virQEMUDriverDomainXMLNamespace = {
.parse = qemuDomainDefNamespaceParse,
.free = qemuDomainDefNamespaceFree,
.format = qemuDomainDefNamespaceFormatXML,
.href = qemuDomainDefNamespaceHref,
};
static int
qemuDomainDefPostParse(virDomainDefPtr def,
virCapsPtr caps,
void *opaque ATTRIBUTE_UNUSED)
{
bool addDefaultUSB = true;
bool addImplicitSATA = false;
bool addPCIRoot = false;
bool addPCIeRoot = false;
bool addDefaultMemballoon = true;
bool addDefaultUSBKBD = false;
bool addDefaultUSBMouse = false;
/* check for emulator and create a default one if needed */
if (!def->emulator &&
!(def->emulator = virDomainDefGetDefaultEmulator(def, caps)))
return -1;
/* Add implicit PCI root controller if the machine has one */
switch (def->os.arch) {
case VIR_ARCH_I686:
case VIR_ARCH_X86_64:
if (!def->os.machine)
break;
if (STREQ(def->os.machine, "isapc")) {
addDefaultUSB = false;
break;
}
if (STRPREFIX(def->os.machine, "pc-q35") ||
STREQ(def->os.machine, "q35")) {
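/* q35 has no implicit USB controller: "-usb" would add three
 * USB2 controllers at slot 0x1D rather than a single PIIX3 USB1
 * controller at slot 1 function 2. It does, however, always have
 * an implicit SATA controller at slot 0x1F function 2. */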
addPCIeRoot = true;
addDefaultUSB = false;
addImplicitSATA = true;
break;
}
if (!STRPREFIX(def->os.machine, "pc-0.") &&
!STRPREFIX(def->os.machine, "pc-1.") &&
!STRPREFIX(def->os.machine, "pc-i440") &&
!STREQ(def->os.machine, "pc") &&
!STRPREFIX(def->os.machine, "rhel"))
break;
addPCIRoot = true;
break;
case VIR_ARCH_ARMV7L:
addDefaultUSB = false;
addDefaultMemballoon = false;
break;
case VIR_ARCH_AARCH64:
addDefaultUSB = false;
addDefaultMemballoon = false;
break;
case VIR_ARCH_PPC64:
addPCIRoot = true;
addDefaultUSBKBD = true;
addDefaultUSBMouse = true;
break;
case VIR_ARCH_ALPHA:
case VIR_ARCH_PPC:
case VIR_ARCH_PPCEMB:
case VIR_ARCH_SH4:
case VIR_ARCH_SH4EB:
addPCIRoot = true;
break;
default:
break;
}
if (addDefaultUSB &&
virDomainDefMaybeAddController(
def, VIR_DOMAIN_CONTROLLER_TYPE_USB, 0, -1) < 0)
return -1;
if (addImplicitSATA &&
virDomainDefMaybeAddController(
def, VIR_DOMAIN_CONTROLLER_TYPE_SATA, 0, -1) < 0)
return -1;
if (addPCIRoot &&
virDomainDefMaybeAddController(
def, VIR_DOMAIN_CONTROLLER_TYPE_PCI, 0,
VIR_DOMAIN_CONTROLLER_MODEL_PCI_ROOT) < 0)
return -1;
/* When a machine has a pcie-root, make sure that there is always
* a dmi-to-pci-bridge controller added as bus 1, and a pci-bridge
* as bus 2, so that standard PCI devices can be connected
*/
if (addPCIeRoot) {
if (virDomainDefMaybeAddController(
def, VIR_DOMAIN_CONTROLLER_TYPE_PCI, 0,
VIR_DOMAIN_CONTROLLER_MODEL_PCIE_ROOT) < 0 ||
virDomainDefMaybeAddController(
def, VIR_DOMAIN_CONTROLLER_TYPE_PCI, 1,
VIR_DOMAIN_CONTROLLER_MODEL_DMI_TO_PCI_BRIDGE) < 0 ||
virDomainDefMaybeAddController(
def, VIR_DOMAIN_CONTROLLER_TYPE_PCI, 2,
VIR_DOMAIN_CONTROLLER_MODEL_PCI_BRIDGE) < 0) {
return -1;
}
}
if (addDefaultMemballoon && !def->memballoon) {
virDomainMemballoonDefPtr memballoon;
if (VIR_ALLOC(memballoon) < 0)
return -1;
memballoon->model = VIR_DOMAIN_MEMBALLOON_MODEL_VIRTIO;
def->memballoon = memballoon;
}
if (addDefaultUSBKBD &&
def->ngraphics > 0 &&
virDomainDefMaybeAddInput(def,
VIR_DOMAIN_INPUT_TYPE_KBD,
VIR_DOMAIN_INPUT_BUS_USB) < 0)
return -1;
if (addDefaultUSBMouse &&
def->ngraphics > 0 &&
virDomainDefMaybeAddInput(def,
VIR_DOMAIN_INPUT_TYPE_MOUSE,
VIR_DOMAIN_INPUT_BUS_USB) < 0)
return -1;
return 0;
}
static const char *
qemuDomainDefaultNetModel(const virDomainDef *def)
{
if (def->os.arch == VIR_ARCH_S390 ||
def->os.arch == VIR_ARCH_S390X)
return "virtio";
if (def->os.arch == VIR_ARCH_ARMV7L ||
def->os.arch == VIR_ARCH_AARCH64) {
if (STREQ(def->os.machine, "versatilepb"))
return "smc91c111";
if (STREQ(def->os.machine, "virt"))
return "virtio";
/* Incomplete. vexpress (and a few others) use this, but not all
* arm boards */
return "lan9118";
}
return "rtl8139";
}
static int
qemuDomainDeviceDefPostParse(virDomainDeviceDefPtr dev,
const virDomainDef *def,
virCapsPtr caps ATTRIBUTE_UNUSED,
void *opaque)
{
int ret = -1;
virQEMUDriverPtr driver = opaque;
virQEMUDriverConfigPtr cfg = NULL;
if (dev->type == VIR_DOMAIN_DEVICE_NET &&
dev->data.net->type != VIR_DOMAIN_NET_TYPE_HOSTDEV &&
!dev->data.net->model) {
if (VIR_STRDUP(dev->data.net->model,
qemuDomainDefaultNetModel(def)) < 0)
goto cleanup;
}
/* set default disk types and drivers */
if (dev->type == VIR_DOMAIN_DEVICE_DISK) {
virDomainDiskDefPtr disk = dev->data.disk;
/* both of these require data from the driver config */
if (driver && (cfg = virQEMUDriverGetConfig(driver))) {
/* assign default storage format and driver according to config */
if (cfg->allowDiskFormatProbing) {
/* default disk format for drives */
if (virDomainDiskGetFormat(disk) == VIR_STORAGE_FILE_NONE &&
(virDomainDiskGetType(disk) == VIR_STORAGE_TYPE_FILE ||
virDomainDiskGetType(disk) == VIR_STORAGE_TYPE_BLOCK))
virDomainDiskSetFormat(disk, VIR_STORAGE_FILE_AUTO);
/* default disk format for mirrored drive */
if (disk->mirror &&
disk->mirror->format == VIR_STORAGE_FILE_NONE)
disk->mirror->format = VIR_STORAGE_FILE_AUTO;
} else {
/* default driver if probing is forbidden */
if (!virDomainDiskGetDriver(disk) &&
virDomainDiskSetDriver(disk, "qemu") < 0)
goto cleanup;
/* default disk format for drives */
if (virDomainDiskGetFormat(disk) == VIR_STORAGE_FILE_NONE &&
(virDomainDiskGetType(disk) == VIR_STORAGE_TYPE_FILE ||
virDomainDiskGetType(disk) == VIR_STORAGE_TYPE_BLOCK))
virDomainDiskSetFormat(disk, VIR_STORAGE_FILE_RAW);
/* default disk format for mirrored drive */
if (disk->mirror &&
disk->mirror->format == VIR_STORAGE_FILE_NONE)
disk->mirror->format = VIR_STORAGE_FILE_RAW;
}
}
}
/* set the default console type for S390 arches */
if (dev->type == VIR_DOMAIN_DEVICE_CHR &&
dev->data.chr->deviceType == VIR_DOMAIN_CHR_DEVICE_TYPE_CONSOLE &&
dev->data.chr->targetType == VIR_DOMAIN_CHR_CONSOLE_TARGET_TYPE_NONE &&
(def->os.arch == VIR_ARCH_S390 || def->os.arch == VIR_ARCH_S390X))
dev->data.chr->targetType = VIR_DOMAIN_CHR_CONSOLE_TARGET_TYPE_VIRTIO;
/* set the default USB model to none for s390 unless an address is found */
if (dev->type == VIR_DOMAIN_DEVICE_CONTROLLER &&
dev->data.controller->type == VIR_DOMAIN_CONTROLLER_TYPE_USB &&
dev->data.controller->model == -1 &&
dev->data.controller->info.type == VIR_DOMAIN_DEVICE_ADDRESS_TYPE_NONE &&
(def->os.arch == VIR_ARCH_S390 || def->os.arch == VIR_ARCH_S390X))
dev->data.controller->model = VIR_DOMAIN_CONTROLLER_MODEL_USB_NONE;
/* auto generate unix socket path */
if (dev->type == VIR_DOMAIN_DEVICE_CHR &&
dev->data.chr->deviceType == VIR_DOMAIN_CHR_DEVICE_TYPE_CHANNEL &&
dev->data.chr->targetType == VIR_DOMAIN_CHR_CHANNEL_TARGET_TYPE_VIRTIO &&
dev->data.chr->source.type == VIR_DOMAIN_CHR_TYPE_UNIX &&
!dev->data.chr->source.data.nix.path &&
(driver && (cfg = virQEMUDriverGetConfig(driver)))) {
if (virAsprintf(&dev->data.chr->source.data.nix.path,
"%s/channel/target/%s.%s",
cfg->libDir, def->name,
dev->data.chr->target.name) < 0)
goto cleanup;
dev->data.chr->source.data.nix.listen = true;
}
ret = 0;
cleanup:
virObjectUnref(cfg);
return ret;
}
virDomainDefParserConfig virQEMUDriverDomainDefParserConfig = {
.devicesPostParseCallback = qemuDomainDeviceDefPostParse,
.domainPostParseCallback = qemuDomainDefPostParse,
};
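/* Write the current job state into the status XML so it can be
 * restored (via qemuDomainObjRestoreJob) if libvirtd restarts while
 * the domain is running. */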
static void
qemuDomainObjSaveJob(virQEMUDriverPtr driver, virDomainObjPtr obj)
{
virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
if (virDomainObjIsActive(obj)) {
if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, obj) < 0)
VIR_WARN("Failed to save status on vm %s", obj->def->name);
}
virObjectUnref(cfg);
}
void
qemuDomainObjSetJobPhase(virQEMUDriverPtr driver,
                         virDomainObjPtr obj,
                         int phase)
{
    qemuDomainObjPrivatePtr priv = obj->privateData;
    unsigned long long me = virThreadSelfID();

    if (!priv->job.asyncJob)
        return;

    VIR_DEBUG("Setting '%s' phase to '%s'",
              qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
              qemuDomainAsyncJobPhaseToString(priv->job.asyncJob, phase));

    if (priv->job.asyncOwner && me != priv->job.asyncOwner) {
        VIR_WARN("'%s' async job is owned by thread %llu",
                 qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
                 priv->job.asyncOwner);
    }

    priv->job.phase = phase;
    priv->job.asyncOwner = me;
    qemuDomainObjSaveJob(driver, obj);
}

void
qemuDomainObjSetAsyncJobMask(virDomainObjPtr obj,
                             unsigned long long allowedJobs)
{
    qemuDomainObjPrivatePtr priv = obj->privateData;
    if (!priv->job.asyncJob)
        return;

    priv->job.mask = allowedJobs | JOB_MASK(QEMU_JOB_DESTROY);
}

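/* Illustrative sketch (not part of this file): the owner of an async
 * job can narrow the set of synchronous jobs allowed to interleave
 * with it.  Note that JOB_MASK(QEMU_JOB_DESTROY) is always re-added
 * above, so the domain can still be destroyed while the async job is
 * running.  Assuming vm is a hypothetical locked virDomainObjPtr whose
 * async job this thread owns:
 *
 *     qemuDomainObjSetAsyncJobMask(vm, JOB_MASK(QEMU_JOB_QUERY) |
 *                                      JOB_MASK(QEMU_JOB_ABORT));
 */
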
void
qemuDomainObjDiscardAsyncJob(virQEMUDriverPtr driver, virDomainObjPtr obj)
{
    qemuDomainObjPrivatePtr priv = obj->privateData;

    if (priv->job.active == QEMU_JOB_ASYNC_NESTED)
        qemuDomainObjResetJob(priv);
    qemuDomainObjResetAsyncJob(priv);
    qemuDomainObjSaveJob(driver, obj);
}
void
qemuDomainObjReleaseAsyncJob(virDomainObjPtr obj)
{
    qemuDomainObjPrivatePtr priv = obj->privateData;

    VIR_DEBUG("Releasing ownership of '%s' async job",
              qemuDomainAsyncJobTypeToString(priv->job.asyncJob));

    if (priv->job.asyncOwner != virThreadSelfID()) {
        VIR_WARN("'%s' async job is owned by thread %llu",
                 qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
                 priv->job.asyncOwner);
    }
    priv->job.asyncOwner = 0;
}

static bool
qemuDomainNestedJobAllowed(qemuDomainObjPrivatePtr priv, enum qemuDomainJob job)
{
    return !priv->job.asyncJob || (priv->job.mask & JOB_MASK(job)) != 0;
}

bool
qemuDomainJobAllowed(qemuDomainObjPrivatePtr priv, enum qemuDomainJob job)
{
    return !priv->job.active && qemuDomainNestedJobAllowed(priv, job);
}

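/* Illustrative sketch: with the mask from the example above and no
 * synchronous job currently active, a concurrent thread asking for a
 * query job passes both checks, while a modify job is rejected until
 * the async job finishes:
 *
 *     qemuDomainJobAllowed(priv, QEMU_JOB_QUERY);   // true
 *     qemuDomainJobAllowed(priv, QEMU_JOB_MODIFY);  // false
 */
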
/* Give up waiting for mutex after 30 seconds */
#define QEMU_JOB_WAIT_TIME (1000ull * 30)
/*
 * obj must be locked before calling
 */
static int ATTRIBUTE_NONNULL(1)
qemuDomainObjBeginJobInternal(virQEMUDriverPtr driver,
                              virDomainObjPtr obj,
                              enum qemuDomainJob job,
                              enum qemuDomainAsyncJob asyncJob)
{
    qemuDomainObjPrivatePtr priv = obj->privateData;
    unsigned long long now;
    unsigned long long then;
    bool nested = job == QEMU_JOB_ASYNC_NESTED;
    virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
    int ret;

    VIR_DEBUG("Starting %s: %s (async=%s vm=%p name=%s)",
              job == QEMU_JOB_ASYNC ? "async job" : "job",
              qemuDomainJobTypeToString(job),
              qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
              obj, obj->def->name);

    priv->jobs_queued++;

    if (virTimeMillisNow(&now) < 0) {
        virObjectUnref(cfg);
        return -1;
    }
    then = now + QEMU_JOB_WAIT_TIME;

    virObjectRef(obj);

 retry:
    if (cfg->maxQueuedJobs &&
        priv->jobs_queued > cfg->maxQueuedJobs) {
        goto error;
    }

    while (!nested && !qemuDomainNestedJobAllowed(priv, job)) {
        VIR_DEBUG("Waiting for async job (vm=%p name=%s)", obj, obj->def->name);
        if (virCondWaitUntil(&priv->job.asyncCond, &obj->parent.lock, then) < 0)
            goto error;
    }

    while (priv->job.active) {
        VIR_DEBUG("Waiting for job (vm=%p name=%s)", obj, obj->def->name);
        if (virCondWaitUntil(&priv->job.cond, &obj->parent.lock, then) < 0)
            goto error;
    }

    /* No job is active but a new async job could have been started while obj
     * was unlocked, so we need to recheck it. */
    if (!nested && !qemuDomainNestedJobAllowed(priv, job))
        goto retry;

    qemuDomainObjResetJob(priv);
    if (job != QEMU_JOB_ASYNC) {
        VIR_DEBUG("Started job: %s (async=%s vm=%p name=%s)",
                  qemuDomainJobTypeToString(job),
                  qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
                  obj, obj->def->name);
        priv->job.active = job;
        priv->job.owner = virThreadSelfID();
    } else {
        VIR_DEBUG("Started async job: %s (vm=%p name=%s)",
                  qemuDomainAsyncJobTypeToString(asyncJob),
                  obj, obj->def->name);
        qemuDomainObjResetAsyncJob(priv);
        priv->job.asyncJob = asyncJob;
        priv->job.asyncOwner = virThreadSelfID();
        priv->job.start = now;
    }

    if (qemuDomainTrackJob(job))
        qemuDomainObjSaveJob(driver, obj);

    virObjectUnref(cfg);
    return 0;

 error:
    VIR_WARN("Cannot start job (%s, %s) for domain %s;"
" current job is (%s, %s) owned by (%llu, %llu)",
qemuDomainJobTypeToString(job),
qemuDomainAsyncJobTypeToString(asyncJob),
obj->def->name,
qemuDomainJobTypeToString(priv->job.active),
qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
priv->job.owner, priv->job.asyncOwner);
ret = -1;
if (errno == ETIMEDOUT) {
virReportError(VIR_ERR_OPERATION_TIMEOUT,
"%s", _("cannot acquire state change lock"));
ret = -2;
} else if (cfg->maxQueuedJobs &&
priv->jobs_queued > cfg->maxQueuedJobs) {
virReportError(VIR_ERR_OPERATION_FAILED,
"%s", _("cannot acquire state change lock "
"due to max_queued limit"));
ret = -2;
} else {
        virReportSystemError(errno,
                             "%s", _("cannot acquire job mutex"));
    }

    priv->jobs_queued--;
    virObjectUnref(obj);
    virObjectUnref(cfg);
    return ret;
}
/*
 * obj must be locked before calling
 *
 * This must be called by anything that will change the VM state
 * in any way, or anything that will use the QEMU monitor.
 *
 * Upon successful return, the object will have its ref count increased;
 * successful calls must eventually be followed by EndJob.
 */
int qemuDomainObjBeginJob(virQEMUDriverPtr driver,
                          virDomainObjPtr obj,
                          enum qemuDomainJob job)
{
    if (qemuDomainObjBeginJobInternal(driver, obj, job,
                                      QEMU_ASYNC_JOB_NONE) < 0)
        return -1;
    else
        return 0;
}
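/* Typical caller pattern (a sketch, assuming a locked vm looked up from
 * the domain list; error handling trimmed):
 *
 *     if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
 *         goto cleanup;
 *     ... change VM state and/or talk to the QEMU monitor ...
 *     if (!qemuDomainObjEndJob(driver, vm))
 *         vm = NULL;  // the last reference was dropped
 *
 *  cleanup:
 *     if (vm)
 *         virObjectUnlock(vm);
 */
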
int qemuDomainObjBeginAsyncJob(virQEMUDriverPtr driver,
                               virDomainObjPtr obj,
                               enum qemuDomainAsyncJob asyncJob)
{
    if (qemuDomainObjBeginJobInternal(driver, obj, QEMU_JOB_ASYNC,
                                      asyncJob) < 0)
        return -1;
    else
        return 0;
}
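/* Sketch of an async job lifecycle (modeled loosely on the migration
 * and save code, simplified): begin the async job, optionally narrow
 * the job mask, do the long-running work (monitor calls take a nested
 * job internally), then end the async job:
 *
 *     if (qemuDomainObjBeginAsyncJob(driver, vm,
 *                                    QEMU_ASYNC_JOB_SAVE) < 0)
 *         goto cleanup;
 *     qemuDomainObjSetAsyncJobMask(vm, JOB_MASK(QEMU_JOB_QUERY));
 *     ... long-running work ...
 *     if (!qemuDomainObjEndAsyncJob(driver, vm))
 *         vm = NULL;
 */
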
static int ATTRIBUTE_RETURN_CHECK
qemuDomainObjBeginNestedJob(virQEMUDriverPtr driver,
                            virDomainObjPtr obj,
                            enum qemuDomainAsyncJob asyncJob)
{
    qemuDomainObjPrivatePtr priv = obj->privateData;

    if (asyncJob != priv->job.asyncJob) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       _("unexpected async job %d"), asyncJob);
        return -1;
    }

    if (priv->job.asyncOwner != virThreadSelfID()) {
        VIR_WARN("This thread doesn't seem to be the async job owner: %llu",
                 priv->job.asyncOwner);
    }

    return qemuDomainObjBeginJobInternal(driver, obj,
                                         QEMU_JOB_ASYNC_NESTED,
                                         QEMU_ASYNC_JOB_NONE);
}
/*
 * obj must be locked before calling
 *
 * To be called after completing the work associated with the
 * earlier qemuDomainObjBeginJob() call
 *
 * Returns true if @obj was still referenced, false if it was
 * disposed of.
 */
bool qemuDomainObjEndJob(virQEMUDriverPtr driver, virDomainObjPtr obj)
{
    qemuDomainObjPrivatePtr priv = obj->privateData;
    enum qemuDomainJob job = priv->job.active;

    priv->jobs_queued--;

    VIR_DEBUG("Stopping job: %s (async=%s vm=%p name=%s)",
              qemuDomainJobTypeToString(job),
              qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
              obj, obj->def->name);

    qemuDomainObjResetJob(priv);
    if (qemuDomainTrackJob(job))
        qemuDomainObjSaveJob(driver, obj);
    virCondSignal(&priv->job.cond);

    return virObjectUnref(obj);
}
bool
qemuDomainObjEndAsyncJob(virQEMUDriverPtr driver, virDomainObjPtr obj)
{
qemuDomainObjPrivatePtr priv = obj->privateData;
priv->jobs_queued--;
VIR_DEBUG("Stopping async job: %s (vm=%p name=%s)",
qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
obj, obj->def->name);
qemuDomainObjResetAsyncJob(priv);
qemuDomainObjSaveJob(driver, obj);
virCondBroadcast(&priv->job.asyncCond);
return virObjectUnref(obj);
}
void
qemuDomainObjAbortAsyncJob(virDomainObjPtr obj)
{
qemuDomainObjPrivatePtr priv = obj->privateData;
VIR_DEBUG("Requesting abort of async job: %s (vm=%p name=%s)",
qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
obj, obj->def->name);
priv->job.asyncAbort = true;
}
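/* The abort flag is advisory: a long-running async job is expected to
 * check priv->job.asyncAbort between monitor calls and fail with
 * VIR_ERR_OPERATION_ABORTED.  A sketch (the surrounding loop is
 * hypothetical, modeled on the migration completion wait):
 *
 *     while (!done) {
 *         if (priv->job.asyncAbort) {
 *             virReportError(VIR_ERR_OPERATION_ABORTED, "%s",
 *                            _("canceled by client"));
 *             return -1;
 *         }
 *         (poll job progress via a nested monitor job)
 *     }
 */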
/*
* obj must be locked before calling
*
* To be called immediately before any QEMU monitor API call
* Must have already called qemuDomainObjBeginJob() and checked
* that the VM is still active; may not be used for nested async jobs.
*
* To be followed with qemuDomainObjExitMonitor() once complete
*/
static int
qemuDomainObjEnterMonitorInternal(virQEMUDriverPtr driver,
virDomainObjPtr obj,
enum qemuDomainAsyncJob asyncJob)
{
qemuDomainObjPrivatePtr priv = obj->privateData;
if (asyncJob != QEMU_ASYNC_JOB_NONE) {
int ret;
if ((ret = qemuDomainObjBeginNestedJob(driver, obj, asyncJob)) < 0)
return ret;
if (!virDomainObjIsActive(obj)) {
virReportError(VIR_ERR_OPERATION_FAILED, "%s",
_("domain is no longer running"));
/* Still referenced by the containing async job. */
ignore_value(qemuDomainObjEndJob(driver, obj));
return -1;
}
} else if (priv->job.asyncOwner == virThreadSelfID()) {
VIR_WARN("This thread seems to be the async job owner; entering"
" monitor without asking for a nested job is dangerous");
}
VIR_DEBUG("Entering monitor (mon=%p vm=%p name=%s)",
priv->mon, obj, obj->def->name);
virObjectLock(priv->mon);
virObjectRef(priv->mon);
ignore_value(virTimeMillisNow(&priv->monStart));
virObjectUnlock(obj);
return 0;
}
static void ATTRIBUTE_NONNULL(1)
qemuDomainObjExitMonitorInternal(virQEMUDriverPtr driver,
virDomainObjPtr obj)
{
qemuDomainObjPrivatePtr priv = obj->privateData;
bool hasRefs;
hasRefs = virObjectUnref(priv->mon);
if (hasRefs)
virObjectUnlock(priv->mon);
virObjectLock(obj);
VIR_DEBUG("Exited monitor (mon=%p vm=%p name=%s)",
priv->mon, obj, obj->def->name);
priv->monStart = 0;
if (!hasRefs)
priv->mon = NULL;
if (priv->job.active == QEMU_JOB_ASYNC_NESTED) {
qemuDomainObjResetJob(priv);
qemuDomainObjSaveJob(driver, obj);
virCondSignal(&priv->job.cond);
virObjectUnref(obj);
}
}
void qemuDomainObjEnterMonitor(virQEMUDriverPtr driver,
virDomainObjPtr obj)
{
ignore_value(qemuDomainObjEnterMonitorInternal(driver, obj,
QEMU_ASYNC_JOB_NONE));
}
/* obj must NOT be locked before calling
*
* Should be paired with an earlier qemuDomainObjEnterMonitor() call
*/
void qemuDomainObjExitMonitor(virQEMUDriverPtr driver,
virDomainObjPtr obj)
{
qemuDomainObjExitMonitorInternal(driver, obj);
}
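/* A sketch of the intended pairing inside a synchronous job;
 * qemuMonitorGetBalloonInfo() stands in for any monitor API:
 *
 *     qemuDomainObjEnterMonitor(driver, vm);
 *     ret = qemuMonitorGetBalloonInfo(priv->mon, &balloon);
 *     qemuDomainObjExitMonitor(driver, vm);
 *
 * Note that @vm is unlocked between the two calls, so cached state
 * derived from vm->def must be revalidated after exiting.
 */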
/*
* obj must be locked before calling
*
* To be called immediately before any QEMU monitor API call.
* Must have already either called qemuDomainObjBeginJob()
* and checked that the VM is still active, with asyncJob of
* QEMU_ASYNC_JOB_NONE; or already called qemuDomainObjBeginAsyncJob(),
* with the same asyncJob.
*
* Returns 0 if job was started, in which case this must be followed with
* qemuDomainObjExitMonitor(); -2 if waiting for the nested job times out;
* or -1 if the job could not be started (probably because the vm exited
* in the meantime).
*/
int
qemuDomainObjEnterMonitorAsync(virQEMUDriverPtr driver,
virDomainObjPtr obj,
enum qemuDomainAsyncJob asyncJob)
{
return qemuDomainObjEnterMonitorInternal(driver, obj, asyncJob);
}
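/* A sketch of the async variant as used from inside an async job
 * worker; the monitor call is a stand-in:
 *
 *     if (qemuDomainObjEnterMonitorAsync(driver, vm,
 *                                        QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
 *         return -1;  (-1: domain exited; -2: nested job timed out)
 *     ret = qemuMonitorGetMigrationStatus(priv->mon, &status);
 *     qemuDomainObjExitMonitor(driver, vm);
 */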
/*
* obj must be locked before calling
*
* To be called immediately before any QEMU agent API call.
* Must have already called qemuDomainObjBeginJob() and checked
* that the VM is still active.
*
* To be followed with qemuDomainObjExitAgent() once complete
*/
void
qemuDomainObjEnterAgent(virDomainObjPtr obj)
{
qemuDomainObjPrivatePtr priv = obj->privateData;
VIR_DEBUG("Entering agent (agent=%p vm=%p name=%s)",
priv->agent, obj, obj->def->name);
virObjectLock(priv->agent);
virObjectRef(priv->agent);
ignore_value(virTimeMillisNow(&priv->agentStart));
virObjectUnlock(obj);
}
/* obj must NOT be locked before calling
*
* Should be paired with an earlier qemuDomainObjEnterAgent() call
*/
void
qemuDomainObjExitAgent(virDomainObjPtr obj)
{
qemuDomainObjPrivatePtr priv = obj->privateData;
bool hasRefs;
hasRefs = virObjectUnref(priv->agent);
if (hasRefs)
virObjectUnlock(priv->agent);
virObjectLock(obj);
VIR_DEBUG("Exited agent (agent=%p vm=%p name=%s)",
priv->agent, obj, obj->def->name);
priv->agentStart = 0;
if (!hasRefs)
priv->agent = NULL;
}
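/* The agent pairing mirrors the monitor one.  A sketch, with
 * qemuAgentFSFreeze() standing in for any guest agent command:
 *
 *     qemuDomainObjEnterAgent(vm);
 *     frozen = qemuAgentFSFreeze(priv->agent);
 *     qemuDomainObjExitAgent(vm);
 */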
void qemuDomainObjEnterRemote(virDomainObjPtr obj)
{
VIR_DEBUG("Entering remote (vm=%p name=%s)",
obj, obj->def->name);
virObjectRef(obj);
virObjectUnlock(obj);
}
void qemuDomainObjExitRemote(virDomainObjPtr obj)
{
virObjectLock(obj);
VIR_DEBUG("Exited remote (vm=%p name=%s)",
obj, obj->def->name);
virObjectUnref(obj);
}
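/* Used around slow RPCs to another libvirtd, e.g. when peer-to-peer
 * migration opens a connection to the destination host.  A sketch
 * (dconnuri is hypothetical):
 *
 *     qemuDomainObjEnterRemote(vm);
 *     dconn = virConnectOpen(dconnuri);
 *     qemuDomainObjExitRemote(vm);
 */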
virDomainDefPtr
qemuDomainDefCopy(virQEMUDriverPtr driver,
virDomainDefPtr src,
unsigned int flags)
{
virBuffer buf = VIR_BUFFER_INITIALIZER;
virDomainDefPtr ret = NULL;
virCapsPtr caps = NULL;
char *xml = NULL;
if (qemuDomainDefFormatBuf(driver, src, flags, &buf) < 0)
goto cleanup;
xml = virBufferContentAndReset(&buf);
if (!(caps = virQEMUDriverGetCapabilities(driver, false)))
goto cleanup;
if (!(ret = virDomainDefParseString(xml, caps, driver->xmlopt,
QEMU_EXPECTED_VIRT_TYPES,
VIR_DOMAIN_XML_INACTIVE)))
goto cleanup;
cleanup:
VIR_FREE(xml);
virObjectUnref(caps);
return ret;
}
int
qemuDomainDefFormatBuf(virQEMUDriverPtr driver,
virDomainDefPtr def,
unsigned int flags,
virBuffer *buf)
{
int ret = -1;
virCPUDefPtr cpu = NULL;
virCPUDefPtr def_cpu = def->cpu;
virDomainControllerDefPtr *controllers = NULL;
int ncontrollers = 0;
virCapsPtr caps = NULL;
if (!(caps = virQEMUDriverGetCapabilities(driver, false)))
goto cleanup;
/* Update guest CPU requirements according to host CPU */
if ((flags & VIR_DOMAIN_XML_UPDATE_CPU) &&
def_cpu &&
(def_cpu->mode != VIR_CPU_MODE_CUSTOM || def_cpu->model)) {
if (!caps->host.cpu ||
!caps->host.cpu->model) {
virReportError(VIR_ERR_OPERATION_FAILED,
"%s", _("cannot get host CPU capabilities"));
goto cleanup;
}
if (!(cpu = virCPUDefCopy(def_cpu)) ||
cpuUpdate(cpu, caps->host.cpu) < 0)
goto cleanup;
def->cpu = cpu;
}
if ((flags & VIR_DOMAIN_XML_MIGRATABLE)) {
size_t i;
int toremove = 0;
virDomainControllerDefPtr usb = NULL, pci = NULL;
/* If only the default USB controller is present, we can remove it
* and make the XML compatible with older versions of libvirt which
* didn't support USB controllers in the XML but always added the
* default one to qemu anyway.
*/
for (i = 0; i < def->ncontrollers; i++) {
if (def->controllers[i]->type == VIR_DOMAIN_CONTROLLER_TYPE_USB) {
if (usb) {
usb = NULL;
break;
}
usb = def->controllers[i];
}
}
if (usb && usb->idx == 0 && usb->model == -1) {
VIR_DEBUG("Removing default USB controller from domain '%s'"
" for migration compatibility", def->name);
toremove++;
} else {
usb = NULL;
}
/* Remove the default PCI controller if there is only one present
* and its model is pci-root */
for (i = 0; i < def->ncontrollers; i++) {
if (def->controllers[i]->type == VIR_DOMAIN_CONTROLLER_TYPE_PCI) {
if (pci) {
pci = NULL;
break;
}
pci = def->controllers[i];
}
}
if (pci && pci->idx == 0 &&
pci->model == VIR_DOMAIN_CONTROLLER_MODEL_PCI_ROOT) {
VIR_DEBUG("Removing default pci-root from domain '%s'"
" for migration compatibility", def->name);
toremove++;
} else {
pci = NULL;
}
if (toremove) {
controllers = def->controllers;
ncontrollers = def->ncontrollers;
if (VIR_ALLOC_N(def->controllers, ncontrollers - toremove) < 0) {
/* leave the saved array in @controllers so the cleanup path
 * restores def->controllers instead of leaking the old array */
goto cleanup;
}
def->ncontrollers = 0;
for (i = 0; i < ncontrollers; i++) {
if (controllers[i] != usb && controllers[i] != pci)
def->controllers[def->ncontrollers++] = controllers[i];
}
}
}
ret = virDomainDefFormatInternal(def, flags, buf);
cleanup:
def->cpu = def_cpu;
virCPUDefFree(cpu);
if (controllers) {
VIR_FREE(def->controllers);
def->controllers = controllers;
def->ncontrollers = ncontrollers;
}
virObjectUnref(caps);
return ret;
}
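/* With VIR_DOMAIN_XML_MIGRATABLE the formatted XML thus drops, for
 * example, an auto-added
 *
 *     <controller type='usb' index='0'/>
 *
 * or a lone
 *
 *     <controller type='pci' index='0' model='pci-root'/>
 *
 * so that destination daemons predating these elements still accept
 * the configuration.
 */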
char *qemuDomainDefFormatXML(virQEMUDriverPtr driver,
virDomainDefPtr def,
unsigned int flags)
{
virBuffer buf = VIR_BUFFER_INITIALIZER;
if (qemuDomainDefFormatBuf(driver, def, flags, &buf) < 0) {
virBufferFreeAndReset(&buf);
return NULL;
}
if (virBufferError(&buf)) {
virReportOOMError();
virBufferFreeAndReset(&buf);
return NULL;
}
return virBufferContentAndReset(&buf);
}
char *qemuDomainFormatXML(virQEMUDriverPtr driver,
virDomainObjPtr vm,
unsigned int flags)
{
virDomainDefPtr def;
if ((flags & VIR_DOMAIN_XML_INACTIVE) && vm->newDef)
def = vm->newDef;
else
def = vm->def;
return qemuDomainDefFormatXML(driver, def, flags);
}
char *
qemuDomainDefFormatLive(virQEMUDriverPtr driver,
virDomainDefPtr def,
bool inactive,
bool compatible)
{
unsigned int flags = QEMU_DOMAIN_FORMAT_LIVE_FLAGS;
if (inactive)
flags |= VIR_DOMAIN_XML_INACTIVE;
if (compatible)
flags |= VIR_DOMAIN_XML_MIGRATABLE;
return qemuDomainDefFormatXML(driver, def, flags);
}
void qemuDomainObjTaint(virQEMUDriverPtr driver,
virDomainObjPtr obj,
virDomainTaintFlags taint,
int logFD)
{
virErrorPtr orig_err = NULL;
if (virDomainObjTaint(obj, taint)) {
char uuidstr[VIR_UUID_STRING_BUFLEN];
virUUIDFormat(obj->def->uuid, uuidstr);
VIR_WARN("Domain id=%d name='%s' uuid=%s is tainted: %s",
obj->def->id,
obj->def->name,
uuidstr,
virDomainTaintTypeToString(taint));
/* We don't care about errors logging taint info, so
* preserve original error, and clear any error that
* is raised */
orig_err = virSaveLastError();
if (qemuDomainAppendLog(driver, obj, logFD,
"Domain id=%d is tainted: %s\n",
obj->def->id,
virDomainTaintTypeToString(taint)) < 0)
virResetLastError();
if (orig_err) {
virSetError(orig_err);
virFreeError(orig_err);
}
}
}
void qemuDomainObjCheckTaint(virQEMUDriverPtr driver,
virDomainObjPtr obj,
int logFD)
{
size_t i;
virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
qemuDomainObjPrivatePtr priv = obj->privateData;
if (cfg->privileged &&
(!cfg->clearEmulatorCapabilities ||
cfg->user == 0 ||
cfg->group == 0))
qemuDomainObjTaint(driver, obj, VIR_DOMAIN_TAINT_HIGH_PRIVILEGES, logFD);
if (priv->hookRun)
qemuDomainObjTaint(driver, obj, VIR_DOMAIN_TAINT_HOOK, logFD);
if (obj->def->namespaceData) {
qemuDomainCmdlineDefPtr qemucmd = obj->def->namespaceData;
if (qemucmd->num_args || qemucmd->num_env)
qemuDomainObjTaint(driver, obj, VIR_DOMAIN_TAINT_CUSTOM_ARGV, logFD);
}
if (obj->def->cpu && obj->def->cpu->mode == VIR_CPU_MODE_HOST_PASSTHROUGH)
qemuDomainObjTaint(driver, obj, VIR_DOMAIN_TAINT_HOST_CPU, logFD);
for (i = 0; i < obj->def->ndisks; i++)
qemuDomainObjCheckDiskTaint(driver, obj, obj->def->disks[i], logFD);
for (i = 0; i < obj->def->nnets; i++)
qemuDomainObjCheckNetTaint(driver, obj, obj->def->nets[i], logFD);
virObjectUnref(cfg);
}
void qemuDomainObjCheckDiskTaint(virQEMUDriverPtr driver,
virDomainObjPtr obj,
virDomainDiskDefPtr disk,
int logFD)
{
virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
int format = virDomainDiskGetFormat(disk);
if ((!format || format == VIR_STORAGE_FILE_AUTO) &&
cfg->allowDiskFormatProbing)
qemuDomainObjTaint(driver, obj, VIR_DOMAIN_TAINT_DISK_PROBING, logFD);
if (disk->rawio == 1)
qemuDomainObjTaint(driver, obj, VIR_DOMAIN_TAINT_HIGH_PRIVILEGES, logFD);
virObjectUnref(cfg);
}
void qemuDomainObjCheckNetTaint(virQEMUDriverPtr driver,
virDomainObjPtr obj,
virDomainNetDefPtr net,
int logFD)
{
/* script is only useful for NET_TYPE_ETHERNET (qemu) and
* NET_TYPE_BRIDGE (xen), but could be (incorrectly) specified for
* any interface type. In any case, it's adding user sauce into
* the soup, so it should taint the domain.
*/
if (net->script != NULL)
qemuDomainObjTaint(driver, obj, VIR_DOMAIN_TAINT_SHELL_SCRIPTS, logFD);
}
static int
qemuDomainOpenLogHelper(virQEMUDriverConfigPtr cfg,
virDomainObjPtr vm,
int oflags,
mode_t mode)
{
char *logfile;
int fd = -1;
bool trunc = false;
if (virAsprintf(&logfile, "%s/%s.log", cfg->logDir, vm->def->name) < 0)
return -1;
/* To make SELinux happy we always need to open in append mode.
* So we fake O_TRUNC by calling ftruncate after open instead
*/
if (oflags & O_TRUNC) {
oflags &= ~O_TRUNC;
oflags |= O_APPEND;
trunc = true;
}
if ((fd = open(logfile, oflags, mode)) < 0) {
virReportSystemError(errno, _("failed to create logfile %s"),
logfile);
goto cleanup;
}
if (virSetCloseExec(fd) < 0) {
virReportSystemError(errno, _("failed to set close-on-exec flag on %s"),
logfile);
VIR_FORCE_CLOSE(fd);
goto cleanup;
}
if (trunc &&
ftruncate(fd, 0) < 0) {
virReportSystemError(errno, _("failed to truncate %s"),
logfile);
VIR_FORCE_CLOSE(fd);
goto cleanup;
}
cleanup:
VIR_FREE(logfile);
return fd;
}
int
qemuDomainCreateLog(virQEMUDriverPtr driver, virDomainObjPtr vm,
bool append)
{
virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
int oflags;
int ret;
oflags = O_CREAT | O_WRONLY;
/* Only logrotate files in /var/log, so only append if running privileged */
if (cfg->privileged || append)
oflags |= O_APPEND;
else
oflags |= O_TRUNC;
ret = qemuDomainOpenLogHelper(cfg, vm, oflags, S_IRUSR | S_IWUSR);
virObjectUnref(cfg);
return ret;
}
int
qemuDomainOpenLog(virQEMUDriverPtr driver, virDomainObjPtr vm, off_t pos)
{
virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
int fd;
off_t off;
int whence;
fd = qemuDomainOpenLogHelper(cfg, vm, O_RDONLY, 0);
virObjectUnref(cfg);
if (fd < 0)
return -1;
if (pos < 0) {
off = 0;
whence = SEEK_END;
} else {
off = pos;
whence = SEEK_SET;
}
if (lseek(fd, off, whence) < 0) {
if (whence == SEEK_END)
virReportSystemError(errno,
_("unable to seek to end of log for %s"),
vm->def->name);
else
virReportSystemError(errno,
_("unable to seek to %lld from start for %s"),
(long long)off, vm->def->name);
VIR_FORCE_CLOSE(fd);
}
return fd;
}
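/* Callers pass a saved offset to re-read output from a known point, or
 * pos < 0 to seek straight to the end and watch only for new messages;
 * a sketch:
 *
 *     if ((logfd = qemuDomainOpenLog(driver, vm, pos)) < 0)
 *         goto cleanup;
 */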
int qemuDomainAppendLog(virQEMUDriverPtr driver,
virDomainObjPtr obj,
int logFD,
const char *fmt, ...)
{
int fd = logFD;
va_list argptr;
char *message = NULL;
int ret = -1;
va_start(argptr, fmt);
if ((fd == -1) &&
(fd = qemuDomainCreateLog(driver, obj, true)) < 0)
goto cleanup;
if (virVasprintf(&message, fmt, argptr) < 0)
goto cleanup;
if (safewrite(fd, message, strlen(message)) < 0) {
virReportSystemError(errno, _("Unable to write to domain logfile %s"),
obj->def->name);
goto cleanup;
}
ret = 0;
cleanup:
va_end(argptr);
if (fd != logFD)
VIR_FORCE_CLOSE(fd);
VIR_FREE(message);
return ret;
}
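/* Usage sketch: passing logFD == -1 makes the function open (and later
 * close) the domain log itself, so one-off messages need no fd
 * management in the caller (the message text is hypothetical):
 *
 *     ignore_value(qemuDomainAppendLog(driver, vm, -1,
 *                                      "qemu: %s\n", reason));
 */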
/* Locate an appropriate 'qemu-img' binary. */
const char *
qemuFindQemuImgBinary(virQEMUDriverPtr driver)
{
if (!driver->qemuImgBinary)
virReportError(VIR_ERR_INTERNAL_ERROR,
"%s", _("unable to find kvm-img or qemu-img"));
return driver->qemuImgBinary;
}
int
qemuDomainSnapshotWriteMetadata(virDomainObjPtr vm,
virDomainSnapshotObjPtr snapshot,
char *snapshotDir)
{
char *newxml = NULL;
int ret = -1;
char *snapDir = NULL;
char *snapFile = NULL;
char uuidstr[VIR_UUID_STRING_BUFLEN];
virUUIDFormat(vm->def->uuid, uuidstr);
newxml = virDomainSnapshotDefFormat(uuidstr, snapshot->def,
QEMU_DOMAIN_FORMAT_LIVE_FLAGS, 1);
if (newxml == NULL)
return -1;
if (virAsprintf(&snapDir, "%s/%s", snapshotDir, vm->def->name) < 0)
goto cleanup;
if (virFileMakePath(snapDir) < 0) {
virReportSystemError(errno, _("cannot create snapshot directory '%s'"),
snapDir);
goto cleanup;
}
if (virAsprintf(&snapFile, "%s/%s.xml", snapDir, snapshot->def->name) < 0)
goto cleanup;
ret = virXMLSaveFile(snapFile, NULL, "snapshot-edit", newxml);
cleanup:
VIR_FREE(snapFile);
VIR_FREE(snapDir);
VIR_FREE(newxml);
return ret;
}
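/* The resulting on-disk layout is one XML file per snapshot, e.g.
 * (assuming the default privileged snapshotDir):
 *
 *     /var/lib/libvirt/qemu/snapshot/<domain name>/<snapshot name>.xml
 */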
/* The domain is expected to be locked and inactive. Return -1 on normal
* failure, 1 if we skipped a disk due to try_all. */
static int
qemuDomainSnapshotForEachQcow2Raw(virQEMUDriverPtr driver,
virDomainDefPtr def,
const char *name,
const char *op,
bool try_all,
int ndisks)
{
const char *qemuimgarg[] = { NULL, "snapshot", NULL, NULL, NULL, NULL };
size_t i;
bool skipped = false;
qemuimgarg[0] = qemuFindQemuImgBinary(driver);
if (qemuimgarg[0] == NULL) {
/* qemuFindQemuImgBinary set the error */
return -1;
}
qemuimgarg[2] = op;
qemuimgarg[3] = name;
for (i = 0; i < ndisks; i++) {
/* FIXME: we also need to handle LVM here */
if (def->disks[i]->device == VIR_DOMAIN_DISK_DEVICE_DISK) {
int format = virDomainDiskGetFormat(def->disks[i]);
if (format > 0 && format != VIR_STORAGE_FILE_QCOW2) {
if (try_all) {
/* Continue on even in the face of error, since other
* disks in this VM may have the same snapshot name.
*/
VIR_WARN("skipping snapshot action on %s",
def->disks[i]->dst);
skipped = true;
continue;
} else if (STREQ(op, "-c") && i) {
/* We must roll back partial creation by deleting
* all earlier snapshots. */
qemuDomainSnapshotForEachQcow2Raw(driver, def, name,
"-d", false, i);
}
virReportError(VIR_ERR_OPERATION_INVALID,
_("Disk device '%s' does not support"
" snapshotting"),
def->disks[i]->dst);
return -1;
}
qemuimgarg[4] = virDomainDiskGetSource(def->disks[i]);
if (virRun(qemuimgarg, NULL) < 0) {
if (try_all) {
VIR_WARN("skipping snapshot action on %s",
def->disks[i]->dst);
skipped = true;
continue;
} else if (STREQ(op, "-c") && i) {
/* We must roll back partial creation by deleting
* all earlier snapshots. */
qemuDomainSnapshotForEachQcow2Raw(driver, def, name,
"-d", false, i);
}
return -1;
}
}
}
return skipped ? 1 : 0;
}
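/* For a qcow2 disk the argv built above amounts to invocations such as
 * (paths hypothetical):
 *
 *     qemu-img snapshot -c snap1 /var/lib/libvirt/images/guest.qcow2
 *
 * with "-d" deleting the same snapshot again, which is what the
 * partial-creation rollback above relies on.
 */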
/* The domain is expected to be locked and inactive. Return -1 on normal
* failure, 1 if we skipped a disk due to try_all. */
int
qemuDomainSnapshotForEachQcow2(virQEMUDriverPtr driver,
virDomainObjPtr vm,
virDomainSnapshotObjPtr snap,
const char *op,
bool try_all)
{
/* Prefer action on the disks in use at the time the snapshot was
* created; but fall back to current definition if dealing with a
* snapshot created prior to libvirt 0.9.5. */
virDomainDefPtr def = snap->def->dom;
if (!def)
def = vm->def;
return qemuDomainSnapshotForEachQcow2Raw(driver, def, snap->def->name,
op, try_all, def->ndisks);
}
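/* Illustrative sketch only (it mirrors the "-d" call made from
 * qemuDomainSnapshotDiscard below): delete the named internal
 * snapshot from every qcow2 disk, tolerating disks that cannot be
 * handled:
 *
 *     if (qemuDomainSnapshotForEachQcow2(driver, vm, snap, "-d",
 *                                        true) < 0)
 *         goto cleanup;  // only -1 is fatal; 1 (disk skipped) passes
 */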
/* Discard one snapshot (or its metadata), without reparenting any children. */
int
qemuDomainSnapshotDiscard(virQEMUDriverPtr driver,
virDomainObjPtr vm,
virDomainSnapshotObjPtr snap,
bool update_current,
bool metadata_only)
{
char *snapFile = NULL;
int ret = -1;
qemuDomainObjPrivatePtr priv;
virDomainSnapshotObjPtr parentsnap = NULL;
virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
if (!metadata_only) {
if (!virDomainObjIsActive(vm)) {
/* Ignore any skipped disks */
if (qemuDomainSnapshotForEachQcow2(driver, vm, snap, "-d",
true) < 0)
goto cleanup;
} else {
priv = vm->privateData;
qemuDomainObjEnterMonitor(driver, vm);
/* we continue on even in the face of error */
qemuMonitorDeleteSnapshot(priv->mon, snap->def->name);
qemuDomainObjExitMonitor(driver, vm);
}
}
if (virAsprintf(&snapFile, "%s/%s/%s.xml", cfg->snapshotDir,
vm->def->name, snap->def->name) < 0)
goto cleanup;
if (snap == vm->current_snapshot) {
if (update_current && snap->def->parent) {
parentsnap = virDomainSnapshotFindByName(vm->snapshots,
snap->def->parent);
if (!parentsnap) {
VIR_WARN("missing parent snapshot matching name '%s'",
snap->def->parent);
} else {
parentsnap->def->current = true;
if (qemuDomainSnapshotWriteMetadata(vm, parentsnap,
cfg->snapshotDir) < 0) {
VIR_WARN("failed to set parent snapshot '%s' as current",
snap->def->parent);
parentsnap->def->current = false;
parentsnap = NULL;
}
}
}
vm->current_snapshot = parentsnap;
}
if (unlink(snapFile) < 0)
VIR_WARN("Failed to unlink %s", snapFile);
virDomainSnapshotObjListRemove(vm->snapshots, snap);
ret = 0;
cleanup:
VIR_FREE(snapFile);
virObjectUnref(cfg);
return ret;
}
/* Hash iterator callback to discard multiple snapshots. */
void qemuDomainSnapshotDiscardAll(void *payload,
const void *name ATTRIBUTE_UNUSED,
void *data)
{
virDomainSnapshotObjPtr snap = payload;
virQEMUSnapRemovePtr curr = data;
int err;
if (snap->def->current)
curr->current = true;
err = qemuDomainSnapshotDiscard(curr->driver, curr->vm, snap, false,
curr->metadata_only);
if (err && !curr->err)
curr->err = err;
}
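/* Drop the libvirt metadata of every snapshot of @vm while leaving the
 * qcow2 snapshot data alone; used just before the domain itself is
 * removed (see qemuDomainRemoveInactive below).  Returns 0 on success
 * or the first error recorded while iterating. */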
int
qemuDomainSnapshotDiscardAllMetadata(virQEMUDriverPtr driver,
virDomainObjPtr vm)
{
virQEMUSnapRemove rem;
rem.driver = driver;
rem.vm = vm;
rem.metadata_only = true;
rem.err = 0;
virDomainSnapshotForEach(vm->snapshots, qemuDomainSnapshotDiscardAll,
&rem);
return rem.err;
}
/*
 * The caller must hold a lock on the vm and there must
 * be no remaining references to vm.
 */
void
qemuDomainRemoveInactive(virQEMUDriverPtr driver,
virDomainObjPtr vm)
{
char *snapDir;
virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
/* Remove any snapshot metadata prior to removing the domain */
if (qemuDomainSnapshotDiscardAllMetadata(driver, vm) < 0) {
VIR_WARN("unable to remove all snapshots for domain %s",
vm->def->name);
    } else if (virAsprintf(&snapDir, "%s/%s", cfg->snapshotDir,
                           vm->def->name) < 0) {
VIR_WARN("unable to remove snapshot directory %s/%s",
cfg->snapshotDir, vm->def->name);
} else {
if (rmdir(snapDir) < 0 && errno != ENOENT)
VIR_WARN("unable to remove snapshot directory %s", snapDir);
VIR_FREE(snapDir);
}
virDomainObjListRemove(driver->domains, vm);
virObjectUnref(cfg);
}
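/* Record whether the next guest shutdown should be handled as a reboot
 * request; the flag lives in the domain's private data and is saved to
 * the status XML so it survives a libvirtd restart. */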
void
qemuDomainSetFakeReboot(virQEMUDriverPtr driver,
virDomainObjPtr vm,
bool value)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
if (priv->fakeReboot == value)
goto cleanup;
priv->fakeReboot = value;
if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm) < 0)
VIR_WARN("Failed to save status on vm %s", vm->def->name);
cleanup:
virObjectUnref(cfg);
}
static int
qemuDomainCheckRemoveOptionalDisk(virQEMUDriverPtr driver,
virDomainObjPtr vm,
size_t diskIndex)
{
char uuid[VIR_UUID_STRING_BUFLEN];
virObjectEventPtr event = NULL;
virDomainDiskDefPtr disk = vm->def->disks[diskIndex];
const char *src = virDomainDiskGetSource(disk);
virUUIDFormat(vm->def->uuid, uuid);
VIR_DEBUG("Dropping disk '%s' on domain '%s' (UUID '%s') "
"due to inaccessible source '%s'",
disk->dst, vm->def->name, uuid, src);
if (disk->device == VIR_DOMAIN_DISK_DEVICE_CDROM ||
disk->device == VIR_DOMAIN_DISK_DEVICE_FLOPPY) {
event = virDomainEventDiskChangeNewFromObj(vm, src, NULL,
disk->info.alias,
VIR_DOMAIN_EVENT_DISK_CHANGE_MISSING_ON_START);
ignore_value(virDomainDiskSetSource(disk, NULL));
} else {
event = virDomainEventDiskChangeNewFromObj(vm, src, NULL,
disk->info.alias,
VIR_DOMAIN_EVENT_DISK_DROP_MISSING_ON_START);
virDomainDiskRemove(vm->def, diskIndex);
virDomainDiskDefFree(disk);
}
if (event)
qemuDomainEventQueue(driver, event);
return 0;
}
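/* Apply the disk's startupPolicy when its source turned out to be
 * inaccessible: MANDATORY always fails startup, REQUISITE fails only
 * on cold boot, anything else ejects or drops the disk via
 * qemuDomainCheckRemoveOptionalDisk.  Returns 0 if startup may
 * proceed, -1 otherwise. */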
static int
qemuDomainCheckDiskStartupPolicy(virQEMUDriverPtr driver,
virDomainObjPtr vm,
size_t diskIndex,
bool cold_boot)
{
char uuid[VIR_UUID_STRING_BUFLEN];
int startupPolicy = vm->def->disks[diskIndex]->startupPolicy;
virUUIDFormat(vm->def->uuid, uuid);
switch ((virDomainStartupPolicy) startupPolicy) {
case VIR_DOMAIN_STARTUP_POLICY_OPTIONAL:
break;
case VIR_DOMAIN_STARTUP_POLICY_MANDATORY:
goto error;
case VIR_DOMAIN_STARTUP_POLICY_REQUISITE:
if (cold_boot)
goto error;
break;
case VIR_DOMAIN_STARTUP_POLICY_DEFAULT:
case VIR_DOMAIN_STARTUP_POLICY_LAST:
/* this should never happen */
break;
}
if (qemuDomainCheckRemoveOptionalDisk(driver, vm, diskIndex) < 0)
goto error;
return 0;
error:
return -1;
}
static int
qemuDiskChainCheckBroken(virDomainDiskDefPtr disk)
{
char *brokenFile = NULL;
if (!virDomainDiskGetSource(disk))
return 0;
if (virStorageFileChainGetBroken(disk->src, &brokenFile) < 0)
return -1;
if (brokenFile) {
virReportError(VIR_ERR_INVALID_ARG,
_("Backing file '%s' of image '%s' is missing."),
brokenFile, virDomainDiskGetSource(disk));
VIR_FREE(brokenFile);
return -1;
}
return 0;
}
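/* Startup-time check that every disk source is present, including the
 * whole backing chain for formats that support backing files; disks
 * with a startupPolicy may be dropped instead of failing the start. */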
int
qemuDomainCheckDiskPresence(virQEMUDriverPtr driver,
virDomainObjPtr vm,
bool cold_boot)
{
int ret = -1;
size_t i;
VIR_DEBUG("Checking for disk presence");
for (i = vm->def->ndisks; i > 0; i--) {
size_t idx = i - 1;
virDomainDiskDefPtr disk = vm->def->disks[idx];
const char *path = virDomainDiskGetSource(disk);
virStorageFileFormat format = virDomainDiskGetFormat(disk);
virStorageType type = virStorageSourceGetActualType(disk->src);
if (!path)
continue;
/* There is no need to check the backing chain for disks
* without backing support, the fact that the file exists is
* more than enough */
if (type != VIR_STORAGE_TYPE_NETWORK &&
format >= VIR_STORAGE_FILE_NONE &&
format < VIR_STORAGE_FILE_BACKING &&
virFileExists(path))
continue;
if (qemuDomainDetermineDiskChain(driver, vm, disk, false) >= 0 &&
qemuDiskChainCheckBroken(disk) >= 0)
continue;
if (disk->startupPolicy &&
qemuDomainCheckDiskStartupPolicy(driver, vm, idx,
cold_boot) >= 0) {
virResetLastError();
continue;
}
goto error;
}
ret = 0;
error:
return ret;
}
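/* Illustrative sketch only (hypothetical caller; the real call site is
 * the process-startup code outside this file):
 *
 *     // cold_boot is true when starting fresh, false when restoring
 *     if (qemuDomainCheckDiskPresence(driver, vm, cold_boot) < 0)
 *         goto cleanup;
 */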
/*
* The vm must be locked when any of the following cleanup functions is
* called.
*/
int
qemuDomainCleanupAdd(virDomainObjPtr vm,
qemuDomainCleanupCallback cb)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
size_t i;
VIR_DEBUG("vm=%s, cb=%p", vm->def->name, cb);
for (i = 0; i < priv->ncleanupCallbacks; i++) {
if (priv->cleanupCallbacks[i] == cb)
return 0;
}
if (VIR_RESIZE_N(priv->cleanupCallbacks,
priv->ncleanupCallbacks_max,
priv->ncleanupCallbacks, 1) < 0)
return -1;
priv->cleanupCallbacks[priv->ncleanupCallbacks++] = cb;
return 0;
}
void
qemuDomainCleanupRemove(virDomainObjPtr vm,
qemuDomainCleanupCallback cb)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
size_t i;
VIR_DEBUG("vm=%s, cb=%p", vm->def->name, cb);
for (i = 0; i < priv->ncleanupCallbacks; i++) {
if (priv->cleanupCallbacks[i] == cb)
VIR_DELETE_ELEMENT_INPLACE(priv->cleanupCallbacks,
i, priv->ncleanupCallbacks);
}
VIR_SHRINK_N(priv->cleanupCallbacks,
priv->ncleanupCallbacks_max,
priv->ncleanupCallbacks_max - priv->ncleanupCallbacks);
}
void
qemuDomainCleanupRun(virQEMUDriverPtr driver,
virDomainObjPtr vm)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
size_t i;
VIR_DEBUG("driver=%p, vm=%s", driver, vm->def->name);
/* run cleanup callbacks in reverse order */
for (i = 0; i < priv->ncleanupCallbacks; i++) {
        if (priv->cleanupCallbacks[priv->ncleanupCallbacks - (i + 1)])
            priv->cleanupCallbacks[priv->ncleanupCallbacks - (i + 1)](driver, vm);
}
VIR_FREE(priv->cleanupCallbacks);
priv->ncleanupCallbacks = 0;
priv->ncleanupCallbacks_max = 0;
}
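/* Illustrative sketch only: a startup path registers a teardown hook
 * that qemuDomainCleanupRun invokes (in reverse registration order)
 * once the domain stops; hypotheticalCleanupCb is not a real callback
 * from this file:
 *
 *     static void hypotheticalCleanupCb(virQEMUDriverPtr driver,
 *                                       virDomainObjPtr vm);
 *
 *     if (qemuDomainCleanupAdd(vm, hypotheticalCleanupCb) < 0)
 *         goto error;
 */

/* Work out the uid/gid for probing @disk's image: start from -1/-1,
 * then take the driver config's user/group, then the domain's DAC
 * seclabel, and finally any per-disk DAC label override. */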
static void
qemuDomainGetImageIds(virQEMUDriverConfigPtr cfg,
virDomainObjPtr vm,
virDomainDiskDefPtr disk,
uid_t *uid, gid_t *gid)
{
virSecurityLabelDefPtr vmlabel;
virSecurityDeviceLabelDefPtr disklabel;
if (uid)
*uid = -1;
if (gid)
*gid = -1;
if (cfg) {
if (uid)
*uid = cfg->user;
if (gid)
*gid = cfg->group;
}
if (vm && (vmlabel = virDomainDefGetSecurityLabelDef(vm->def, "dac")) &&
vmlabel->label)
virParseOwnershipIds(vmlabel->label, uid, gid);
if ((disklabel = virStorageSourceGetSecurityLabelDef(disk->src, "dac")) &&
disklabel->label)
virParseOwnershipIds(disklabel->label, uid, gid);
}
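/* Probe @disk's image to (re)build its backing chain metadata using the
 * ownership computed by qemuDomainGetImageIds; an already-present chain
 * is kept unless @force requests a fresh probe.  Returns 0 on success,
 * -1 on error. */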
int
qemuDomainDetermineDiskChain(virQEMUDriverPtr driver,
virDomainObjPtr vm,
virDomainDiskDefPtr disk,
bool force)
{
virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
int ret = 0;
uid_t uid;
gid_t gid;
int type = virStorageSourceGetActualType(disk->src);
if (type != VIR_STORAGE_TYPE_NETWORK &&
!disk->src->path)
goto cleanup;
if (disk->src->backingStore) {
if (force)
            virStorageSourceBackingStoreClear(disk->src);
else
goto cleanup;
}
qemuDomainGetImageIds(cfg, vm, disk, &uid, &gid);
if (virStorageFileGetMetadata(disk->src,
uid, gid,
cfg->allowDiskFormatProbing) < 0)
ret = -1;
cleanup:
virObjectUnref(cfg);
return ret;
}
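/* Refresh the cached list of device aliases (priv->qemuDevices) from
 * the monitor; skipped for qemu binaries without DEVICE_DELETED event
 * support, since the cache only exists to process those events. */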
int
qemuDomainUpdateDeviceList(virQEMUDriverPtr driver,
virDomainObjPtr vm)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
char **aliases;
if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_DEVICE_DEL_EVENT))
return 0;
qemuDomainObjEnterMonitor(driver, vm);
if (qemuMonitorGetDeviceAliases(priv->mon, &aliases) < 0) {
qemuDomainObjExitMonitor(driver, vm);
return -1;
}
qemuDomainObjExitMonitor(driver, vm);
virStringFreeList(priv->qemuDevices);
priv->qemuDevices = aliases;
return 0;
}
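/* Compare two domain definitions for guest-ABI compatibility using
 * their migratable forms, so that elements which never travel on
 * migration cannot cause a spurious mismatch. */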
bool
qemuDomainDefCheckABIStability(virQEMUDriverPtr driver,
virDomainDefPtr src,
virDomainDefPtr dst)
{
virDomainDefPtr migratableDefSrc = NULL;
virDomainDefPtr migratableDefDst = NULL;
const int flags = VIR_DOMAIN_XML_SECURE | VIR_DOMAIN_XML_UPDATE_CPU | VIR_DOMAIN_XML_MIGRATABLE;
bool ret = false;
if (!(migratableDefSrc = qemuDomainDefCopy(driver, src, flags)) ||
!(migratableDefDst = qemuDomainDefCopy(driver, dst, flags)))
goto cleanup;
ret = virDomainDefCheckABIStability(migratableDefSrc, migratableDefDst);
cleanup:
virDomainDefFree(migratableDefSrc);
virDomainDefFree(migratableDefDst);
return ret;
}
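/* Return whether the guest agent channel is usable, i.e. the agent is
 * configured and has not hit an error; optionally report the failure
 * as a libvirt error. */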
bool
qemuDomainAgentAvailable(qemuDomainObjPrivatePtr priv,
bool reportError)
{
if (priv->agentError) {
if (reportError) {
virReportError(VIR_ERR_AGENT_UNRESPONSIVE, "%s",
_("QEMU guest agent is not "
"available due to an error"));
}
return false;
}
if (!priv->agent) {
if (reportError) {
virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
_("QEMU guest agent is not configured"));
}
return false;
}
return true;
}
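/* Illustrative sketch only (typical guard in a hypothetical agent API
 * entry point; error reporting is handled by the helper itself):
 *
 *     qemuDomainObjPrivatePtr priv = vm->privateData;
 *
 *     if (!qemuDomainAgentAvailable(priv, true))
 *         goto endjob;
 *     // ... enter the agent and issue the command ...
 */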