2011-02-14 16:09:39 +00:00
|
|
|
/*
|
|
|
|
* qemu_process.h: QEMU process management
|
|
|
|
*
|
qemu: new GRACEFUL flag for virDomainDestroy w/ QEMU support
When libvirt's virDomainDestroy API is shutting down the qemu process,
it first sends SIGTERM, then waits for 1.6 seconds and, if it sees the
process still there, sends a SIGKILL.
There have been reports that this behavior can lead to data loss
because the guest running in qemu doesn't have time to flush its disk
cache buffers before it's unceremoniously whacked.
This patch maintains that default behavior, but provides a new flag
VIR_DOMAIN_DESTROY_GRACEFUL to alter the behavior. If this flag is set
in the call to virDomainDestroyFlags, SIGKILL will never be sent to
the qemu process; instead, if the timeout is reached and the qemu
process still exists, virDomainDestroy will return an error.
Once this patch is in, the recommended method for applications to call
virDomainDestroyFlags will be with VIR_DOMAIN_DESTROY_GRACEFUL
included. If that fails, then the application can decide if and when
to call virDomainDestroyFlags again without
VIR_DOMAIN_DESTROY_GRACEFUL (to force the issue with SIGKILL).
(Note that this does not address the issue of existing applications
that have not yet been modified to use VIR_DOMAIN_DESTROY_GRACEFUL.
That is a separate patch.)
2012-01-27 18:28:23 +00:00
|
|
|
* Copyright (C) 2006-2012 Red Hat, Inc.
|
2011-02-14 16:09:39 +00:00
|
|
|
*
|
|
|
|
* This library is free software; you can redistribute it and/or
|
|
|
|
* modify it under the terms of the GNU Lesser General Public
|
|
|
|
* License as published by the Free Software Foundation; either
|
|
|
|
* version 2.1 of the License, or (at your option) any later version.
|
|
|
|
*
|
|
|
|
* This library is distributed in the hope that it will be useful,
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
|
|
* Lesser General Public License for more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU Lesser General Public
|
2012-07-21 10:06:23 +00:00
|
|
|
* License along with this library; If not, see
|
|
|
|
* <http://www.gnu.org/licenses/>.
|
2011-02-14 16:09:39 +00:00
|
|
|
*
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <config.h>
|
|
|
|
|
|
|
|
#include <fcntl.h>
|
|
|
|
#include <unistd.h>
|
|
|
|
#include <signal.h>
|
|
|
|
#include <sys/stat.h>
|
2011-04-05 12:17:28 +00:00
|
|
|
#include <sys/time.h>
|
|
|
|
#include <sys/resource.h>
|
2012-01-31 04:52:00 +00:00
|
|
|
#include <linux/capability.h>
|
2011-02-14 16:09:39 +00:00
|
|
|
|
|
|
|
#include "qemu_process.h"
|
|
|
|
#include "qemu_domain.h"
|
|
|
|
#include "qemu_cgroup.h"
|
|
|
|
#include "qemu_capabilities.h"
|
|
|
|
#include "qemu_monitor.h"
|
|
|
|
#include "qemu_command.h"
|
|
|
|
#include "qemu_hostdev.h"
|
|
|
|
#include "qemu_hotplug.h"
|
|
|
|
#include "qemu_bridge_filter.h"
|
2011-07-19 00:27:33 +00:00
|
|
|
#include "qemu_migration.h"
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2011-06-20 07:16:16 +00:00
|
|
|
#if HAVE_NUMACTL
|
2011-06-24 03:53:08 +00:00
|
|
|
# define NUMA_VERSION1_COMPATIBILITY 1
|
2011-06-20 07:16:16 +00:00
|
|
|
# include <numa.h>
|
|
|
|
#endif
|
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
#include "datatypes.h"
|
|
|
|
#include "logging.h"
|
|
|
|
#include "virterror_internal.h"
|
|
|
|
#include "memory.h"
|
|
|
|
#include "hooks.h"
|
2011-07-19 18:32:58 +00:00
|
|
|
#include "virfile.h"
|
2011-08-05 13:13:12 +00:00
|
|
|
#include "virpidfile.h"
|
2011-02-14 16:09:39 +00:00
|
|
|
#include "util.h"
|
|
|
|
#include "c-ctype.h"
|
|
|
|
#include "nodeinfo.h"
|
|
|
|
#include "processinfo.h"
|
Move qemu_audit.h helpers into shared code
The LXC and UML drivers can both make use of auditing. Move
the qemu_audit.{c,h} files to src/conf/domain_audit.{c,h}
* src/conf/domain_audit.c: Rename from src/qemu/qemu_audit.c
* src/conf/domain_audit.h: Rename from src/qemu/qemu_audit.h
* src/Makefile.am: Remove qemu_audit.{c,h}, add domain_audit.{c,h}
* src/qemu/qemu_audit.h, src/qemu/qemu_cgroup.c,
src/qemu/qemu_command.c, src/qemu/qemu_driver.c,
src/qemu/qemu_hotplug.c, src/qemu/qemu_migration.c,
src/qemu/qemu_process.c: Update for changed audit API names
2011-07-04 10:56:13 +00:00
|
|
|
#include "domain_audit.h"
|
2011-02-14 16:09:39 +00:00
|
|
|
#include "domain_nwfilter.h"
|
2010-10-26 14:04:46 +00:00
|
|
|
#include "locking/domain_lock.h"
|
2011-07-04 06:27:12 +00:00
|
|
|
#include "network/bridge_driver.h"
|
2011-06-23 09:37:57 +00:00
|
|
|
#include "uuid.h"
|
2011-11-29 12:33:23 +00:00
|
|
|
#include "virtime.h"
|
2012-02-10 21:09:00 +00:00
|
|
|
#include "virnetdevtap.h"
|
2012-09-14 07:47:00 +00:00
|
|
|
#include "bitmap.h"
|
2011-02-14 16:09:39 +00:00
|
|
|
|
|
|
|
#define VIR_FROM_THIS VIR_FROM_QEMU
|
|
|
|
|
|
|
|
#define START_POSTFIX ": starting up\n"
|
2011-05-05 16:32:21 +00:00
|
|
|
#define ATTACH_POSTFIX ": attaching\n"
|
2011-02-14 16:09:39 +00:00
|
|
|
#define SHUTDOWN_POSTFIX ": shutting down\n"
|
|
|
|
|
|
|
|
/**
 * qemuProcessRemoveDomainStatus:
 * @driver: qemu driver whose stateDir holds the domain's status files
 * @vm: domain whose state files should be removed
 *
 * Remove all state files of a domain from the driver's state
 * directory: the "<name>.xml" status file and, if one was recorded in
 * the domain's private data, the PID file. Unlink failures are only
 * logged as warnings (a missing file is not an error).
 *
 * Returns 0 on success, -1 if building the status file path fails
 * (out of memory).
 */
static int
qemuProcessRemoveDomainStatus(struct qemud_driver *driver,
                              virDomainObjPtr vm)
{
    char ebuf[1024];
    char *file = NULL;
    qemuDomainObjPrivatePtr priv = vm->privateData;

    if (virAsprintf(&file, "%s/%s.xml", driver->stateDir, vm->def->name) < 0) {
        virReportOOMError();
        return -1;
    }

    /* ENOENT/ENOTDIR simply mean there was nothing to remove */
    if (unlink(file) < 0 && errno != ENOENT && errno != ENOTDIR)
        VIR_WARN("Failed to remove domain XML for %s: %s",
                 vm->def->name, virStrerror(errno, ebuf, sizeof(ebuf)));
    VIR_FREE(file);

    if (priv->pidfile &&
        unlink(priv->pidfile) < 0 &&
        errno != ENOENT)
        VIR_WARN("Failed to remove PID file for %s: %s",
                 vm->def->name, virStrerror(errno, ebuf, sizeof(ebuf)));

    return 0;
}
|
|
|
|
|
|
|
|
|
|
|
|
/* XXX figure out how to remove this */
|
|
|
|
extern struct qemud_driver *qemu_driver;
|
|
|
|
|
2011-10-05 17:31:54 +00:00
|
|
|
/*
|
|
|
|
* This is a callback registered with a qemuAgentPtr instance,
|
|
|
|
* and to be invoked when the agent console hits an end of file
|
|
|
|
* condition, or error, thus indicating VM shutdown should be
|
|
|
|
* performed
|
|
|
|
*/
|
|
|
|
static void
|
2012-09-14 08:53:00 +00:00
|
|
|
qemuProcessHandleAgentEOF(qemuAgentPtr agent,
|
2011-10-05 17:31:54 +00:00
|
|
|
virDomainObjPtr vm)
|
|
|
|
{
|
|
|
|
struct qemud_driver *driver = qemu_driver;
|
|
|
|
qemuDomainObjPrivatePtr priv;
|
|
|
|
|
|
|
|
VIR_DEBUG("Received EOF from agent on %p '%s'", vm, vm->def->name);
|
|
|
|
|
|
|
|
qemuDriverLock(driver);
|
|
|
|
virDomainObjLock(vm);
|
|
|
|
|
|
|
|
priv = vm->privateData;
|
|
|
|
priv->agent = NULL;
|
|
|
|
|
|
|
|
virDomainObjUnlock(vm);
|
|
|
|
qemuDriverUnlock(driver);
|
2012-09-14 08:53:00 +00:00
|
|
|
|
|
|
|
qemuAgentClose(agent);
|
2011-10-05 17:31:54 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This is invoked when there is some kind of error
|
|
|
|
* parsing data to/from the agent. The VM can continue
|
|
|
|
* to run, but no further agent commands will be
|
|
|
|
* allowed
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
qemuProcessHandleAgentError(qemuAgentPtr agent ATTRIBUTE_UNUSED,
|
|
|
|
virDomainObjPtr vm)
|
|
|
|
{
|
|
|
|
struct qemud_driver *driver = qemu_driver;
|
|
|
|
qemuDomainObjPrivatePtr priv;
|
|
|
|
|
|
|
|
VIR_DEBUG("Received error from agent on %p '%s'", vm, vm->def->name);
|
|
|
|
|
|
|
|
qemuDriverLock(driver);
|
|
|
|
virDomainObjLock(vm);
|
|
|
|
|
|
|
|
priv = vm->privateData;
|
|
|
|
|
|
|
|
priv->agentError = true;
|
|
|
|
|
|
|
|
virDomainObjUnlock(vm);
|
|
|
|
qemuDriverUnlock(driver);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void qemuProcessHandleAgentDestroy(qemuAgentPtr agent,
|
|
|
|
virDomainObjPtr vm)
|
|
|
|
{
|
|
|
|
qemuDomainObjPrivatePtr priv;
|
|
|
|
|
2012-06-15 14:14:38 +00:00
|
|
|
VIR_DEBUG("Received destroy agent=%p vm=%p", agent, vm);
|
|
|
|
|
2011-10-05 17:31:54 +00:00
|
|
|
virDomainObjLock(vm);
|
|
|
|
priv = vm->privateData;
|
|
|
|
if (priv->agent == agent)
|
|
|
|
priv->agent = NULL;
|
2012-07-11 13:35:46 +00:00
|
|
|
if (virObjectUnref(vm))
|
2011-10-05 17:31:54 +00:00
|
|
|
virDomainObjUnlock(vm);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static qemuAgentCallbacks agentCallbacks = {
|
|
|
|
.destroy = qemuProcessHandleAgentDestroy,
|
|
|
|
.eofNotify = qemuProcessHandleAgentEOF,
|
|
|
|
.errorNotify = qemuProcessHandleAgentError,
|
|
|
|
};
|
|
|
|
|
|
|
|
static virDomainChrSourceDefPtr
|
|
|
|
qemuFindAgentConfig(virDomainDefPtr def)
|
|
|
|
{
|
|
|
|
virDomainChrSourceDefPtr config = NULL;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0 ; i < def->nchannels ; i++) {
|
|
|
|
virDomainChrDefPtr channel = def->channels[i];
|
|
|
|
|
|
|
|
if (channel->targetType != VIR_DOMAIN_CHR_CHANNEL_TARGET_TYPE_VIRTIO)
|
|
|
|
continue;
|
|
|
|
|
2012-02-16 02:32:03 +00:00
|
|
|
if (STREQ_NULLABLE(channel->target.name, "org.qemu.guest_agent.0")) {
|
2011-10-05 17:31:54 +00:00
|
|
|
config = &channel->source;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return config;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Open a connection to the guest agent for @vm, if the domain has a
 * virtio agent channel configured (see qemuFindAgentConfig).
 *
 * Called with both the driver and domain locks held; both are dropped
 * around the qemuAgentOpen() call and re-acquired afterwards, so the
 * domain may have changed state in the meantime — hence the
 * virDomainObjIsActive() re-check below.
 *
 * Returns 0 when no agent channel is configured or the agent was
 * connected successfully; -1 on failure.
 */
static int
qemuConnectAgent(struct qemud_driver *driver, virDomainObjPtr vm)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    int ret = -1;
    qemuAgentPtr agent = NULL;
    virDomainChrSourceDefPtr config = qemuFindAgentConfig(vm->def);

    /* No agent channel configured: nothing to do, not an error */
    if (!config)
        return 0;

    if (virSecurityManagerSetDaemonSocketLabel(driver->securityManager,
                                               vm->def) < 0) {
        VIR_ERROR(_("Failed to set security context for agent for %s"),
                  vm->def->name);
        goto cleanup;
    }

    /* Hold an extra reference because we can't allow 'vm' to be
     * deleted while the agent is active */
    virObjectRef(vm);

    /* Record when the connect attempt started, then drop both locks
     * for the duration of the (potentially blocking) open */
    ignore_value(virTimeMillisNow(&priv->agentStart));
    virDomainObjUnlock(vm);
    qemuDriverUnlock(driver);

    agent = qemuAgentOpen(vm,
                          config,
                          &agentCallbacks);

    qemuDriverLock(driver);
    virDomainObjLock(vm);
    priv->agentStart = 0;

    if (virSecurityManagerClearSocketLabel(driver->securityManager,
                                           vm->def) < 0) {
        VIR_ERROR(_("Failed to clear security context for agent for %s"),
                  vm->def->name);
        goto cleanup;
    }

    /* Open failed: give back the reference taken above (the agent's
     * destroy callback won't run to do it for us) */
    if (agent == NULL)
        virObjectUnref(vm);

    /* The domain may have died while the locks were dropped */
    if (!virDomainObjIsActive(vm)) {
        qemuAgentClose(agent);
        goto cleanup;
    }
    priv->agent = agent;

    if (priv->agent == NULL) {
        VIR_INFO("Failed to connect agent for %s", vm->def->name);
        goto cleanup;
    }

    ret = 0;

cleanup:
    return ret;
}
|
|
|
|
|
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
/*
 * This is a callback registered with a qemuMonitorPtr instance,
 * and to be invoked when the monitor console hits an end of file
 * condition, or error, thus indicating VM shutdown should be
 * performed
 */
static void
qemuProcessHandleMonitorEOF(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
                            virDomainObjPtr vm)
{
    struct qemud_driver *driver = qemu_driver;
    virDomainEventPtr event = NULL;
    qemuDomainObjPrivatePtr priv;
    /* Default interpretation: EOF means a clean guest shutdown */
    int eventReason = VIR_DOMAIN_EVENT_STOPPED_SHUTDOWN;
    int stopReason = VIR_DOMAIN_SHUTOFF_SHUTDOWN;
    const char *auditReason = "shutdown";

    VIR_DEBUG("Received EOF on %p '%s'", vm, vm->def->name);

    qemuDriverLock(driver);
    virDomainObjLock(vm);

    priv = vm->privateData;

    /* virDomainDestroy is already tearing the process down; it will
     * handle the cleanup itself */
    if (priv->beingDestroyed) {
        VIR_DEBUG("Domain is being destroyed, EOF is expected");
        goto unlock;
    }

    if (!virDomainObjIsActive(vm)) {
        VIR_DEBUG("Domain %p is not active, ignoring EOF", vm);
        goto unlock;
    }

    /* With a JSON monitor, a clean shutdown emits a SHUTDOWN event
     * first; EOF without one means the process died unexpectedly */
    if (priv->monJSON && !priv->gotShutdown) {
        VIR_DEBUG("Monitor connection to '%s' closed without SHUTDOWN event; "
                  "assuming the domain crashed", vm->def->name);
        eventReason = VIR_DOMAIN_EVENT_STOPPED_FAILED;
        stopReason = VIR_DOMAIN_SHUTOFF_CRASHED;
        auditReason = "failed";
    }

    event = virDomainEventNewFromObj(vm,
                                     VIR_DOMAIN_EVENT_STOPPED,
                                     eventReason);
    qemuProcessStop(driver, vm, stopReason, 0);
    virDomainAuditStop(vm, auditReason);

    /* Transient domains cease to exist once stopped; note that
     * qemuDomainRemoveInactive disposes of the object, so we must not
     * touch (or unlock) vm afterwards */
    if (!vm->persistent) {
        qemuDomainRemoveInactive(driver, vm);
        goto cleanup;
    }

unlock:
    virDomainObjUnlock(vm);

cleanup:
    if (event)
        qemuDomainEventQueue(driver, event);
    qemuDriverUnlock(driver);
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This is invoked when there is some kind of error
|
|
|
|
* parsing data to/from the monitor. The VM can continue
|
|
|
|
* to run, but no further monitor commands will be
|
|
|
|
* allowed
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
qemuProcessHandleMonitorError(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
|
|
|
|
virDomainObjPtr vm)
|
|
|
|
{
|
|
|
|
struct qemud_driver *driver = qemu_driver;
|
|
|
|
virDomainEventPtr event = NULL;
|
|
|
|
|
|
|
|
VIR_DEBUG("Received error on %p '%s'", vm, vm->def->name);
|
|
|
|
|
|
|
|
qemuDriverLock(driver);
|
|
|
|
virDomainObjLock(vm);
|
|
|
|
|
2011-05-31 16:34:20 +00:00
|
|
|
((qemuDomainObjPrivatePtr) vm->privateData)->monError = true;
|
2011-05-29 12:37:29 +00:00
|
|
|
event = virDomainEventControlErrorNewFromObj(vm);
|
|
|
|
if (event)
|
|
|
|
qemuDomainEventQueue(driver, event);
|
|
|
|
|
|
|
|
virDomainObjUnlock(vm);
|
2011-03-03 18:21:07 +00:00
|
|
|
qemuDriverUnlock(driver);
|
2011-02-14 16:09:39 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static virDomainDiskDefPtr
|
|
|
|
qemuProcessFindDomainDiskByPath(virDomainObjPtr vm,
|
|
|
|
const char *path)
|
|
|
|
{
|
snapshot: also support disks by path
I got confused when 'virsh domblkinfo dom disk' required the
path to a disk (which can be ambiguous, since a single file
can back multiple disks), rather than the unambiguous target
device name that I was using in disk snapshots. So, in true
developer fashion, I went for the best of both worlds - all
interfaces that operate on a disk (aka block) now accept
either the target name or the unambiguous path to the backing
file used by the disk.
* src/conf/domain_conf.h (virDomainDiskIndexByName): Add
parameter.
(virDomainDiskPathByName): New prototype.
* src/libvirt_private.syms (domain_conf.h): Export it.
* src/conf/domain_conf.c (virDomainDiskIndexByName): Also allow
searching by path, and decide whether ambiguity is okay.
(virDomainDiskPathByName): New function.
(virDomainDiskRemoveByName, virDomainSnapshotAlignDisks): Update
callers.
* src/qemu/qemu_driver.c (qemudDomainBlockPeek)
(qemuDomainAttachDeviceConfig, qemuDomainUpdateDeviceConfig)
(qemuDomainGetBlockInfo, qemuDiskPathToAlias): Likewise.
* src/qemu/qemu_process.c (qemuProcessFindDomainDiskByPath):
Likewise.
* src/libxl/libxl_driver.c (libxlDomainAttachDeviceDiskLive)
(libxlDomainDetachDeviceDiskLive, libxlDomainAttachDeviceConfig)
(libxlDomainUpdateDeviceConfig): Likewise.
* src/uml/uml_driver.c (umlDomainBlockPeek): Likewise.
* src/xen/xend_internal.c (xenDaemonDomainBlockPeek): Likewise.
* docs/formatsnapshot.html.in: Update documentation.
* tools/virsh.pod (domblkstat, domblkinfo): Likewise.
* docs/schemas/domaincommon.rng (diskTarget): Tighten pattern on
disk targets.
* docs/schemas/domainsnapshot.rng (disksnapshot): Update to match.
* tests/domainsnapshotxml2xmlin/disk_snapshot.xml: Update test.
2011-08-20 02:38:36 +00:00
|
|
|
int i = virDomainDiskIndexByName(vm->def, path, true);
|
2011-02-14 16:09:39 +00:00
|
|
|
|
snapshot: also support disks by path
I got confused when 'virsh domblkinfo dom disk' required the
path to a disk (which can be ambiguous, since a single file
can back multiple disks), rather than the unambiguous target
device name that I was using in disk snapshots. So, in true
developer fashion, I went for the best of both worlds - all
interfaces that operate on a disk (aka block) now accept
either the target name or the unambiguous path to the backing
file used by the disk.
* src/conf/domain_conf.h (virDomainDiskIndexByName): Add
parameter.
(virDomainDiskPathByName): New prototype.
* src/libvirt_private.syms (domain_conf.h): Export it.
* src/conf/domain_conf.c (virDomainDiskIndexByName): Also allow
searching by path, and decide whether ambiguity is okay.
(virDomainDiskPathByName): New function.
(virDomainDiskRemoveByName, virDomainSnapshotAlignDisks): Update
callers.
* src/qemu/qemu_driver.c (qemudDomainBlockPeek)
(qemuDomainAttachDeviceConfig, qemuDomainUpdateDeviceConfig)
(qemuDomainGetBlockInfo, qemuDiskPathToAlias): Likewise.
* src/qemu/qemu_process.c (qemuProcessFindDomainDiskByPath):
Likewise.
* src/libxl/libxl_driver.c (libxlDomainAttachDeviceDiskLive)
(libxlDomainDetachDeviceDiskLive, libxlDomainAttachDeviceConfig)
(libxlDomainUpdateDeviceConfig): Likewise.
* src/uml/uml_driver.c (umlDomainBlockPeek): Likewise.
* src/xen/xend_internal.c (xenDaemonDomainBlockPeek): Likewise.
* docs/formatsnapshot.html.in: Update documentation.
* tools/virsh.pod (domblkstat, domblkinfo): Likewise.
* docs/schemas/domaincommon.rng (diskTarget): Tighten pattern on
disk targets.
* docs/schemas/domainsnapshot.rng (disksnapshot): Update to match.
* tests/domainsnapshotxml2xmlin/disk_snapshot.xml: Update test.
2011-08-20 02:38:36 +00:00
|
|
|
if (i >= 0)
|
|
|
|
return vm->def->disks[i];
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
_("no disk found with path %s"),
|
|
|
|
path);
|
2011-02-14 16:09:39 +00:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
static virDomainDiskDefPtr
|
|
|
|
qemuProcessFindDomainDiskByAlias(virDomainObjPtr vm,
|
|
|
|
const char *alias)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
if (STRPREFIX(alias, QEMU_DRIVE_HOST_PREFIX))
|
|
|
|
alias += strlen(QEMU_DRIVE_HOST_PREFIX);
|
|
|
|
|
|
|
|
for (i = 0; i < vm->def->ndisks; i++) {
|
|
|
|
virDomainDiskDefPtr disk;
|
|
|
|
|
|
|
|
disk = vm->def->disks[i];
|
|
|
|
if (disk->info.alias != NULL && STREQ(disk->info.alias, alias))
|
|
|
|
return disk;
|
|
|
|
}
|
|
|
|
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
_("no disk found with alias %s"),
|
|
|
|
alias);
|
2011-02-14 16:09:39 +00:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Fetch the qcow decryption passphrase for @disk via the secret
 * driver of @conn.
 *
 * On success, stores a NUL-terminated copy of the passphrase in
 * @secretRet (caller frees) and its length (excluding the NUL) in
 * @secretLen, and returns 0. Returns -1 on any failure. The raw
 * secret value is scrubbed with memset before being freed.
 */
static int
qemuProcessGetVolumeQcowPassphrase(virConnectPtr conn,
                                   virDomainDiskDefPtr disk,
                                   char **secretRet,
                                   size_t *secretLen)
{
    virSecretPtr secret;
    char *passphrase;
    unsigned char *data;
    size_t size;
    int ret = -1;
    virStorageEncryptionPtr enc;

    if (!disk->encryption) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       _("disk %s does not have any encryption information"),
                       disk->src);
        return -1;
    }
    enc = disk->encryption;

    /* Secrets live in the secret driver, reachable only through a
     * connection object */
    if (!conn) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       "%s", _("cannot find secrets without a connection"));
        goto cleanup;
    }

    if (conn->secretDriver == NULL ||
        conn->secretDriver->lookupByUUID == NULL ||
        conn->secretDriver->getValue == NULL) {
        virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                       _("secret storage not supported"));
        goto cleanup;
    }

    /* Only the qcow format with exactly one passphrase-type secret is
     * supported here */
    if (enc->format != VIR_STORAGE_ENCRYPTION_FORMAT_QCOW ||
        enc->nsecrets != 1 ||
        enc->secrets[0]->type !=
        VIR_STORAGE_ENCRYPTION_SECRET_TYPE_PASSPHRASE) {
        virReportError(VIR_ERR_XML_ERROR,
                       _("invalid <encryption> for volume %s"), disk->src);
        goto cleanup;
    }

    secret = conn->secretDriver->lookupByUUID(conn,
                                              enc->secrets[0]->uuid);
    if (secret == NULL)
        goto cleanup;
    data = conn->secretDriver->getValue(secret, &size, 0,
                                        VIR_SECRET_GET_VALUE_INTERNAL_CALL);
    virObjectUnref(secret);
    if (data == NULL)
        goto cleanup;

    /* The passphrase is handed to qemu as a C string, so an embedded
     * NUL would silently truncate it — reject it outright.
     * NOTE(review): a plain memset before free may be elided by the
     * optimizer; consider a non-elidable scrub helper — verify project
     * policy. */
    if (memchr(data, '\0', size) != NULL) {
        memset(data, 0, size);
        VIR_FREE(data);
        virReportError(VIR_ERR_XML_ERROR,
                       _("format='qcow' passphrase for %s must not contain a "
                         "'\\0'"), disk->src);
        goto cleanup;
    }

    if (VIR_ALLOC_N(passphrase, size + 1) < 0) {
        memset(data, 0, size);
        VIR_FREE(data);
        virReportOOMError();
        goto cleanup;
    }
    memcpy(passphrase, data, size);
    passphrase[size] = '\0';

    /* Scrub the raw secret value before releasing it */
    memset(data, 0, size);
    VIR_FREE(data);

    *secretRet = passphrase;
    *secretLen = size;

    ret = 0;

cleanup:
    return ret;
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
qemuProcessFindVolumeQcowPassphrase(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
|
|
|
|
virConnectPtr conn,
|
|
|
|
virDomainObjPtr vm,
|
|
|
|
const char *path,
|
|
|
|
char **secretRet,
|
|
|
|
size_t *secretLen)
|
|
|
|
{
|
|
|
|
virDomainDiskDefPtr disk;
|
|
|
|
int ret = -1;
|
|
|
|
|
|
|
|
virDomainObjLock(vm);
|
|
|
|
disk = qemuProcessFindDomainDiskByPath(vm, path);
|
|
|
|
|
|
|
|
if (!disk)
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
ret = qemuProcessGetVolumeQcowPassphrase(conn, disk, secretRet, secretLen);
|
|
|
|
|
|
|
|
cleanup:
|
|
|
|
virDomainObjUnlock(vm);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
qemuProcessHandleReset(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
|
|
|
|
virDomainObjPtr vm)
|
|
|
|
{
|
|
|
|
struct qemud_driver *driver = qemu_driver;
|
|
|
|
virDomainEventPtr event;
|
2012-09-04 10:01:43 +00:00
|
|
|
qemuDomainObjPrivatePtr priv;
|
2011-02-14 16:09:39 +00:00
|
|
|
|
|
|
|
virDomainObjLock(vm);
|
2012-09-04 10:01:43 +00:00
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
event = virDomainEventRebootNewFromObj(vm);
|
2012-09-04 10:01:43 +00:00
|
|
|
priv = vm->privateData;
|
|
|
|
if (priv->agent)
|
|
|
|
qemuAgentNotifyEvent(priv->agent, QEMU_AGENT_EVENT_RESET);
|
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
virDomainObjUnlock(vm);
|
|
|
|
|
|
|
|
if (event) {
|
|
|
|
qemuDriverLock(driver);
|
|
|
|
qemuDomainEventQueue(driver, event);
|
|
|
|
qemuDriverUnlock(driver);
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-06-15 16:49:58 +00:00
|
|
|
/*
 * Since we have the '-no-shutdown' flag set, the QEMU process will
 * currently have the guest OS shut down and the CPUs stopped. To fake
 * the reboot, we thus want to do a reset of the virtual hardware,
 * followed by a restart of the CPUs. This should result in the guest
 * OS booting up again.
 *
 * Thread worker (void *opaque signature); 'opaque' is the domain
 * object, on which the creator holds a reference that this function
 * is responsible for dropping (see the virObjectUnref in cleanup).
 */
static void
qemuProcessFakeReboot(void *opaque)
{
    struct qemud_driver *driver = qemu_driver;
    virDomainObjPtr vm = opaque;
    qemuDomainObjPrivatePtr priv = vm->privateData;
    virDomainEventPtr event = NULL;
    int ret = -1;   /* remains -1 on any failure; triggers forced kill below */
    VIR_DEBUG("vm=%p", vm);
    qemuDriverLock(driver);
    virDomainObjLock(vm);
    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
        goto cleanup;

    if (!virDomainObjIsActive(vm)) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("guest unexpectedly quit"));
        goto endjob;
    }

    /* Reset the virtual hardware via the monitor */
    qemuDomainObjEnterMonitorWithDriver(driver, vm);
    if (qemuMonitorSystemReset(priv->mon) < 0) {
        qemuDomainObjExitMonitorWithDriver(driver, vm);
        goto endjob;
    }
    qemuDomainObjExitMonitorWithDriver(driver, vm);

    /* Re-check: the domain may have died while we were in the monitor */
    if (!virDomainObjIsActive(vm)) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("guest unexpectedly quit"));
        goto endjob;
    }

    /* Restart the stopped CPUs so the guest boots again */
    if (qemuProcessStartCPUs(driver, vm, NULL,
                             VIR_DOMAIN_RUNNING_BOOTED,
                             QEMU_ASYNC_JOB_NONE) < 0) {
        if (virGetLastError() == NULL)
            virReportError(VIR_ERR_INTERNAL_ERROR,
                           "%s", _("resume operation failed"));
        goto endjob;
    }
    /* The shutdown that triggered this fake reboot has been handled */
    priv->gotShutdown = false;
    event = virDomainEventNewFromObj(vm,
                                     VIR_DOMAIN_EVENT_RESUMED,
                                     VIR_DOMAIN_EVENT_RESUMED_UNPAUSED);

    ret = 0;

endjob:
    /* qemuDomainObjEndJob() returning false means the domain object is
     * gone; clear 'vm' so the cleanup path does not touch it */
    if (!qemuDomainObjEndJob(driver, vm))
        vm = NULL;

cleanup:
    if (vm) {
        /* On failure, forcibly kill the process rather than leave the
         * guest wedged with its CPUs stopped */
        if (ret == -1) {
            ignore_value(qemuProcessKill(driver, vm,
                                         VIR_QEMU_PROCESS_KILL_FORCE));
        }
        /* Drop the reference taken by our creator; only unlock when the
         * object survived the unref */
        if (virObjectUnref(vm))
            virDomainObjUnlock(vm);
    }
    if (event)
        qemuDomainEventQueue(driver, event);
    qemuDriverUnlock(driver);
}
|
|
|
|
|
|
|
|
|
2011-09-27 12:56:17 +00:00
|
|
|
/*
 * Decide what happens once the guest OS has shut down: either fake a
 * reboot in a separate thread (when fakeReboot was requested), or kill
 * the now-defunct QEMU process without waiting.
 *
 * Called with the domain object locked (see qemuProcessHandleShutdown,
 * which invokes this between lock/unlock of @vm).
 */
static void
qemuProcessShutdownOrReboot(struct qemud_driver *driver,
                            virDomainObjPtr vm)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;

    if (priv->fakeReboot) {
        qemuDomainSetFakeReboot(driver, vm, false);
        /* Give the reboot thread its own reference on the domain;
         * qemuProcessFakeReboot drops it when done */
        virObjectRef(vm);
        virThread th;
        if (virThreadCreate(&th,
                            false,
                            qemuProcessFakeReboot,
                            vm) < 0) {
            VIR_ERROR(_("Failed to create reboot thread, killing domain"));
            ignore_value(qemuProcessKill(driver, vm,
                                         VIR_QEMU_PROCESS_KILL_NOWAIT));
            /* Thread never started, so drop the reference ourselves */
            virObjectUnref(vm);
        }
    } else {
        ignore_value(qemuProcessKill(driver, vm, VIR_QEMU_PROCESS_KILL_NOWAIT));
    }
}
|
2011-09-13 16:11:26 +00:00
|
|
|
|
2011-09-27 12:56:17 +00:00
|
|
|
/*
 * Monitor callback for the QEMU SHUTDOWN event.
 *
 * Transitions the domain to the SHUTDOWN state, persists the status,
 * notifies the guest agent (if connected), and then either fakes a
 * reboot or kills the process via qemuProcessShutdownOrReboot().
 * Repeated SHUTDOWN events and events from inactive domains are
 * ignored. Always returns 0.
 */
static int
qemuProcessHandleShutdown(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
                          virDomainObjPtr vm)
{
    struct qemud_driver *driver = qemu_driver;
    qemuDomainObjPrivatePtr priv;
    virDomainEventPtr event = NULL;

    VIR_DEBUG("vm=%p", vm);

    virDomainObjLock(vm);

    priv = vm->privateData;
    /* gotShutdown de-duplicates SHUTDOWN events; it is reset when the
     * CPUs are restarted (see qemuProcessFakeReboot) */
    if (priv->gotShutdown) {
        VIR_DEBUG("Ignoring repeated SHUTDOWN event from domain %s",
                  vm->def->name);
        goto unlock;
    } else if (!virDomainObjIsActive(vm)) {
        VIR_DEBUG("Ignoring SHUTDOWN event from inactive domain %s",
                  vm->def->name);
        goto unlock;
    }
    priv->gotShutdown = true;

    VIR_DEBUG("Transitioned guest %s to shutdown state",
              vm->def->name);
    virDomainObjSetState(vm,
                         VIR_DOMAIN_SHUTDOWN,
                         VIR_DOMAIN_SHUTDOWN_UNKNOWN);
    event = virDomainEventNewFromObj(vm,
                                     VIR_DOMAIN_EVENT_SHUTDOWN,
                                     VIR_DOMAIN_EVENT_SHUTDOWN_FINISHED);

    /* Status-file save failure is non-fatal here; just warn */
    if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0) {
        VIR_WARN("Unable to save status on vm %s after state change",
                 vm->def->name);
    }

    if (priv->agent)
        qemuAgentNotifyEvent(priv->agent, QEMU_AGENT_EVENT_SHUTDOWN);

    qemuProcessShutdownOrReboot(driver, vm);

unlock:
    virDomainObjUnlock(vm);

    /* Queue the event only after dropping the domain lock, per the
     * lock ordering used throughout these handlers */
    if (event) {
        qemuDriverLock(driver);
        qemuDomainEventQueue(driver, event);
        qemuDriverUnlock(driver);
    }

    return 0;
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
 * Monitor callback for the QEMU STOP event (vCPUs paused).
 *
 * If the domain was RUNNING, transitions it to PAUSED, releases any
 * lock-manager lease (preserving its state for later resume), saves
 * the status file and emits a SUSPENDED lifecycle event. STOP events
 * arriving after a SHUTDOWN are ignored. Always returns 0.
 */
static int
qemuProcessHandleStop(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
                      virDomainObjPtr vm)
{
    struct qemud_driver *driver = qemu_driver;
    virDomainEventPtr event = NULL;

    virDomainObjLock(vm);
    if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
        qemuDomainObjPrivatePtr priv = vm->privateData;

        /* QEMU stops the CPUs as part of shutdown; that case is handled
         * by the SHUTDOWN path, not here */
        if (priv->gotShutdown) {
            VIR_DEBUG("Ignoring STOP event after SHUTDOWN");
            goto unlock;
        }

        VIR_DEBUG("Transitioned guest %s to paused state",
                  vm->def->name);

        virDomainObjSetState(vm, VIR_DOMAIN_PAUSED, VIR_DOMAIN_PAUSED_UNKNOWN);
        event = virDomainEventNewFromObj(vm,
                                         VIR_DOMAIN_EVENT_SUSPENDED,
                                         VIR_DOMAIN_EVENT_SUSPENDED_PAUSED);

        /* Release the lock-manager lease while paused; lockState is
         * kept so the lease can be reacquired on resume */
        VIR_FREE(priv->lockState);
        if (virDomainLockProcessPause(driver->lockManager, vm, &priv->lockState) < 0)
            VIR_WARN("Unable to release lease on %s", vm->def->name);
        VIR_DEBUG("Preserving lock state '%s'", NULLSTR(priv->lockState));

        if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0) {
            VIR_WARN("Unable to save status on vm %s after state change",
                     vm->def->name);
        }
    }

unlock:
    virDomainObjUnlock(vm);

    /* Event queued only after the domain lock is dropped */
    if (event) {
        qemuDriverLock(driver);
        qemuDomainEventQueue(driver, event);
        qemuDriverUnlock(driver);
    }

    return 0;
}
|
|
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
qemuProcessHandleRTCChange(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
|
|
|
|
virDomainObjPtr vm,
|
|
|
|
long long offset)
|
|
|
|
{
|
|
|
|
struct qemud_driver *driver = qemu_driver;
|
|
|
|
virDomainEventPtr event;
|
|
|
|
|
|
|
|
virDomainObjLock(vm);
|
|
|
|
event = virDomainEventRTCChangeNewFromObj(vm, offset);
|
|
|
|
|
|
|
|
if (vm->def->clock.offset == VIR_DOMAIN_CLOCK_OFFSET_VARIABLE)
|
2012-02-06 13:59:16 +00:00
|
|
|
vm->def->clock.data.variable.adjustment = offset;
|
2011-02-14 16:09:39 +00:00
|
|
|
|
|
|
|
if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0)
|
2011-05-09 09:24:09 +00:00
|
|
|
VIR_WARN("unable to save domain status with RTC change");
|
2011-02-14 16:09:39 +00:00
|
|
|
|
|
|
|
virDomainObjUnlock(vm);
|
|
|
|
|
|
|
|
if (event) {
|
|
|
|
qemuDriverLock(driver);
|
|
|
|
qemuDomainEventQueue(driver, event);
|
|
|
|
qemuDriverUnlock(driver);
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
 * Monitor callback for QEMU watchdog events.
 *
 * Always emits a watchdog event; for a PAUSE action on a running
 * domain it also transitions to PAUSED (releasing the lock-manager
 * lease and saving status). For a configured DUMP action it queues a
 * core-dump job on the driver's worker pool, handing that job its own
 * reference on @vm. Always returns 0.
 */
static int
qemuProcessHandleWatchdog(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
                          virDomainObjPtr vm,
                          int action)
{
    struct qemud_driver *driver = qemu_driver;
    virDomainEventPtr watchdogEvent = NULL;
    virDomainEventPtr lifecycleEvent = NULL;

    virDomainObjLock(vm);
    watchdogEvent = virDomainEventWatchdogNewFromObj(vm, action);

    if (action == VIR_DOMAIN_EVENT_WATCHDOG_PAUSE &&
        virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
        qemuDomainObjPrivatePtr priv = vm->privateData;
        VIR_DEBUG("Transitioned guest %s to paused state due to watchdog", vm->def->name);

        virDomainObjSetState(vm, VIR_DOMAIN_PAUSED, VIR_DOMAIN_PAUSED_WATCHDOG);
        lifecycleEvent = virDomainEventNewFromObj(vm,
                                                  VIR_DOMAIN_EVENT_SUSPENDED,
                                                  VIR_DOMAIN_EVENT_SUSPENDED_WATCHDOG);

        /* Release the lease while paused; keep lockState for resume */
        VIR_FREE(priv->lockState);
        if (virDomainLockProcessPause(driver->lockManager, vm, &priv->lockState) < 0)
            VIR_WARN("Unable to release lease on %s", vm->def->name);
        VIR_DEBUG("Preserving lock state '%s'", NULLSTR(priv->lockState));

        if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0) {
            VIR_WARN("Unable to save status on vm %s after watchdog event",
                     vm->def->name);
        }
    }

    /* NOTE(review): vm->def->watchdog is dereferenced without a NULL
     * check -- presumably watchdog events only arrive for domains with
     * a watchdog device configured; confirm against the monitor code. */
    if (vm->def->watchdog->action == VIR_DOMAIN_WATCHDOG_ACTION_DUMP) {
        struct qemuDomainWatchdogEvent *wdEvent;
        if (VIR_ALLOC(wdEvent) == 0) {
            wdEvent->action = VIR_DOMAIN_WATCHDOG_ACTION_DUMP;
            wdEvent->vm = vm;
            /* Hold an extra reference because we can't allow 'vm' to be
             * deleted before handling watchdog event is finished.
             */
            virObjectRef(vm);
            if (virThreadPoolSendJob(driver->workerPool, 0, wdEvent) < 0) {
                /* Job submission failed: drop our extra reference. If
                 * that was the last one, clear 'vm' so we do not unlock
                 * a disposed object below. */
                if (!virObjectUnref(vm))
                    vm = NULL;
                VIR_FREE(wdEvent);
            }
        } else {
            virReportOOMError();
        }
    }

    if (vm)
        virDomainObjUnlock(vm);

    /* Events queued after the domain lock is dropped */
    if (watchdogEvent || lifecycleEvent) {
        qemuDriverLock(driver);
        if (watchdogEvent)
            qemuDomainEventQueue(driver, watchdogEvent);
        if (lifecycleEvent)
            qemuDomainEventQueue(driver, lifecycleEvent);
        qemuDriverUnlock(driver);
    }

    return 0;
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
 * Monitor callback for QEMU disk I/O error events.
 *
 * Emits both the legacy IOError event and the reason-carrying variant.
 * If the error action pauses the guest and the domain was RUNNING, it
 * transitions to PAUSED (releasing the lock-manager lease, saving
 * status) and emits a SUSPENDED lifecycle event. Unknown disk aliases
 * produce events with empty path/alias strings. Always returns 0.
 */
static int
qemuProcessHandleIOError(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
                         virDomainObjPtr vm,
                         const char *diskAlias,
                         int action,
                         const char *reason)
{
    struct qemud_driver *driver = qemu_driver;
    virDomainEventPtr ioErrorEvent = NULL;
    virDomainEventPtr ioErrorEvent2 = NULL;
    virDomainEventPtr lifecycleEvent = NULL;
    const char *srcPath;
    const char *devAlias;
    virDomainDiskDefPtr disk;

    virDomainObjLock(vm);
    disk = qemuProcessFindDomainDiskByAlias(vm, diskAlias);

    if (disk) {
        srcPath = disk->src;
        devAlias = disk->info.alias;
    } else {
        /* Disk not found: still report the error, just without details */
        srcPath = "";
        devAlias = "";
    }

    ioErrorEvent = virDomainEventIOErrorNewFromObj(vm, srcPath, devAlias, action);
    ioErrorEvent2 = virDomainEventIOErrorReasonNewFromObj(vm, srcPath, devAlias, action, reason);

    if (action == VIR_DOMAIN_EVENT_IO_ERROR_PAUSE &&
        virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
        qemuDomainObjPrivatePtr priv = vm->privateData;
        VIR_DEBUG("Transitioned guest %s to paused state due to IO error", vm->def->name);

        virDomainObjSetState(vm, VIR_DOMAIN_PAUSED, VIR_DOMAIN_PAUSED_IOERROR);
        lifecycleEvent = virDomainEventNewFromObj(vm,
                                                  VIR_DOMAIN_EVENT_SUSPENDED,
                                                  VIR_DOMAIN_EVENT_SUSPENDED_IOERROR);

        /* Release the lease while paused; keep lockState for resume */
        VIR_FREE(priv->lockState);
        if (virDomainLockProcessPause(driver->lockManager, vm, &priv->lockState) < 0)
            VIR_WARN("Unable to release lease on %s", vm->def->name);
        VIR_DEBUG("Preserving lock state '%s'", NULLSTR(priv->lockState));

        if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0)
            VIR_WARN("Unable to save status on vm %s after IO error", vm->def->name);
    }
    virDomainObjUnlock(vm);

    /* Events queued only after the domain lock is dropped */
    if (ioErrorEvent || ioErrorEvent2 || lifecycleEvent) {
        qemuDriverLock(driver);
        if (ioErrorEvent)
            qemuDomainEventQueue(driver, ioErrorEvent);
        if (ioErrorEvent2)
            qemuDomainEventQueue(driver, ioErrorEvent2);
        if (lifecycleEvent)
            qemuDomainEventQueue(driver, lifecycleEvent);
        qemuDriverUnlock(driver);
    }

    return 0;
}
|
|
|
|
|
2011-07-22 05:57:42 +00:00
|
|
|
static int
|
|
|
|
qemuProcessHandleBlockJob(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
|
|
|
|
virDomainObjPtr vm,
|
|
|
|
const char *diskAlias,
|
|
|
|
int type,
|
|
|
|
int status)
|
|
|
|
{
|
|
|
|
struct qemud_driver *driver = qemu_driver;
|
|
|
|
virDomainEventPtr event = NULL;
|
|
|
|
const char *path;
|
|
|
|
virDomainDiskDefPtr disk;
|
|
|
|
|
|
|
|
virDomainObjLock(vm);
|
|
|
|
disk = qemuProcessFindDomainDiskByAlias(vm, diskAlias);
|
|
|
|
|
|
|
|
if (disk) {
|
|
|
|
path = disk->src;
|
|
|
|
event = virDomainEventBlockJobNewFromObj(vm, path, type, status);
|
|
|
|
}
|
|
|
|
|
|
|
|
virDomainObjUnlock(vm);
|
|
|
|
|
|
|
|
if (event) {
|
|
|
|
qemuDriverLock(driver);
|
|
|
|
qemuDomainEventQueue(driver, event);
|
|
|
|
qemuDriverUnlock(driver);
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
2011-02-14 16:09:39 +00:00
|
|
|
|
|
|
|
/*
 * Monitor callback for graphics (e.g. VNC/SPICE) connection events.
 *
 * Builds local/remote address structures and an authentication subject
 * (x509 distinguished name and/or SASL username, when provided), then
 * emits a graphics event. On allocation failure everything built so
 * far is freed and -1 is returned; otherwise returns 0.
 *
 * NOTE(review): on success the event presumably takes ownership of
 * localAddr/remoteAddr/subject (they are not freed here) -- confirm
 * against virDomainEventGraphicsNewFromObj.
 */
static int
qemuProcessHandleGraphics(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
                          virDomainObjPtr vm,
                          int phase,
                          int localFamily,
                          const char *localNode,
                          const char *localService,
                          int remoteFamily,
                          const char *remoteNode,
                          const char *remoteService,
                          const char *authScheme,
                          const char *x509dname,
                          const char *saslUsername)
{
    struct qemud_driver *driver = qemu_driver;
    virDomainEventPtr event;
    virDomainEventGraphicsAddressPtr localAddr = NULL;
    virDomainEventGraphicsAddressPtr remoteAddr = NULL;
    virDomainEventGraphicsSubjectPtr subject = NULL;
    int i;

    if (VIR_ALLOC(localAddr) < 0)
        goto no_memory;
    localAddr->family = localFamily;
    if (!(localAddr->service = strdup(localService)) ||
        !(localAddr->node = strdup(localNode)))
        goto no_memory;

    if (VIR_ALLOC(remoteAddr) < 0)
        goto no_memory;
    remoteAddr->family = remoteFamily;
    if (!(remoteAddr->service = strdup(remoteService)) ||
        !(remoteAddr->node = strdup(remoteNode)))
        goto no_memory;

    if (VIR_ALLOC(subject) < 0)
        goto no_memory;
    /* Each provided credential becomes one (type, name) identity entry */
    if (x509dname) {
        if (VIR_REALLOC_N(subject->identities, subject->nidentity+1) < 0)
            goto no_memory;
        subject->nidentity++;
        if (!(subject->identities[subject->nidentity-1].type = strdup("x509dname")) ||
            !(subject->identities[subject->nidentity-1].name = strdup(x509dname)))
            goto no_memory;
    }
    if (saslUsername) {
        if (VIR_REALLOC_N(subject->identities, subject->nidentity+1) < 0)
            goto no_memory;
        subject->nidentity++;
        if (!(subject->identities[subject->nidentity-1].type = strdup("saslUsername")) ||
            !(subject->identities[subject->nidentity-1].name = strdup(saslUsername)))
            goto no_memory;
    }

    /* Domain lock is only needed around event construction */
    virDomainObjLock(vm);
    event = virDomainEventGraphicsNewFromObj(vm, phase, localAddr, remoteAddr, authScheme, subject);
    virDomainObjUnlock(vm);

    if (event) {
        qemuDriverLock(driver);
        qemuDomainEventQueue(driver, event);
        qemuDriverUnlock(driver);
    }

    return 0;

no_memory:
    /* Unified cleanup: frees whatever subset was allocated before the
     * failure (VIR_FREE tolerates NULL members) */
    virReportOOMError();
    if (localAddr) {
        VIR_FREE(localAddr->service);
        VIR_FREE(localAddr->node);
        VIR_FREE(localAddr);
    }
    if (remoteAddr) {
        VIR_FREE(remoteAddr->service);
        VIR_FREE(remoteAddr->node);
        VIR_FREE(remoteAddr);
    }
    if (subject) {
        for (i = 0 ; i < subject->nidentity ; i++) {
            VIR_FREE(subject->identities[i].type);
            VIR_FREE(subject->identities[i].name);
        }
        VIR_FREE(subject->identities);
        VIR_FREE(subject);
    }

    return -1;
}
|
|
|
|
|
|
|
|
|
|
|
|
static void qemuProcessHandleMonitorDestroy(qemuMonitorPtr mon,
|
|
|
|
virDomainObjPtr vm)
|
|
|
|
{
|
2011-03-03 16:54:08 +00:00
|
|
|
qemuDomainObjPrivatePtr priv;
|
|
|
|
|
|
|
|
virDomainObjLock(vm);
|
|
|
|
priv = vm->privateData;
|
2011-02-14 16:09:39 +00:00
|
|
|
if (priv->mon == mon)
|
|
|
|
priv->mon = NULL;
|
2012-07-11 13:35:46 +00:00
|
|
|
if (virObjectUnref(vm))
|
2011-03-03 16:54:08 +00:00
|
|
|
virDomainObjUnlock(vm);
|
2011-02-14 16:09:39 +00:00
|
|
|
}
|
|
|
|
|
2012-03-23 13:44:50 +00:00
|
|
|
/*
 * Monitor callback for removable-media tray open/close events.
 *
 * When the device alias matches a known disk, records the new tray
 * status on the disk definition, saves the status file, and emits a
 * tray-change event. Unknown aliases are ignored. Always returns 0.
 */
static int
qemuProcessHandleTrayChange(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
                            virDomainObjPtr vm,
                            const char *devAlias,
                            int reason)
{
    struct qemud_driver *driver = qemu_driver;
    virDomainEventPtr event = NULL;
    virDomainDiskDefPtr disk;

    virDomainObjLock(vm);
    disk = qemuProcessFindDomainDiskByAlias(vm, devAlias);

    if (disk) {
        event = virDomainEventTrayChangeNewFromObj(vm,
                                                   devAlias,
                                                   reason);
        /* Update disk tray status */
        if (reason == VIR_DOMAIN_EVENT_TRAY_CHANGE_OPEN)
            disk->tray_status = VIR_DOMAIN_DISK_TRAY_OPEN;
        else if (reason == VIR_DOMAIN_EVENT_TRAY_CHANGE_CLOSE)
            disk->tray_status = VIR_DOMAIN_DISK_TRAY_CLOSED;

        if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0) {
            VIR_WARN("Unable to save status on vm %s after tray moved event",
                     vm->def->name);
        }
    }

    virDomainObjUnlock(vm);

    /* Event queued only after the domain lock is dropped */
    if (event) {
        qemuDriverLock(driver);
        qemuDomainEventQueue(driver, event);
        qemuDriverUnlock(driver);
    }

    return 0;
}
|
|
|
|
|
2012-03-23 14:43:14 +00:00
|
|
|
/*
 * Monitor callback for the QMP WAKEUP event (guest resumed from a
 * power-management suspend).
 *
 * Always emits a PM-wakeup event; additionally, if the domain was in
 * the PMSUSPENDED state, transitions it back to RUNNING, saves the
 * status file and emits a STARTED/WAKEUP lifecycle event. Always
 * returns 0.
 */
static int
qemuProcessHandlePMWakeup(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
                          virDomainObjPtr vm)
{
    struct qemud_driver *driver = qemu_driver;
    virDomainEventPtr event = NULL;
    virDomainEventPtr lifecycleEvent = NULL;

    virDomainObjLock(vm);
    event = virDomainEventPMWakeupNewFromObj(vm);

    /* Don't set domain status back to running if it wasn't paused
     * from guest side, otherwise it can just cause confusion.
     */
    if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PMSUSPENDED) {
        VIR_DEBUG("Transitioned guest %s from pmsuspended to running "
                  "state due to QMP wakeup event", vm->def->name);

        virDomainObjSetState(vm, VIR_DOMAIN_RUNNING,
                             VIR_DOMAIN_RUNNING_WAKEUP);
        lifecycleEvent = virDomainEventNewFromObj(vm,
                                                  VIR_DOMAIN_EVENT_STARTED,
                                                  VIR_DOMAIN_EVENT_STARTED_WAKEUP);

        if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0) {
            VIR_WARN("Unable to save status on vm %s after wakeup event",
                     vm->def->name);
        }
    }

    virDomainObjUnlock(vm);

    /* Events queued only after the domain lock is dropped */
    if (event || lifecycleEvent) {
        qemuDriverLock(driver);
        if (event)
            qemuDomainEventQueue(driver, event);
        if (lifecycleEvent)
            qemuDomainEventQueue(driver, lifecycleEvent);
        qemuDriverUnlock(driver);
    }

    return 0;
}
|
2012-03-23 13:44:50 +00:00
|
|
|
|
2012-03-23 14:50:36 +00:00
|
|
|
static int
|
|
|
|
qemuProcessHandlePMSuspend(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
|
|
|
|
virDomainObjPtr vm)
|
|
|
|
{
|
|
|
|
struct qemud_driver *driver = qemu_driver;
|
|
|
|
virDomainEventPtr event = NULL;
|
2012-09-06 15:00:43 +00:00
|
|
|
virDomainEventPtr lifecycleEvent = NULL;
|
2012-03-23 14:50:36 +00:00
|
|
|
|
|
|
|
virDomainObjLock(vm);
|
|
|
|
event = virDomainEventPMSuspendNewFromObj(vm);
|
|
|
|
|
2012-03-14 15:26:54 +00:00
|
|
|
if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
|
2012-06-15 16:00:13 +00:00
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
2012-03-14 15:26:54 +00:00
|
|
|
VIR_DEBUG("Transitioned guest %s to pmsuspended state due to "
|
|
|
|
"QMP suspend event", vm->def->name);
|
|
|
|
|
|
|
|
virDomainObjSetState(vm, VIR_DOMAIN_PMSUSPENDED,
|
|
|
|
VIR_DOMAIN_PMSUSPENDED_UNKNOWN);
|
2012-09-06 15:00:43 +00:00
|
|
|
lifecycleEvent =
|
|
|
|
virDomainEventNewFromObj(vm,
|
|
|
|
VIR_DOMAIN_EVENT_PMSUSPENDED,
|
|
|
|
VIR_DOMAIN_EVENT_PMSUSPENDED_MEMORY);
|
2012-03-14 15:26:54 +00:00
|
|
|
|
|
|
|
if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0) {
|
|
|
|
VIR_WARN("Unable to save status on vm %s after suspend event",
|
|
|
|
vm->def->name);
|
|
|
|
}
|
2012-06-15 16:00:13 +00:00
|
|
|
|
|
|
|
if (priv->agent)
|
|
|
|
qemuAgentNotifyEvent(priv->agent, QEMU_AGENT_EVENT_SUSPEND);
|
2012-03-14 15:26:54 +00:00
|
|
|
}
|
|
|
|
|
2012-03-23 14:50:36 +00:00
|
|
|
virDomainObjUnlock(vm);
|
|
|
|
|
2012-09-06 15:00:43 +00:00
|
|
|
if (event || lifecycleEvent) {
|
2012-03-23 14:50:36 +00:00
|
|
|
qemuDriverLock(driver);
|
2012-09-06 15:00:43 +00:00
|
|
|
if (event)
|
|
|
|
qemuDomainEventQueue(driver, event);
|
|
|
|
if (lifecycleEvent)
|
|
|
|
qemuDomainEventQueue(driver, lifecycleEvent);
|
2012-03-23 14:50:36 +00:00
|
|
|
qemuDriverUnlock(driver);
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2012-07-12 15:45:57 +00:00
|
|
|
static int
|
|
|
|
qemuProcessHandleBalloonChange(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
|
|
|
|
virDomainObjPtr vm,
|
|
|
|
unsigned long long actual)
|
|
|
|
{
|
|
|
|
struct qemud_driver *driver = qemu_driver;
|
|
|
|
virDomainEventPtr event;
|
|
|
|
|
|
|
|
virDomainObjLock(vm);
|
|
|
|
event = virDomainEventBalloonChangeNewFromObj(vm, actual);
|
|
|
|
|
|
|
|
VIR_DEBUG("Updating balloon from %lld to %lld kb",
|
|
|
|
vm->def->mem.cur_balloon, actual);
|
|
|
|
vm->def->mem.cur_balloon = actual;
|
|
|
|
|
|
|
|
if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0)
|
|
|
|
VIR_WARN("unable to save domain status with balloon change");
|
|
|
|
|
|
|
|
virDomainObjUnlock(vm);
|
|
|
|
|
|
|
|
if (event) {
|
|
|
|
qemuDriverLock(driver);
|
|
|
|
qemuDomainEventQueue(driver, event);
|
|
|
|
qemuDriverUnlock(driver);
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
/* Dispatch table wiring QEMU monitor events to the handler functions
 * above.  Passed to qemuMonitorOpen() in qemuConnectMonitor(), so each
 * monitor created for a domain delivers its events through these. */
static qemuMonitorCallbacks monitorCallbacks = {
    .destroy = qemuProcessHandleMonitorDestroy,
    .eofNotify = qemuProcessHandleMonitorEOF,
    .errorNotify = qemuProcessHandleMonitorError,
    .diskSecretLookup = qemuProcessFindVolumeQcowPassphrase,
    .domainShutdown = qemuProcessHandleShutdown,
    .domainStop = qemuProcessHandleStop,
    .domainReset = qemuProcessHandleReset,
    .domainRTCChange = qemuProcessHandleRTCChange,
    .domainWatchdog = qemuProcessHandleWatchdog,
    .domainIOError = qemuProcessHandleIOError,
    .domainGraphics = qemuProcessHandleGraphics,
    .domainBlockJob = qemuProcessHandleBlockJob,
    .domainTrayChange = qemuProcessHandleTrayChange,
    .domainPMWakeup = qemuProcessHandlePMWakeup,
    .domainPMSuspend = qemuProcessHandlePMSuspend,
    .domainBalloonChange = qemuProcessHandleBalloonChange,
};
|
|
|
|
|
|
|
|
/*
 * Open the QEMU monitor for 'vm' and negotiate capabilities.
 *
 * Called with both the driver lock and the domain object lock held;
 * both are dropped around qemuMonitorOpen() because connecting to the
 * monitor socket can block for a long time.
 *
 * Returns 0 on success, -1 on failure.
 */
static int
qemuConnectMonitor(struct qemud_driver *driver, virDomainObjPtr vm)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    int ret = -1;
    qemuMonitorPtr mon = NULL;

    /* Label the daemon-side socket so the connection satisfies the
     * security driver before we attempt to open the monitor. */
    if (virSecurityManagerSetDaemonSocketLabel(driver->securityManager,
                                               vm->def) < 0) {
        VIR_ERROR(_("Failed to set security context for monitor for %s"),
                  vm->def->name);
        goto error;
    }

    /* Hold an extra reference because we can't allow 'vm' to be
     * deleted while the monitor is active; the reference is released
     * by qemuProcessHandleMonitorDestroy() (or just below if the open
     * fails and that callback will never run). */
    virObjectRef(vm);

    /* monStart records when we started blocking in qemuMonitorOpen()
     * so other threads can see how long the attempt has been running. */
    ignore_value(virTimeMillisNow(&priv->monStart));
    virDomainObjUnlock(vm);
    qemuDriverUnlock(driver);

    mon = qemuMonitorOpen(vm,
                          priv->monConfig,
                          priv->monJSON,
                          &monitorCallbacks);

    qemuDriverLock(driver);
    virDomainObjLock(vm);
    priv->monStart = 0;

    /* Open failed: drop the extra reference taken above, since the
     * monitor destroy callback will never fire to do it for us. */
    if (mon == NULL)
        virObjectUnref(vm);

    /* The domain may have died while the locks were dropped;
     * qemuMonitorClose(NULL) is a safe no-op on the failure path. */
    if (!virDomainObjIsActive(vm)) {
        qemuMonitorClose(mon);
        goto error;
    }
    priv->mon = mon;

    if (virSecurityManagerClearSocketLabel(driver->securityManager, vm->def) < 0) {
        VIR_ERROR(_("Failed to clear security context for monitor for %s"),
                  vm->def->name);
        goto error;
    }

    if (priv->mon == NULL) {
        VIR_INFO("Failed to connect monitor for %s", vm->def->name);
        goto error;
    }


    /* Negotiate QMP capabilities; this is the first command sent. */
    qemuDomainObjEnterMonitorWithDriver(driver, vm);
    ret = qemuMonitorSetCapabilities(priv->mon, priv->caps);
    qemuDomainObjExitMonitorWithDriver(driver, vm);

error:

    return ret;
}
|
|
|
|
|
|
|
|
/* Callback type used by qemuProcessReadLogOutput() to scan each chunk
 * of accumulated log output.  Returns <0 on error, 0 when the caller
 * should stop (success), >0 to keep reading more output. */
typedef int qemuProcessLogHandleOutput(virDomainObjPtr vm,
                                       const char *output,
                                       int fd);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Returns -1 for error, 0 on success
|
|
|
|
*/
|
|
|
|
/*
 * Poll the QEMU startup log on 'fd', accumulating output into 'buf'
 * (of size 'buflen') and repeatedly handing the whole buffer to
 * 'func' until it signals completion, the process dies, the buffer
 * fills, or 'timeout' seconds elapse.  Debug lines emitted by the
 * intermediate libvirt process are filtered out of the buffer so
 * 'func' and error messages only see real QEMU output.
 *
 * Returns -1 for error, 0 on success
 */
static int
qemuProcessReadLogOutput(virDomainObjPtr vm,
                         int fd,
                         char *buf,
                         size_t buflen,
                         qemuProcessLogHandleOutput func,
                         const char *what,
                         int timeout)
{
    int retries = (timeout*10);   /* polled every 100ms below */
    int got = 0;                  /* bytes accumulated in buf so far */
    char *debug = NULL;           /* marker identifying our own debug lines */
    int ret = -1;
    char *filter_next = buf;      /* first not-yet-filtered position */

    buf[0] = '\0';

    /* This relies on log message format generated by virLogFormatString() and
     * might need to be modified when message format changes. */
    if (virAsprintf(&debug, ": %d: debug : ", vm->pid) < 0) {
        virReportOOMError();
        return -1;
    }

    while (retries) {
        ssize_t func_ret, bytes;
        int isdead = 0;
        char *eol;

        /* Scan what we have so far before reading more, so a final
         * partial read still gets processed. */
        func_ret = func(vm, buf, fd);

        /* kill(pid, 0) probes for process existence without signalling */
        if (kill(vm->pid, 0) == -1 && errno == ESRCH)
            isdead = 1;

        /* Any failures should be detected before we read the log, so we
         * always have something useful to report on failure. */
        bytes = saferead(fd, buf+got, buflen-got-1);
        if (bytes < 0) {
            virReportSystemError(errno,
                                 _("Failure while reading %s log output"),
                                 what);
            goto cleanup;
        }

        got += bytes;
        buf[got] = '\0';

        /* Filter out debug messages from intermediate libvirt process.
         * Each complete line containing the 'debug' marker is removed
         * in place (memmove also shifts the trailing NUL); lines
         * without it are kept and the '\n' restored. */
        while ((eol = strchr(filter_next, '\n'))) {
            *eol = '\0';
            if (strstr(filter_next, debug)) {
                memmove(filter_next, eol + 1, got - (eol - buf));
                got -= eol + 1 - filter_next;
            } else {
                filter_next = eol + 1;
                *eol = '\n';
            }
        }

        if (got == buflen-1) {
            virReportError(VIR_ERR_INTERNAL_ERROR,
                           _("Out of space while reading %s log output: %s"),
                           what, buf);
            goto cleanup;
        }

        if (isdead) {
            virReportError(VIR_ERR_INTERNAL_ERROR,
                           _("Process exited while reading %s log output: %s"),
                           what, buf);
            goto cleanup;
        }

        /* func asked us to stop: propagate its 0 (done) or <0 (error) */
        if (func_ret <= 0) {
            ret = func_ret;
            goto cleanup;
        }

        usleep(100*1000);
        retries--;
    }

    virReportError(VIR_ERR_INTERNAL_ERROR,
                   _("Timed out while reading %s log output: %s"),
                   what, buf);

cleanup:
    VIR_FREE(debug);
    return ret;
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Look at a chunk of data from the QEMU stdout logs and try to
|
|
|
|
* find a TTY device, as indicated by a line like
|
|
|
|
*
|
|
|
|
* char device redirected to /dev/pts/3
|
|
|
|
*
|
|
|
|
* Returns -1 for error, 0 success, 1 continue reading
|
|
|
|
*/
|
|
|
|
/*
 * Look at a chunk of data from the QEMU stdout logs and try to
 * find a TTY device, as indicated by a line like
 *
 * char device redirected to /dev/pts/3
 *
 * Returns -1 for error, 0 success, 1 continue reading
 */
static int
qemuProcessExtractTTYPath(const char *haystack,
                          size_t *offset,
                          char **path)
{
    static const char needle[] = "char device redirected to";
    char *tmp, *dev;

    VIR_FREE(*path);
    /* First look for our magic string */
    if (!(tmp = strstr(haystack + *offset, needle))) {
        return 1;
    }
    /* sizeof(needle) counts the trailing NUL, so this skips the needle
     * plus exactly one character — the space before the path.  NOTE:
     * this silently assumes a single separating space. */
    tmp += sizeof(needle);
    dev = tmp;

    /*
     * And look for first whitespace character and nul terminate
     * to mark end of the pty path
     */
    while (*tmp) {
        if (c_isspace(*tmp)) {
            *path = strndup(dev, tmp-dev);
            if (*path == NULL) {
                virReportOOMError();
                return -1;
            }

            /* ... now further update offset till we get EOL */
            *offset = tmp - haystack;
            return 0;
        }
        tmp++;
    }

    /*
     * We found a path, but didn't find any whitespace,
     * so it must be still incomplete - we should at
     * least see a \n - indicate that we want to carry
     * on trying again
     */
    return 1;
}
|
|
|
|
|
2011-03-30 09:07:59 +00:00
|
|
|
static int
|
|
|
|
qemuProcessLookupPTYs(virDomainChrDefPtr *devices,
|
|
|
|
int count,
|
2011-06-08 16:25:11 +00:00
|
|
|
virHashTablePtr paths,
|
|
|
|
bool chardevfmt)
|
2011-03-30 09:07:59 +00:00
|
|
|
{
|
|
|
|
int i;
|
2011-06-08 16:25:11 +00:00
|
|
|
const char *prefix = chardevfmt ? "char" : "";
|
2011-03-30 09:07:59 +00:00
|
|
|
|
|
|
|
for (i = 0 ; i < count ; i++) {
|
|
|
|
virDomainChrDefPtr chr = devices[i];
|
|
|
|
if (chr->source.type == VIR_DOMAIN_CHR_TYPE_PTY) {
|
2011-06-08 16:25:11 +00:00
|
|
|
char id[32];
|
2011-03-30 09:07:59 +00:00
|
|
|
const char *path;
|
|
|
|
|
2011-06-08 16:25:11 +00:00
|
|
|
if (snprintf(id, sizeof(id), "%s%s",
|
|
|
|
prefix, chr->info.alias) >= sizeof(id))
|
2011-03-30 09:07:59 +00:00
|
|
|
return -1;
|
|
|
|
|
|
|
|
path = (const char *) virHashLookup(paths, id);
|
|
|
|
if (path == NULL) {
|
|
|
|
if (chr->source.data.file.path == NULL) {
|
|
|
|
/* neither the log output nor 'info chardev' had a
|
|
|
|
* pty path for this chardev, report an error
|
|
|
|
*/
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
_("no assigned pty for device %s"), id);
|
2011-03-30 09:07:59 +00:00
|
|
|
return -1;
|
|
|
|
} else {
|
|
|
|
/* 'info chardev' had no pty path for this chardev,
|
|
|
|
* but the log output had, so we're fine
|
|
|
|
*/
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
VIR_FREE(chr->source.data.file.path);
|
|
|
|
chr->source.data.file.path = strdup(path);
|
|
|
|
|
|
|
|
if (chr->source.data.file.path == NULL) {
|
|
|
|
virReportOOMError();
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
static int
|
|
|
|
qemuProcessFindCharDevicePTYsMonitor(virDomainObjPtr vm,
|
2012-08-20 16:44:14 +00:00
|
|
|
qemuCapsPtr caps,
|
2011-02-14 16:09:39 +00:00
|
|
|
virHashTablePtr paths)
|
|
|
|
{
|
2012-08-20 16:44:14 +00:00
|
|
|
bool chardevfmt = qemuCapsGet(caps, QEMU_CAPS_CHARDEV);
|
2011-06-08 16:25:11 +00:00
|
|
|
|
2011-03-30 09:07:59 +00:00
|
|
|
if (qemuProcessLookupPTYs(vm->def->serials, vm->def->nserials,
|
2011-06-08 16:25:11 +00:00
|
|
|
paths, chardevfmt) < 0)
|
2011-03-30 09:07:59 +00:00
|
|
|
return -1;
|
|
|
|
|
|
|
|
if (qemuProcessLookupPTYs(vm->def->parallels, vm->def->nparallels,
|
2011-06-08 16:25:11 +00:00
|
|
|
paths, chardevfmt) < 0)
|
2011-03-30 09:07:59 +00:00
|
|
|
return -1;
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2011-03-30 09:07:59 +00:00
|
|
|
if (qemuProcessLookupPTYs(vm->def->channels, vm->def->nchannels,
|
2011-06-08 16:25:11 +00:00
|
|
|
paths, chardevfmt) < 0)
|
2011-03-30 09:07:59 +00:00
|
|
|
return -1;
|
|
|
|
|
Allow multiple consoles per virtual guest
While Xen only has a single paravirt console, UML, and
QEMU both support multiple paravirt consoles. The LXC
driver can also be trivially made to support multiple
consoles. This patch extends the XML to allow multiple
<console> elements in the XML. It also makes the UML
and QEMU drivers support this config.
* src/conf/domain_conf.c, src/conf/domain_conf.h: Allow
multiple <console> devices
* src/lxc/lxc_driver.c, src/xen/xen_driver.c,
src/xenxs/xen_sxpr.c, src/xenxs/xen_xm.c: Update for
internal API changes
* src/security/security_selinux.c, src/security/virt-aa-helper.c:
Only label consoles that aren't a copy of the serial device
* src/qemu/qemu_command.c, src/qemu/qemu_driver.c,
src/qemu/qemu_process.c, src/uml/uml_conf.c,
src/uml/uml_driver.c: Support multiple console devices
* tests/qemuxml2xmltest.c, tests/qemuxml2argvtest.c: Extra
tests for multiple virtio consoles. Set QEMU_CAPS_CHARDEV
for all console /channel tests
* tests/qemuxml2argvdata/qemuxml2argv-channel-virtio-auto.args,
tests/qemuxml2argvdata/qemuxml2argv-channel-virtio.args
tests/qemuxml2argvdata/qemuxml2argv-console-virtio.args: Update
for correct chardev syntax
* tests/qemuxml2argvdata/qemuxml2argv-console-virtio-many.args,
tests/qemuxml2argvdata/qemuxml2argv-console-virtio-many.xml: New
test file
2011-02-23 18:27:23 +00:00
|
|
|
if (qemuProcessLookupPTYs(vm->def->consoles, vm->def->nconsoles,
|
|
|
|
paths, chardevfmt) < 0)
|
2011-03-30 09:07:59 +00:00
|
|
|
return -1;
|
2011-02-14 16:09:39 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Fallback pty discovery: parse QEMU's stdout log ('output') for
 * "char device redirected to ..." lines and assign the paths to the
 * domain's pty-backed chardevs.  Used when 'info chardev' data is
 * unavailable.  Returns <0 on error, 0 on success, >0 to keep reading
 * (propagated from qemuProcessExtractTTYPath via
 * qemuProcessReadLogOutput).
 */
static int
qemuProcessFindCharDevicePTYs(virDomainObjPtr vm,
                              const char *output,
                              int fd ATTRIBUTE_UNUSED)
{
    size_t offset = 0;
    int ret, i;

    /* The order in which QEMU prints out the PTY paths is
       the order in which it procsses its serial and parallel
       device args. This code must match that ordering.... */

    /* first comes the serial devices */
    for (i = 0 ; i < vm->def->nserials ; i++) {
        virDomainChrDefPtr chr = vm->def->serials[i];
        if (chr->source.type == VIR_DOMAIN_CHR_TYPE_PTY) {
            if ((ret = qemuProcessExtractTTYPath(output, &offset,
                                                 &chr->source.data.file.path)) != 0)
                return ret;
        }
    }

    /* then the parallel devices */
    for (i = 0 ; i < vm->def->nparallels ; i++) {
        virDomainChrDefPtr chr = vm->def->parallels[i];
        if (chr->source.type == VIR_DOMAIN_CHR_TYPE_PTY) {
            if ((ret = qemuProcessExtractTTYPath(output, &offset,
                                                 &chr->source.data.file.path)) != 0)
                return ret;
        }
    }

    /* then the channel devices */
    for (i = 0 ; i < vm->def->nchannels ; i++) {
        virDomainChrDefPtr chr = vm->def->channels[i];
        if (chr->source.type == VIR_DOMAIN_CHR_TYPE_PTY) {
            if ((ret = qemuProcessExtractTTYPath(output, &offset,
                                                 &chr->source.data.file.path)) != 0)
                return ret;
        }
    }

    for (i = 0 ; i < vm->def->nconsoles ; i++) {
        virDomainChrDefPtr chr = vm->def->consoles[i];
        /* For historical reasons, console[0] can be just an alias
         * for serial[0]; That's why we need to update it as well */
        if (i == 0 && vm->def->nserials &&
            chr->deviceType == VIR_DOMAIN_CHR_DEVICE_TYPE_CONSOLE &&
            chr->targetType == VIR_DOMAIN_CHR_CONSOLE_TARGET_TYPE_SERIAL) {
            /* copy the already-extracted serial[0] source rather than
             * consuming another pty line from the log */
            if ((ret = virDomainChrSourceDefCopy(&chr->source,
                                                 &((vm->def->serials[0])->source))) != 0)
                return ret;
        } else {
            /* only virtio consoles get their own pty line from QEMU */
            if (chr->source.type == VIR_DOMAIN_CHR_TYPE_PTY &&
                chr->targetType == VIR_DOMAIN_CHR_CONSOLE_TARGET_TYPE_VIRTIO) {
                if ((ret = qemuProcessExtractTTYPath(output, &offset,
                                                     &chr->source.data.file.path)) != 0)
                    return ret;
            }
        }
    }

    return 0;
}
|
|
|
|
|
Allow hash tables to use generic pointers as keys
Relax the restriction that the hash table key must be a string
by allowing an arbitrary hash code generator + comparison func
to be provided
* util/hash.c, util/hash.h: Allow any pointer as a key
* internal.h: Include stdbool.h as standard.
* conf/domain_conf.c, conf/domain_conf.c,
conf/nwfilter_params.c, nwfilter/nwfilter_gentech_driver.c,
nwfilter/nwfilter_gentech_driver.h, nwfilter/nwfilter_learnipaddr.c,
qemu/qemu_command.c, qemu/qemu_driver.c,
qemu/qemu_process.c, uml/uml_driver.c,
xen/xm_internal.c: s/char */void */ in hash callbacks
2011-02-22 15:11:59 +00:00
|
|
|
/* virHashCreate() data-free callback: releases one strdup'd pty path
 * stored in the alias -> path hash table. */
static void qemuProcessFreePtyPath(void *payload, const void *name ATTRIBUTE_UNUSED)
{
    VIR_FREE(payload);
}
|
|
|
|
|
|
|
|
/* Append up to (maxlen - off - 1) bytes from 'logfd' into 'buf' at
 * offset 'off', always NUL-terminating the result.  Read failures are
 * treated as "no data" — this is a best-effort grab of log output for
 * error reporting, never an error itself. */
static void
qemuProcessReadLogFD(int logfd, char *buf, int maxlen, int off)
{
    char *dest = buf + off;
    int nread = saferead(logfd, dest, maxlen - off - 1);

    if (nread < 0)
        nread = 0;

    dest[nread] = '\0';
}
|
|
|
|
|
2011-05-05 16:32:21 +00:00
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
static int
|
|
|
|
qemuProcessWaitForMonitor(struct qemud_driver* driver,
|
2011-06-08 16:25:11 +00:00
|
|
|
virDomainObjPtr vm,
|
2012-08-20 16:44:14 +00:00
|
|
|
qemuCapsPtr caps,
|
2011-06-08 16:25:11 +00:00
|
|
|
off_t pos)
|
2011-02-14 16:09:39 +00:00
|
|
|
{
|
2011-05-05 16:32:21 +00:00
|
|
|
char *buf = NULL;
|
2011-04-03 09:21:19 +00:00
|
|
|
size_t buf_size = 4096; /* Plenty of space to get startup greeting */
|
2011-05-05 16:32:21 +00:00
|
|
|
int logfd = -1;
|
2011-02-14 16:09:39 +00:00
|
|
|
int ret = -1;
|
|
|
|
virHashTablePtr paths = NULL;
|
build: detect potentential uninitialized variables
Even with -Wuninitialized (which is part of autobuild.sh
--enable-compile-warnings=error), gcc does NOT catch this
use of an uninitialized variable:
{
if (cond)
goto error;
int a = 1;
error:
printf("%d", a);
}
which prints 0 (supposing the stack started life wiped) if
cond was true. Clang will catch it, but we don't use clang
as often. Using gcc -Wjump-misses-init catches it, but also
gives false positives:
{
if (cond)
goto error;
int a = 1;
return a;
error:
return 0;
}
Here, a was never used in the scope of the error block, so
declaring it after goto is technically fine (and clang agrees).
However, given that our HACKING already documents a preference
to C89 decl-before-statement, the false positive warning is
enough of a prod to comply with HACKING.
[Personally, I'd _really_ rather use C99 decl-after-statement
to minimize scope, but until gcc can efficiently and reliably
catch scoping and uninitialized usage bugs, I'll settle with
the compromise of enforcing a coding standard that happens to
reject false positives if it can also detect real bugs.]
* acinclude.m4 (LIBVIRT_COMPILE_WARNINGS): Add -Wjump-misses-init.
* src/util/util.c (__virExec): Adjust offenders.
* src/conf/domain_conf.c (virDomainTimerDefParseXML): Likewise.
* src/remote/remote_driver.c (doRemoteOpen): Likewise.
* src/phyp/phyp_driver.c (phypGetLparNAME, phypGetLparProfile)
(phypGetVIOSFreeSCSIAdapter, phypVolumeGetKey)
(phypGetStoragePoolDevice)
(phypVolumeGetPhysicalVolumeByStoragePool)
(phypVolumeGetPath): Likewise.
* src/vbox/vbox_tmpl.c (vboxNetworkUndefineDestroy)
(vboxNetworkCreate, vboxNetworkDumpXML)
(vboxNetworkDefineCreateXML): Likewise.
* src/xenapi/xenapi_driver.c (getCapsObject)
(xenapiDomainDumpXML): Likewise.
* src/xenapi/xenapi_utils.c (createVMRecordFromXml): Likewise.
* src/security/security_selinux.c (SELinuxGenNewContext):
Likewise.
* src/qemu/qemu_command.c (qemuBuildCommandLine): Likewise.
* src/qemu/qemu_hotplug.c (qemuDomainChangeEjectableMedia):
Likewise.
* src/qemu/qemu_process.c (qemuProcessWaitForMonitor): Likewise.
* src/qemu/qemu_monitor_text.c (qemuMonitorTextGetPtyPaths):
Likewise.
* src/qemu/qemu_driver.c (qemudDomainShutdown)
(qemudDomainBlockStats, qemudDomainMemoryPeek): Likewise.
* src/storage/storage_backend_iscsi.c
(virStorageBackendCreateIfaceIQN): Likewise.
* src/node_device/node_device_udev.c (udevProcessPCI): Likewise.
2011-04-01 15:41:45 +00:00
|
|
|
qemuDomainObjPrivatePtr priv;
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2011-05-05 16:32:21 +00:00
|
|
|
if (pos != -1) {
|
|
|
|
if ((logfd = qemuDomainOpenLog(driver, vm, pos)) < 0)
|
|
|
|
return -1;
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2011-05-05 16:32:21 +00:00
|
|
|
if (VIR_ALLOC_N(buf, buf_size) < 0) {
|
|
|
|
virReportOOMError();
|
2011-11-30 06:49:51 +00:00
|
|
|
goto closelog;
|
2011-05-05 16:32:21 +00:00
|
|
|
}
|
2011-04-03 09:21:19 +00:00
|
|
|
|
2011-05-05 16:32:21 +00:00
|
|
|
if (qemuProcessReadLogOutput(vm, logfd, buf, buf_size,
|
|
|
|
qemuProcessFindCharDevicePTYs,
|
|
|
|
"console", 30) < 0)
|
|
|
|
goto closelog;
|
|
|
|
}
|
2011-02-14 16:09:39 +00:00
|
|
|
|
|
|
|
VIR_DEBUG("Connect monitor to %p '%s'", vm, vm->def->name);
|
|
|
|
if (qemuConnectMonitor(driver, vm) < 0) {
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Try to get the pty path mappings again via the monitor. This is much more
|
|
|
|
* reliable if it's available.
|
|
|
|
* Note that the monitor itself can be on a pty, so we still need to try the
|
|
|
|
* log output method. */
|
2011-02-18 21:30:24 +00:00
|
|
|
paths = virHashCreate(0, qemuProcessFreePtyPath);
|
2011-02-17 21:14:58 +00:00
|
|
|
if (paths == NULL)
|
2011-02-14 16:09:39 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
build: detect potentential uninitialized variables
Even with -Wuninitialized (which is part of autobuild.sh
--enable-compile-warnings=error), gcc does NOT catch this
use of an uninitialized variable:
{
if (cond)
goto error;
int a = 1;
error:
printf("%d", a);
}
which prints 0 (supposing the stack started life wiped) if
cond was true. Clang will catch it, but we don't use clang
as often. Using gcc -Wjump-misses-init catches it, but also
gives false positives:
{
if (cond)
goto error;
int a = 1;
return a;
error:
return 0;
}
Here, a was never used in the scope of the error block, so
declaring it after goto is technically fine (and clang agrees).
However, given that our HACKING already documents a preference
to C89 decl-before-statement, the false positive warning is
enough of a prod to comply with HACKING.
[Personally, I'd _really_ rather use C99 decl-after-statement
to minimize scope, but until gcc can efficiently and reliably
catch scoping and uninitialized usage bugs, I'll settle with
the compromise of enforcing a coding standard that happens to
reject false positives if it can also detect real bugs.]
* acinclude.m4 (LIBVIRT_COMPILE_WARNINGS): Add -Wjump-misses-init.
* src/util/util.c (__virExec): Adjust offenders.
* src/conf/domain_conf.c (virDomainTimerDefParseXML): Likewise.
* src/remote/remote_driver.c (doRemoteOpen): Likewise.
* src/phyp/phyp_driver.c (phypGetLparNAME, phypGetLparProfile)
(phypGetVIOSFreeSCSIAdapter, phypVolumeGetKey)
(phypGetStoragePoolDevice)
(phypVolumeGetPhysicalVolumeByStoragePool)
(phypVolumeGetPath): Likewise.
* src/vbox/vbox_tmpl.c (vboxNetworkUndefineDestroy)
(vboxNetworkCreate, vboxNetworkDumpXML)
(vboxNetworkDefineCreateXML): Likewise.
* src/xenapi/xenapi_driver.c (getCapsObject)
(xenapiDomainDumpXML): Likewise.
* src/xenapi/xenapi_utils.c (createVMRecordFromXml): Likewise.
* src/security/security_selinux.c (SELinuxGenNewContext):
Likewise.
* src/qemu/qemu_command.c (qemuBuildCommandLine): Likewise.
* src/qemu/qemu_hotplug.c (qemuDomainChangeEjectableMedia):
Likewise.
* src/qemu/qemu_process.c (qemuProcessWaitForMonitor): Likewise.
* src/qemu/qemu_monitor_text.c (qemuMonitorTextGetPtyPaths):
Likewise.
* src/qemu/qemu_driver.c (qemudDomainShutdown)
(qemudDomainBlockStats, qemudDomainMemoryPeek): Likewise.
* src/storage/storage_backend_iscsi.c
(virStorageBackendCreateIfaceIQN): Likewise.
* src/node_device/node_device_udev.c (udevProcessPCI): Likewise.
2011-04-01 15:41:45 +00:00
|
|
|
priv = vm->privateData;
|
qemu: fix crash when mixing sync and async monitor jobs
Currently, we attempt to run sync job and async job at the same time. It
means that the monitor commands for two jobs can be run in any order.
In the function qemuDomainObjEnterMonitorInternal():
if (priv->job.active == QEMU_JOB_NONE && priv->job.asyncJob) {
if (qemuDomainObjBeginNestedJob(driver, obj) < 0)
We check whether the caller is an async job by priv->job.active and
priv->job.asynJob. But when an async job is running, and a sync job is
also running at the time of the check, then priv->job.active is not
QEMU_JOB_NONE. So we cannot check whether the caller is an async job
in the function qemuDomainObjEnterMonitorInternal(), and must instead
put the burden on the caller to tell us when an async command wants
to do a nested job.
Once the burden is on the caller, then only async monitor enters need
to worry about whether the VM is still running; for sync monitor enter,
the internal return is always 0, so lots of ignore_value can be dropped.
* src/qemu/THREADS.txt: Reflect new rules.
* src/qemu/qemu_domain.h (qemuDomainObjEnterMonitorAsync): New
prototype.
* src/qemu/qemu_process.h (qemuProcessStartCPUs)
(qemuProcessStopCPUs): Add parameter.
* src/qemu/qemu_migration.h (qemuMigrationToFile): Likewise.
(qemuMigrationWaitForCompletion): Make static.
* src/qemu/qemu_domain.c (qemuDomainObjEnterMonitorInternal): Add
parameter.
(qemuDomainObjEnterMonitorAsync): New function.
(qemuDomainObjEnterMonitor, qemuDomainObjEnterMonitorWithDriver):
Update callers.
* src/qemu/qemu_driver.c (qemuDomainSaveInternal)
(qemudDomainCoreDump, doCoreDump, processWatchdogEvent)
(qemudDomainSuspend, qemudDomainResume, qemuDomainSaveImageStartVM)
(qemuDomainSnapshotCreateActive, qemuDomainRevertToSnapshot):
Likewise.
* src/qemu/qemu_process.c (qemuProcessStopCPUs)
(qemuProcessFakeReboot, qemuProcessRecoverMigration)
(qemuProcessRecoverJob, qemuProcessStart): Likewise.
* src/qemu/qemu_migration.c (qemuMigrationToFile)
(qemuMigrationWaitForCompletion, qemuMigrationUpdateJobStatus)
(qemuMigrationJobStart, qemuDomainMigrateGraphicsRelocate)
(doNativeMigrate, doTunnelMigrate, qemuMigrationPerformJob)
(qemuMigrationPerformPhase, qemuMigrationFinish)
(qemuMigrationConfirm): Likewise.
* src/qemu/qemu_hotplug.c: Drop unneeded ignore_value.
2011-07-28 23:18:24 +00:00
|
|
|
qemuDomainObjEnterMonitorWithDriver(driver, vm);
|
2011-02-14 16:09:39 +00:00
|
|
|
ret = qemuMonitorGetPtyPaths(priv->mon, paths);
|
|
|
|
qemuDomainObjExitMonitorWithDriver(driver, vm);
|
|
|
|
|
|
|
|
VIR_DEBUG("qemuMonitorGetPtyPaths returned %i", ret);
|
|
|
|
if (ret == 0)
|
2012-08-20 16:44:14 +00:00
|
|
|
ret = qemuProcessFindCharDevicePTYsMonitor(vm, caps, paths);
|
2011-02-14 16:09:39 +00:00
|
|
|
|
|
|
|
cleanup:
|
2011-02-18 21:30:24 +00:00
|
|
|
virHashFree(paths);
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2011-08-04 20:07:58 +00:00
|
|
|
if (pos != -1 && kill(vm->pid, 0) == -1 && errno == ESRCH) {
|
2011-02-14 16:09:39 +00:00
|
|
|
/* VM is dead, any other error raised in the interim is probably
|
|
|
|
* not as important as the qemu cmdline output */
|
2011-04-03 09:21:19 +00:00
|
|
|
qemuProcessReadLogFD(logfd, buf, buf_size, strlen(buf));
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
_("process exited while connecting to monitor: %s"),
|
|
|
|
buf);
|
2011-02-14 16:09:39 +00:00
|
|
|
ret = -1;
|
|
|
|
}
|
|
|
|
|
2011-06-23 10:15:25 +00:00
|
|
|
closelog:
|
2011-02-14 16:09:39 +00:00
|
|
|
if (VIR_CLOSE(logfd) < 0) {
|
2011-04-03 09:21:19 +00:00
|
|
|
char ebuf[1024];
|
2011-02-14 16:09:39 +00:00
|
|
|
VIR_WARN("Unable to close logfile: %s",
|
2012-03-29 09:52:04 +00:00
|
|
|
virStrerror(errno, ebuf, sizeof(ebuf)));
|
2011-02-14 16:09:39 +00:00
|
|
|
}
|
|
|
|
|
2011-05-05 16:32:21 +00:00
|
|
|
VIR_FREE(buf);
|
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Query the monitor for the thread IDs backing each vCPU and cache them
 * in the domain private data (priv->vcpupids / priv->nvcpupids).
 *
 * Some qemu versions cannot report this mapping; in that case we fall
 * back to a single entry holding the main emulator PID so later pinning
 * code has something to operate on.
 *
 * Returns 0 on success (including the fallback), -1 on error. */
static int
qemuProcessDetectVcpuPIDs(struct qemud_driver *driver,
                          virDomainObjPtr vm)
{
    pid_t *cpupids = NULL;
    int ncpupids;
    qemuDomainObjPrivatePtr priv = vm->privateData;

    qemuDomainObjEnterMonitorWithDriver(driver, vm);
    /* failure to get the VCPU<-> PID mapping or to execute the query
     * command will not be treated fatal as some versions of qemu don't
     * support this command */
    if ((ncpupids = qemuMonitorGetCPUInfo(priv->mon, &cpupids)) <= 0) {
        qemuDomainObjExitMonitorWithDriver(driver, vm);
        /* best-effort query: discard whatever error the monitor raised */
        virResetLastError();

        /* Fallback: pretend there is exactly one vCPU backed by the
         * main qemu process itself. */
        priv->nvcpupids = 1;
        if (VIR_ALLOC_N(priv->vcpupids, priv->nvcpupids) < 0) {
            virReportOOMError();
            return -1;
        }
        priv->vcpupids[0] = vm->pid;
        return 0;
    }
    qemuDomainObjExitMonitorWithDriver(driver, vm);

    /* A partial answer is worse than none: the reported count must
     * match the configured number of vCPUs exactly. */
    if (ncpupids != vm->def->vcpus) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       _("got wrong number of vCPU pids from QEMU monitor. "
                         "got %d, wanted %d"),
                       ncpupids, vm->def->vcpus);
        VIR_FREE(cpupids);
        return -1;
    }

    /* Ownership of the cpupids array transfers to priv here. */
    priv->nvcpupids = ncpupids;
    priv->vcpupids = cpupids;
    return 0;
}
|
|
|
|
|
2011-06-20 07:16:16 +00:00
|
|
|
|
|
|
|
/*
 * Set NUMA memory policy for qemu process, to be run between
 * fork/exec of QEMU only.
 *
 * The policy comes from <numatune> in the domain XML:
 *  - 'static' placement binds to the nodeset given in the XML;
 *  - 'auto' placement binds to @nodemask, the advisory nodeset
 *    obtained from numad (callers must supply it in that case --
 *    NOTE(review): @nodemask is dereferenced unchecked below, confirm
 *    callers never pass NULL with 'auto' placement);
 *  - any other placement mode leaves the process policy untouched.
 *
 * Returns 0 on success (or when there is nothing to do), -1 on error.
 */
#if HAVE_NUMACTL
static int
qemuProcessInitNumaMemoryPolicy(virDomainObjPtr vm,
                                virBitmapPtr nodemask)
{
    nodemask_t mask;
    int mode = -1;
    int node = -1;
    int ret = -1;
    int i = 0;
    int maxnode = 0;
    bool warned = false;
    virDomainNumatuneDef numatune = vm->def->numatune;
    virBitmapPtr tmp_nodemask = NULL;

    if (numatune.memory.placement_mode ==
        VIR_DOMAIN_NUMATUNE_MEM_PLACEMENT_MODE_STATIC) {
        if (!numatune.memory.nodemask)
            return 0;
        VIR_DEBUG("Set NUMA memory policy with specified nodeset");
        tmp_nodemask = numatune.memory.nodemask;
    } else if (numatune.memory.placement_mode ==
               VIR_DOMAIN_NUMATUNE_MEM_PLACEMENT_MODE_AUTO) {
        VIR_DEBUG("Set NUMA memory policy with advisory nodeset from numad");
        tmp_nodemask = nodemask;
    } else {
        /* No memory tuning requested. */
        return 0;
    }

    if (numa_available() < 0) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       "%s", _("Host kernel is not aware of NUMA."));
        return -1;
    }

    maxnode = numa_max_node() + 1;
    /* Convert nodemask to NUMA bitmask. */
    nodemask_zero(&mask);
    i = -1;
    while ((i = virBitmapNextSetBit(tmp_nodemask, i)) >= 0) {
        /* NOTE(review): this bound looks like it should be >=, since
         * node number NUMA_NUM_NODES would overrun the nodemask_t in
         * nodemask_set() below -- confirm against libnuma. */
        if (i > NUMA_NUM_NODES) {
            virReportError(VIR_ERR_INTERNAL_ERROR,
                           _("Host cannot support NUMA node %d"), i);
            return -1;
        }
        /* NOTE(review): valid nodes are 0..maxnode-1, so i == maxnode
         * is already out of range; '>=' seems intended here too. */
        if (i > maxnode && !warned) {
            VIR_WARN("nodeset is out of range, there is only %d NUMA "
                     "nodes on host", maxnode);
            warned = true;
        }
        nodemask_set(&mask, i);
    }

    mode = numatune.memory.mode;

    if (mode == VIR_DOMAIN_NUMATUNE_MEM_STRICT) {
        /* Temporarily switch to strict binding for the membind call. */
        numa_set_bind_policy(1);
        numa_set_membind(&mask);
        numa_set_bind_policy(0);
    } else if (mode == VIR_DOMAIN_NUMATUNE_MEM_PREFERRED) {
        int nnodes = 0;
        for (i = 0; i < NUMA_NUM_NODES; i++) {
            if (nodemask_isset(&mask, i)) {
                node = i;
                nnodes++;
            }
        }

        /* numa_set_preferred() takes a single node only. */
        if (nnodes != 1) {
            virReportError(VIR_ERR_INTERNAL_ERROR,
                           "%s", _("NUMA memory tuning in 'preferred' mode "
                                   "only supports single node"));
            goto cleanup;
        }

        numa_set_bind_policy(0);
        numa_set_preferred(node);
    } else if (mode == VIR_DOMAIN_NUMATUNE_MEM_INTERLEAVE) {
        numa_set_interleave_mask(&mask);
    } else {
        /* XXX: Shouldn't go here, as we already do checking when
         * parsing domain XML.
         */
        virReportError(VIR_ERR_XML_ERROR,
                       "%s", _("Invalid mode for memory NUMA tuning."));
        goto cleanup;
    }

    ret = 0;

cleanup:
    return ret;
}
|
|
|
|
#else
/* Fallback when libvirt is built without numactl support: succeed when
 * the domain requests no NUMA memory tuning, fail otherwise. */
static int
qemuProcessInitNumaMemoryPolicy(virDomainObjPtr vm,
                                virBitmapPtr nodemask ATTRIBUTE_UNUSED)
{
    if (vm->def->numatune.memory.nodemask) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("libvirt is compiled without NUMA tuning support"));

        return -1;
    }

    return 0;
}
#endif
|
|
|
|
|
2012-03-24 01:35:20 +00:00
|
|
|
#if HAVE_NUMAD
|
2012-03-08 13:36:26 +00:00
|
|
|
static char *
|
|
|
|
qemuGetNumadAdvice(virDomainDefPtr def)
|
|
|
|
{
|
|
|
|
virCommandPtr cmd = NULL;
|
|
|
|
char *output = NULL;
|
|
|
|
|
2012-04-11 14:40:34 +00:00
|
|
|
cmd = virCommandNewArgList(NUMAD, "-w", NULL);
|
|
|
|
virCommandAddArgFormat(cmd, "%d:%llu", def->vcpus,
|
2012-05-08 16:04:37 +00:00
|
|
|
VIR_DIV_UP(def->mem.cur_balloon, 1024));
|
2012-03-08 13:36:26 +00:00
|
|
|
|
|
|
|
virCommandSetOutputBuffer(cmd, &output);
|
|
|
|
|
|
|
|
if (virCommandRun(cmd, NULL) < 0)
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
|
|
|
|
_("Failed to query numad for the "
|
|
|
|
"advisory nodeset"));
|
2012-03-08 13:36:26 +00:00
|
|
|
|
|
|
|
virCommandFree(cmd);
|
|
|
|
return output;
|
|
|
|
}
|
|
|
|
#else
|
|
|
|
static char *
|
|
|
|
qemuGetNumadAdvice(virDomainDefPtr def ATTRIBUTE_UNUSED)
|
|
|
|
{
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
|
|
|
|
_("numad is not available on this host"));
|
2012-03-08 13:36:26 +00:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
/*
 * To be run between fork/exec of QEMU only.
 *
 * Pin the (not yet exec'd) QEMU process to the pCPUs chosen either by
 * numad ('auto' placement, via @nodemask) or by the <vcpu cpuset=...>
 * attribute; with neither, pin to all host pCPUs so QEMU does not
 * inherit an accidental affinity from libvirtd.
 *
 * Returns 0 on success, -1 on error.
 */
static int
qemuProcessInitCpuAffinity(struct qemud_driver *driver,
                           virDomainObjPtr vm,
                           virBitmapPtr nodemask)
{
    int ret = -1;
    int i, hostcpus, maxcpu = QEMUD_CPUMASK_LEN;
    virNodeInfo nodeinfo;
    virBitmapPtr cpumap, cpumapToSet;

    VIR_DEBUG("Setting CPU affinity");

    if (nodeGetInfo(NULL, &nodeinfo) < 0)
        return -1;

    /* setaffinity fails if you set bits for CPUs which
     * aren't present, so we have to limit ourselves */
    hostcpus = VIR_NODEINFO_MAXCPUS(nodeinfo);
    if (maxcpu > hostcpus)
        maxcpu = hostcpus;

    cpumap = virBitmapNew(maxcpu);
    if (!cpumap) {
        virReportOOMError();
        return -1;
    }

    /* cpumapToSet may later be repointed at vm->def->cpumask; cpumap
     * itself is always ours to free at cleanup. */
    cpumapToSet = cpumap;

    if (vm->def->placement_mode == VIR_DOMAIN_CPU_PLACEMENT_MODE_AUTO) {
        VIR_DEBUG("Set CPU affinity with advisory nodeset from numad");
        /* numad returns the NUMA node list, convert it to cpumap.
         * NOTE(review): assumes @nodemask is non-NULL whenever placement
         * is 'auto' -- confirm callers guarantee this. */
        for (i = 0; i < driver->caps->host.nnumaCell; i++) {
            int j;
            int cur_ncpus = driver->caps->host.numaCell[i]->ncpus;
            bool result;
            if (virBitmapGetBit(nodemask, i, &result) < 0)
                goto cleanup;
            if (result) {
                for (j = 0; j < cur_ncpus; j++)
                    ignore_value(virBitmapSetBit(cpumap,
                                                 driver->caps->host.numaCell[i]->cpus[j]));
            }
        }
    } else {
        VIR_DEBUG("Set CPU affinity with specified cpuset");
        if (vm->def->cpumask) {
            cpumapToSet = vm->def->cpumask;
        } else {
            /* You may think this is redundant, but we can't assume libvirtd
             * itself is running on all pCPUs, so we need to explicitly set
             * the spawned QEMU instance to all pCPUs if no map is given in
             * its config file */
            virBitmapSetAll(cpumap);
        }
    }

    /* We are presuming we are running between fork/exec of QEMU
     * so use '0' to indicate our own process ID. No threads are
     * running at this point
     */
    if (virProcessInfoSetAffinity(0 /* Self */, cpumapToSet) < 0)
        goto cleanup;

    ret = 0;

cleanup:
    virBitmapFree(cpumap);
    return ret;
}
|
|
|
|
|
2011-09-06 08:23:47 +00:00
|
|
|
/* set link states to down on interfaces at qemu start */
|
|
|
|
static int
|
|
|
|
qemuProcessSetLinkStates(virDomainObjPtr vm)
|
|
|
|
{
|
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
|
|
|
virDomainDefPtr def = vm->def;
|
|
|
|
int i;
|
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
for (i = 0; i < def->nnets; i++) {
|
|
|
|
if (def->nets[i]->linkstate == VIR_DOMAIN_NET_INTERFACE_LINK_STATE_DOWN) {
|
|
|
|
VIR_DEBUG("Setting link state: %s", def->nets[i]->info.alias);
|
|
|
|
|
2012-08-20 16:44:14 +00:00
|
|
|
if (!qemuCapsGet(priv->caps, QEMU_CAPS_NETDEV)) {
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_NO_SUPPORT, "%s",
|
|
|
|
_("Setting of link state is not supported by this qemu"));
|
2011-09-06 08:23:47 +00:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = qemuMonitorSetLink(priv->mon,
|
|
|
|
def->nets[i]->info.alias,
|
|
|
|
VIR_DOMAIN_NET_INTERFACE_LINK_STATE_DOWN);
|
|
|
|
if (ret != 0) {
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_OPERATION_FAILED,
|
2011-09-06 08:23:47 +00:00
|
|
|
_("Couldn't set link state on interface: %s"), def->nets[i]->info.alias);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Set CPU affinities for vcpus if vcpupin xml provided. */
|
2011-03-29 13:41:25 +00:00
|
|
|
static int
|
|
|
|
qemuProcessSetVcpuAffinites(virConnectPtr conn,
|
|
|
|
virDomainObjPtr vm)
|
|
|
|
{
|
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
|
|
|
virDomainDefPtr def = vm->def;
|
|
|
|
virNodeInfo nodeinfo;
|
2012-09-14 07:46:59 +00:00
|
|
|
int vcpu, n;
|
2011-06-02 22:47:53 +00:00
|
|
|
int ret = -1;
|
2011-03-29 13:41:25 +00:00
|
|
|
|
|
|
|
if (virNodeGetInfo(conn, &nodeinfo) != 0) {
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!def->cputune.nvcpupin)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (priv->vcpupids == NULL) {
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_OPERATION_INVALID,
|
|
|
|
"%s", _("cpu affinity is not supported"));
|
2011-03-29 13:41:25 +00:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2011-06-14 03:13:11 +00:00
|
|
|
for (n = 0; n < def->cputune.nvcpupin; n++) {
|
|
|
|
vcpu = def->cputune.vcpupin[n]->vcpuid;
|
2011-03-29 13:41:25 +00:00
|
|
|
|
2012-09-14 07:46:59 +00:00
|
|
|
if (virProcessInfoSetAffinity(priv->vcpupids[vcpu],
|
|
|
|
def->cputune.vcpupin[n]->cpumask) < 0) {
|
2011-06-02 22:47:53 +00:00
|
|
|
goto cleanup;
|
2011-03-29 13:41:25 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-06-02 22:47:53 +00:00
|
|
|
ret = 0;
|
|
|
|
cleanup:
|
|
|
|
return ret;
|
2011-03-29 13:41:25 +00:00
|
|
|
}
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2012-08-21 09:18:34 +00:00
|
|
|
/* Set CPU affinities for emulator threads if emulatorpin xml provided. */
|
|
|
|
static int
|
|
|
|
qemuProcessSetEmulatorAffinites(virConnectPtr conn,
|
|
|
|
virDomainObjPtr vm)
|
|
|
|
{
|
|
|
|
virDomainDefPtr def = vm->def;
|
|
|
|
virNodeInfo nodeinfo;
|
|
|
|
int ret = -1;
|
|
|
|
|
|
|
|
if (virNodeGetInfo(conn, &nodeinfo) != 0)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
if (!def->cputune.emulatorpin)
|
|
|
|
return 0;
|
|
|
|
|
2012-09-14 07:46:59 +00:00
|
|
|
if (virProcessInfoSetAffinity(vm->pid,
|
|
|
|
def->cputune.emulatorpin->cpumask) < 0) {
|
2012-08-21 09:18:34 +00:00
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = 0;
|
|
|
|
cleanup:
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
static int
|
|
|
|
qemuProcessInitPasswords(virConnectPtr conn,
|
|
|
|
struct qemud_driver *driver,
|
2011-05-04 11:55:38 +00:00
|
|
|
virDomainObjPtr vm)
|
2011-02-14 16:09:39 +00:00
|
|
|
{
|
|
|
|
int ret = 0;
|
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
|
|
|
|
|
|
|
if (vm->def->ngraphics == 1) {
|
|
|
|
if (vm->def->graphics[0]->type == VIR_DOMAIN_GRAPHICS_TYPE_VNC) {
|
|
|
|
ret = qemuDomainChangeGraphicsPasswords(driver, vm,
|
|
|
|
VIR_DOMAIN_GRAPHICS_TYPE_VNC,
|
|
|
|
&vm->def->graphics[0]->data.vnc.auth,
|
|
|
|
driver->vncPassword);
|
|
|
|
} else if (vm->def->graphics[0]->type == VIR_DOMAIN_GRAPHICS_TYPE_SPICE) {
|
|
|
|
ret = qemuDomainChangeGraphicsPasswords(driver, vm,
|
|
|
|
VIR_DOMAIN_GRAPHICS_TYPE_SPICE,
|
|
|
|
&vm->def->graphics[0]->data.spice.auth,
|
|
|
|
driver->spicePassword);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (ret < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
2012-08-20 16:44:14 +00:00
|
|
|
if (qemuCapsGet(priv->caps, QEMU_CAPS_DEVICE)) {
|
2011-02-14 16:09:39 +00:00
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0 ; i < vm->def->ndisks ; i++) {
|
|
|
|
char *secret;
|
|
|
|
size_t secretLen;
|
2011-03-15 02:20:53 +00:00
|
|
|
const char *alias;
|
2011-02-14 16:09:39 +00:00
|
|
|
|
|
|
|
if (!vm->def->disks[i]->encryption ||
|
|
|
|
!vm->def->disks[i]->src)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (qemuProcessGetVolumeQcowPassphrase(conn,
|
|
|
|
vm->def->disks[i],
|
|
|
|
&secret, &secretLen) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
2011-03-15 02:20:53 +00:00
|
|
|
alias = vm->def->disks[i]->info.alias;
|
qemu: fix crash when mixing sync and async monitor jobs
Currently, we attempt to run sync job and async job at the same time. It
means that the monitor commands for two jobs can be run in any order.
In the function qemuDomainObjEnterMonitorInternal():
if (priv->job.active == QEMU_JOB_NONE && priv->job.asyncJob) {
if (qemuDomainObjBeginNestedJob(driver, obj) < 0)
We check whether the caller is an async job by priv->job.active and
priv->job.asynJob. But when an async job is running, and a sync job is
also running at the time of the check, then priv->job.active is not
QEMU_JOB_NONE. So we cannot check whether the caller is an async job
in the function qemuDomainObjEnterMonitorInternal(), and must instead
put the burden on the caller to tell us when an async command wants
to do a nested job.
Once the burden is on the caller, then only async monitor enters need
to worry about whether the VM is still running; for sync monitor enter,
the internal return is always 0, so lots of ignore_value can be dropped.
* src/qemu/THREADS.txt: Reflect new rules.
* src/qemu/qemu_domain.h (qemuDomainObjEnterMonitorAsync): New
prototype.
* src/qemu/qemu_process.h (qemuProcessStartCPUs)
(qemuProcessStopCPUs): Add parameter.
* src/qemu/qemu_migration.h (qemuMigrationToFile): Likewise.
(qemuMigrationWaitForCompletion): Make static.
* src/qemu/qemu_domain.c (qemuDomainObjEnterMonitorInternal): Add
parameter.
(qemuDomainObjEnterMonitorAsync): New function.
(qemuDomainObjEnterMonitor, qemuDomainObjEnterMonitorWithDriver):
Update callers.
* src/qemu/qemu_driver.c (qemuDomainSaveInternal)
(qemudDomainCoreDump, doCoreDump, processWatchdogEvent)
(qemudDomainSuspend, qemudDomainResume, qemuDomainSaveImageStartVM)
(qemuDomainSnapshotCreateActive, qemuDomainRevertToSnapshot):
Likewise.
* src/qemu/qemu_process.c (qemuProcessStopCPUs)
(qemuProcessFakeReboot, qemuProcessRecoverMigration)
(qemuProcessRecoverJob, qemuProcessStart): Likewise.
* src/qemu/qemu_migration.c (qemuMigrationToFile)
(qemuMigrationWaitForCompletion, qemuMigrationUpdateJobStatus)
(qemuMigrationJobStart, qemuDomainMigrateGraphicsRelocate)
(doNativeMigrate, doTunnelMigrate, qemuMigrationPerformJob)
(qemuMigrationPerformPhase, qemuMigrationFinish)
(qemuMigrationConfirm): Likewise.
* src/qemu/qemu_hotplug.c: Drop unneeded ignore_value.
2011-07-28 23:18:24 +00:00
|
|
|
qemuDomainObjEnterMonitorWithDriver(driver, vm);
|
2011-03-15 02:20:53 +00:00
|
|
|
ret = qemuMonitorSetDrivePassphrase(priv->mon, alias, secret);
|
2011-02-14 16:09:39 +00:00
|
|
|
VIR_FREE(secret);
|
|
|
|
qemuDomainObjExitMonitorWithDriver(driver, vm);
|
|
|
|
if (ret < 0)
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
cleanup:
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/* PCI vendor IDs of the devices QEMU emulates, used to recognise
 * devices in 'info pci' output. */
#define QEMU_PCI_VENDOR_INTEL 0x8086
#define QEMU_PCI_VENDOR_LSI_LOGIC 0x1000
#define QEMU_PCI_VENDOR_REDHAT 0x1af4
#define QEMU_PCI_VENDOR_CIRRUS 0x1013
#define QEMU_PCI_VENDOR_REALTEK 0x10ec
#define QEMU_PCI_VENDOR_AMD 0x1022
#define QEMU_PCI_VENDOR_ENSONIQ 0x1274
#define QEMU_PCI_VENDOR_VMWARE 0x15ad
#define QEMU_PCI_VENDOR_QEMU 0x1234

/* PCI product (device) IDs, grouped by device class */

/* virtio block device */
#define QEMU_PCI_PRODUCT_DISK_VIRTIO 0x1001

/* virtio memory balloon */
#define QEMU_PCI_PRODUCT_BALLOON_VIRTIO 0x1002

/* network interfaces */
#define QEMU_PCI_PRODUCT_NIC_NE2K 0x8029
#define QEMU_PCI_PRODUCT_NIC_PCNET 0x2000
#define QEMU_PCI_PRODUCT_NIC_RTL8139 0x8139
#define QEMU_PCI_PRODUCT_NIC_E1000 0x100E
#define QEMU_PCI_PRODUCT_NIC_VIRTIO 0x1000

/* video adapters */
#define QEMU_PCI_PRODUCT_VGA_CIRRUS 0x00b8
#define QEMU_PCI_PRODUCT_VGA_VMWARE 0x0405
#define QEMU_PCI_PRODUCT_VGA_STDVGA 0x1111

/* sound cards */
#define QEMU_PCI_PRODUCT_AUDIO_AC97 0x2415
#define QEMU_PCI_PRODUCT_AUDIO_ES1370 0x5000

/* disk controllers */
#define QEMU_PCI_PRODUCT_CONTROLLER_PIIX 0x7010
#define QEMU_PCI_PRODUCT_CONTROLLER_LSI 0x0012

/* watchdog; NOTE(review): the macro name says I63000ESB but the device
 * is the Intel i6300ESB -- looks like a long-standing spelling slip in
 * the identifier only, the ID value 0x25ab is correct. Confirm before
 * renaming since other files may reference it. */
#define QEMU_PCI_PRODUCT_WATCHDOG_I63000ESB 0x25ab
|
|
|
|
|
|
|
|
static int
|
|
|
|
qemuProcessAssignNextPCIAddress(virDomainDeviceInfo *info,
|
|
|
|
int vendor,
|
|
|
|
int product,
|
|
|
|
qemuMonitorPCIAddress *addrs,
|
|
|
|
int naddrs)
|
|
|
|
{
|
|
|
|
int found = 0;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
VIR_DEBUG("Look for %x:%x out of %d", vendor, product, naddrs);
|
|
|
|
|
|
|
|
for (i = 0 ; (i < naddrs) && !found; i++) {
|
|
|
|
VIR_DEBUG("Maybe %x:%x", addrs[i].vendor, addrs[i].product);
|
|
|
|
if (addrs[i].vendor == vendor &&
|
|
|
|
addrs[i].product == product) {
|
|
|
|
VIR_DEBUG("Match %d", i);
|
|
|
|
found = 1;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (!found) {
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Blank it out so this device isn't matched again */
|
|
|
|
addrs[i].vendor = 0;
|
|
|
|
addrs[i].product = 0;
|
|
|
|
|
|
|
|
if (info->type == VIR_DOMAIN_DEVICE_ADDRESS_TYPE_NONE)
|
|
|
|
info->type = VIR_DOMAIN_DEVICE_ADDRESS_TYPE_PCI;
|
|
|
|
|
|
|
|
if (info->type == VIR_DOMAIN_DEVICE_ADDRESS_TYPE_PCI) {
|
|
|
|
info->addr.pci.domain = addrs[i].addr.domain;
|
|
|
|
info->addr.pci.bus = addrs[i].addr.bus;
|
|
|
|
info->addr.pci.slot = addrs[i].addr.slot;
|
|
|
|
info->addr.pci.function = addrs[i].addr.function;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
qemuProcessGetPCIDiskVendorProduct(virDomainDiskDefPtr def,
|
|
|
|
unsigned *vendor,
|
|
|
|
unsigned *product)
|
|
|
|
{
|
|
|
|
switch (def->bus) {
|
|
|
|
case VIR_DOMAIN_DISK_BUS_VIRTIO:
|
|
|
|
*vendor = QEMU_PCI_VENDOR_REDHAT;
|
|
|
|
*product = QEMU_PCI_PRODUCT_DISK_VIRTIO;
|
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
qemuProcessGetPCINetVendorProduct(virDomainNetDefPtr def,
|
|
|
|
unsigned *vendor,
|
|
|
|
unsigned *product)
|
|
|
|
{
|
|
|
|
if (!def->model)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
if (STREQ(def->model, "ne2k_pci")) {
|
|
|
|
*vendor = QEMU_PCI_VENDOR_REALTEK;
|
|
|
|
*product = QEMU_PCI_PRODUCT_NIC_NE2K;
|
|
|
|
} else if (STREQ(def->model, "pcnet")) {
|
|
|
|
*vendor = QEMU_PCI_VENDOR_AMD;
|
|
|
|
*product = QEMU_PCI_PRODUCT_NIC_PCNET;
|
|
|
|
} else if (STREQ(def->model, "rtl8139")) {
|
|
|
|
*vendor = QEMU_PCI_VENDOR_REALTEK;
|
|
|
|
*product = QEMU_PCI_PRODUCT_NIC_RTL8139;
|
|
|
|
} else if (STREQ(def->model, "e1000")) {
|
|
|
|
*vendor = QEMU_PCI_VENDOR_INTEL;
|
|
|
|
*product = QEMU_PCI_PRODUCT_NIC_E1000;
|
|
|
|
} else if (STREQ(def->model, "virtio")) {
|
|
|
|
*vendor = QEMU_PCI_VENDOR_REDHAT;
|
|
|
|
*product = QEMU_PCI_PRODUCT_NIC_VIRTIO;
|
|
|
|
} else {
|
|
|
|
VIR_INFO("Unexpected NIC model %s, cannot get PCI address",
|
|
|
|
def->model);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
qemuProcessGetPCIControllerVendorProduct(virDomainControllerDefPtr def,
|
|
|
|
unsigned *vendor,
|
|
|
|
unsigned *product)
|
|
|
|
{
|
|
|
|
switch (def->type) {
|
|
|
|
case VIR_DOMAIN_CONTROLLER_TYPE_SCSI:
|
|
|
|
*vendor = QEMU_PCI_VENDOR_LSI_LOGIC;
|
|
|
|
*product = QEMU_PCI_PRODUCT_CONTROLLER_LSI;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case VIR_DOMAIN_CONTROLLER_TYPE_FDC:
|
|
|
|
/* XXX we could put in the ISA bridge address, but
|
|
|
|
that's not technically the FDC's address */
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
case VIR_DOMAIN_CONTROLLER_TYPE_IDE:
|
|
|
|
*vendor = QEMU_PCI_VENDOR_INTEL;
|
|
|
|
*product = QEMU_PCI_PRODUCT_CONTROLLER_PIIX;
|
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
|
|
|
VIR_INFO("Unexpected controller type %s, cannot get PCI address",
|
|
|
|
virDomainControllerTypeToString(def->type));
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
qemuProcessGetPCIVideoVendorProduct(virDomainVideoDefPtr def,
|
|
|
|
unsigned *vendor,
|
|
|
|
unsigned *product)
|
|
|
|
{
|
|
|
|
switch (def->type) {
|
|
|
|
case VIR_DOMAIN_VIDEO_TYPE_CIRRUS:
|
|
|
|
*vendor = QEMU_PCI_VENDOR_CIRRUS;
|
|
|
|
*product = QEMU_PCI_PRODUCT_VGA_CIRRUS;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case VIR_DOMAIN_VIDEO_TYPE_VGA:
|
|
|
|
*vendor = QEMU_PCI_VENDOR_QEMU;
|
|
|
|
*product = QEMU_PCI_PRODUCT_VGA_STDVGA;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case VIR_DOMAIN_VIDEO_TYPE_VMVGA:
|
|
|
|
*vendor = QEMU_PCI_VENDOR_VMWARE;
|
|
|
|
*product = QEMU_PCI_PRODUCT_VGA_VMWARE;
|
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
qemuProcessGetPCISoundVendorProduct(virDomainSoundDefPtr def,
|
|
|
|
unsigned *vendor,
|
|
|
|
unsigned *product)
|
|
|
|
{
|
|
|
|
switch (def->model) {
|
|
|
|
case VIR_DOMAIN_SOUND_MODEL_ES1370:
|
|
|
|
*vendor = QEMU_PCI_VENDOR_ENSONIQ;
|
|
|
|
*product = QEMU_PCI_PRODUCT_AUDIO_ES1370;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case VIR_DOMAIN_SOUND_MODEL_AC97:
|
|
|
|
*vendor = QEMU_PCI_VENDOR_INTEL;
|
|
|
|
*product = QEMU_PCI_PRODUCT_AUDIO_AC97;
|
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
qemuProcessGetPCIWatchdogVendorProduct(virDomainWatchdogDefPtr def,
|
|
|
|
unsigned *vendor,
|
|
|
|
unsigned *product)
|
|
|
|
{
|
|
|
|
switch (def->model) {
|
|
|
|
case VIR_DOMAIN_WATCHDOG_MODEL_I6300ESB:
|
|
|
|
*vendor = QEMU_PCI_VENDOR_INTEL;
|
|
|
|
*product = QEMU_PCI_PRODUCT_WATCHDOG_I63000ESB;
|
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
qemuProcessGetPCIMemballoonVendorProduct(virDomainMemballoonDefPtr def,
|
|
|
|
unsigned *vendor,
|
|
|
|
unsigned *product)
|
|
|
|
{
|
|
|
|
switch (def->model) {
|
|
|
|
case VIR_DOMAIN_MEMBALLOON_MODEL_VIRTIO:
|
|
|
|
*vendor = QEMU_PCI_VENDOR_REDHAT;
|
|
|
|
*product = QEMU_PCI_PRODUCT_BALLOON_VIRTIO;
|
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
 * This entire method assumes that PCI devices in 'info pci'
 * match ordering of devices specified on the command line
 * wrt to devices of matching vendor+product
 *
 * XXXX this might not be a valid assumption if we assign
 * some static addrs on CLI. Have to check that...
 */
/* Walk every device class in the domain definition and, for each
 * device with a recognisable PCI identity, consume the next matching
 * entry from the monitor-reported address list and record its PCI
 * address. Returns 0 on success, -1 (with error reported) when a
 * device that should have a PCI address cannot be matched. */
static int
qemuProcessDetectPCIAddresses(virDomainObjPtr vm,
                              qemuMonitorPCIAddress *addrs,
                              int naddrs)
{
    unsigned int vendor = 0, product = 0;
    int i;

    /* XXX should all these vendor/product IDs be kept in the
     * actual device data structure instead ?
     */

    for (i = 0 ; i < vm->def->ndisks ; i++) {
        /* devices with no PCI identity (e.g. IDE disks) are skipped */
        if (qemuProcessGetPCIDiskVendorProduct(vm->def->disks[i], &vendor, &product) < 0)
            continue;

        if (qemuProcessAssignNextPCIAddress(&(vm->def->disks[i]->info),
                                            vendor, product,
                                            addrs, naddrs) < 0) {
            virReportError(VIR_ERR_INTERNAL_ERROR,
                           _("cannot find PCI address for VirtIO disk %s"),
                           vm->def->disks[i]->dst);
            return -1;
        }
    }

    for (i = 0 ; i < vm->def->nnets ; i++) {
        if (qemuProcessGetPCINetVendorProduct(vm->def->nets[i], &vendor, &product) < 0)
            continue;

        if (qemuProcessAssignNextPCIAddress(&(vm->def->nets[i]->info),
                                            vendor, product,
                                            addrs, naddrs) < 0) {
            virReportError(VIR_ERR_INTERNAL_ERROR,
                           _("cannot find PCI address for %s NIC"),
                           vm->def->nets[i]->model);
            return -1;
        }
    }

    for (i = 0 ; i < vm->def->ncontrollers ; i++) {
        if (qemuProcessGetPCIControllerVendorProduct(vm->def->controllers[i], &vendor, &product) < 0)
            continue;

        if (qemuProcessAssignNextPCIAddress(&(vm->def->controllers[i]->info),
                                            vendor, product,
                                            addrs, naddrs) < 0) {
            virReportError(VIR_ERR_INTERNAL_ERROR,
                           _("cannot find PCI address for controller %s"),
                           virDomainControllerTypeToString(vm->def->controllers[i]->type));
            return -1;
        }
    }

    for (i = 0 ; i < vm->def->nvideos ; i++) {
        if (qemuProcessGetPCIVideoVendorProduct(vm->def->videos[i], &vendor, &product) < 0)
            continue;

        if (qemuProcessAssignNextPCIAddress(&(vm->def->videos[i]->info),
                                            vendor, product,
                                            addrs, naddrs) < 0) {
            virReportError(VIR_ERR_INTERNAL_ERROR,
                           _("cannot find PCI address for video adapter %s"),
                           virDomainVideoTypeToString(vm->def->videos[i]->type));
            return -1;
        }
    }

    for (i = 0 ; i < vm->def->nsounds ; i++) {
        if (qemuProcessGetPCISoundVendorProduct(vm->def->sounds[i], &vendor, &product) < 0)
            continue;

        if (qemuProcessAssignNextPCIAddress(&(vm->def->sounds[i]->info),
                                            vendor, product,
                                            addrs, naddrs) < 0) {
            virReportError(VIR_ERR_INTERNAL_ERROR,
                           _("cannot find PCI address for sound adapter %s"),
                           virDomainSoundModelTypeToString(vm->def->sounds[i]->model));
            return -1;
        }
    }

    /* watchdog and balloon are single optional devices, not arrays */
    if (vm->def->watchdog &&
        qemuProcessGetPCIWatchdogVendorProduct(vm->def->watchdog, &vendor, &product) == 0) {
        if (qemuProcessAssignNextPCIAddress(&(vm->def->watchdog->info),
                                            vendor, product,
                                            addrs, naddrs) < 0) {
            virReportError(VIR_ERR_INTERNAL_ERROR,
                           _("cannot find PCI address for watchdog %s"),
                           virDomainWatchdogModelTypeToString(vm->def->watchdog->model));
            return -1;
        }
    }

    if (vm->def->memballoon &&
        qemuProcessGetPCIMemballoonVendorProduct(vm->def->memballoon, &vendor, &product) == 0) {
        if (qemuProcessAssignNextPCIAddress(&(vm->def->memballoon->info),
                                            vendor, product,
                                            addrs, naddrs) < 0) {
            virReportError(VIR_ERR_INTERNAL_ERROR,
                           _("cannot find PCI address for balloon %s"),
                           virDomainMemballoonModelTypeToString(vm->def->memballoon->model));
            return -1;
        }
    }

    /* XXX console (virtio) */


    /* ... and now things we don't have in our xml */

    /* XXX USB controller ? */

    /* XXX what about other PCI devices (ie bridges) */

    return 0;
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
qemuProcessInitPCIAddresses(struct qemud_driver *driver,
|
|
|
|
virDomainObjPtr vm)
|
|
|
|
{
|
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
|
|
|
int naddrs;
|
|
|
|
int ret;
|
|
|
|
qemuMonitorPCIAddress *addrs = NULL;
|
|
|
|
|
qemu: fix crash when mixing sync and async monitor jobs
Currently, we attempt to run sync job and async job at the same time. It
means that the monitor commands for two jobs can be run in any order.
In the function qemuDomainObjEnterMonitorInternal():
if (priv->job.active == QEMU_JOB_NONE && priv->job.asyncJob) {
if (qemuDomainObjBeginNestedJob(driver, obj) < 0)
We check whether the caller is an async job by priv->job.active and
priv->job.asynJob. But when an async job is running, and a sync job is
also running at the time of the check, then priv->job.active is not
QEMU_JOB_NONE. So we cannot check whether the caller is an async job
in the function qemuDomainObjEnterMonitorInternal(), and must instead
put the burden on the caller to tell us when an async command wants
to do a nested job.
Once the burden is on the caller, then only async monitor enters need
to worry about whether the VM is still running; for sync monitor enter,
the internal return is always 0, so lots of ignore_value can be dropped.
* src/qemu/THREADS.txt: Reflect new rules.
* src/qemu/qemu_domain.h (qemuDomainObjEnterMonitorAsync): New
prototype.
* src/qemu/qemu_process.h (qemuProcessStartCPUs)
(qemuProcessStopCPUs): Add parameter.
* src/qemu/qemu_migration.h (qemuMigrationToFile): Likewise.
(qemuMigrationWaitForCompletion): Make static.
* src/qemu/qemu_domain.c (qemuDomainObjEnterMonitorInternal): Add
parameter.
(qemuDomainObjEnterMonitorAsync): New function.
(qemuDomainObjEnterMonitor, qemuDomainObjEnterMonitorWithDriver):
Update callers.
* src/qemu/qemu_driver.c (qemuDomainSaveInternal)
(qemudDomainCoreDump, doCoreDump, processWatchdogEvent)
(qemudDomainSuspend, qemudDomainResume, qemuDomainSaveImageStartVM)
(qemuDomainSnapshotCreateActive, qemuDomainRevertToSnapshot):
Likewise.
* src/qemu/qemu_process.c (qemuProcessStopCPUs)
(qemuProcessFakeReboot, qemuProcessRecoverMigration)
(qemuProcessRecoverJob, qemuProcessStart): Likewise.
* src/qemu/qemu_migration.c (qemuMigrationToFile)
(qemuMigrationWaitForCompletion, qemuMigrationUpdateJobStatus)
(qemuMigrationJobStart, qemuDomainMigrateGraphicsRelocate)
(doNativeMigrate, doTunnelMigrate, qemuMigrationPerformJob)
(qemuMigrationPerformPhase, qemuMigrationFinish)
(qemuMigrationConfirm): Likewise.
* src/qemu/qemu_hotplug.c: Drop unneeded ignore_value.
2011-07-28 23:18:24 +00:00
|
|
|
qemuDomainObjEnterMonitorWithDriver(driver, vm);
|
2011-02-14 16:09:39 +00:00
|
|
|
naddrs = qemuMonitorGetAllPCIAddresses(priv->mon,
|
|
|
|
&addrs);
|
|
|
|
qemuDomainObjExitMonitorWithDriver(driver, vm);
|
|
|
|
|
|
|
|
ret = qemuProcessDetectPCIAddresses(vm, addrs, naddrs);
|
|
|
|
|
|
|
|
VIR_FREE(addrs);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static int qemuProcessNextFreePort(struct qemud_driver *driver,
|
|
|
|
int startPort)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
2012-06-18 08:22:07 +00:00
|
|
|
for (i = startPort ; i < driver->remotePortMax; i++) {
|
2011-02-14 16:09:39 +00:00
|
|
|
int fd;
|
|
|
|
int reuse = 1;
|
|
|
|
struct sockaddr_in addr;
|
|
|
|
bool used = false;
|
|
|
|
|
2012-06-18 07:58:31 +00:00
|
|
|
if (virBitmapGetBit(driver->reservedRemotePorts,
|
2012-06-18 08:22:07 +00:00
|
|
|
i - driver->remotePortMin, &used) < 0)
|
|
|
|
VIR_DEBUG("virBitmapGetBit failed on bit %d", i - driver->remotePortMin);
|
2011-02-14 16:09:39 +00:00
|
|
|
|
|
|
|
if (used)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
addr.sin_family = AF_INET;
|
|
|
|
addr.sin_port = htons(i);
|
|
|
|
addr.sin_addr.s_addr = htonl(INADDR_ANY);
|
|
|
|
fd = socket(PF_INET, SOCK_STREAM, 0);
|
|
|
|
if (fd < 0)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, (void*)&reuse, sizeof(reuse)) < 0) {
|
|
|
|
VIR_FORCE_CLOSE(fd);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (bind(fd, (struct sockaddr*)&addr, sizeof(addr)) == 0) {
|
|
|
|
/* Not in use, lets grab it */
|
|
|
|
VIR_FORCE_CLOSE(fd);
|
|
|
|
/* Add port to bitmap of reserved ports */
|
2012-06-18 07:58:31 +00:00
|
|
|
if (virBitmapSetBit(driver->reservedRemotePorts,
|
2012-06-18 08:22:07 +00:00
|
|
|
i - driver->remotePortMin) < 0) {
|
2011-02-14 16:09:39 +00:00
|
|
|
VIR_DEBUG("virBitmapSetBit failed on bit %d",
|
2012-06-18 08:22:07 +00:00
|
|
|
i - driver->remotePortMin);
|
2011-02-14 16:09:39 +00:00
|
|
|
}
|
|
|
|
return i;
|
|
|
|
}
|
|
|
|
VIR_FORCE_CLOSE(fd);
|
|
|
|
|
|
|
|
if (errno == EADDRINUSE) {
|
|
|
|
/* In use, try next */
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
/* Some other bad failure, get out.. */
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
qemuProcessReturnPort(struct qemud_driver *driver,
|
|
|
|
int port)
|
|
|
|
{
|
2012-06-18 08:22:07 +00:00
|
|
|
if (port < driver->remotePortMin)
|
2011-02-14 16:09:39 +00:00
|
|
|
return;
|
|
|
|
|
2012-06-18 07:58:31 +00:00
|
|
|
if (virBitmapClearBit(driver->reservedRemotePorts,
|
2012-06-18 08:22:07 +00:00
|
|
|
port - driver->remotePortMin) < 0)
|
2011-02-14 16:09:39 +00:00
|
|
|
VIR_DEBUG("Could not mark port %d as unused", port);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
qemuProcessPrepareChardevDevice(virDomainDefPtr def ATTRIBUTE_UNUSED,
|
|
|
|
virDomainChrDefPtr dev,
|
|
|
|
void *opaque ATTRIBUTE_UNUSED)
|
|
|
|
{
|
|
|
|
int fd;
|
|
|
|
if (dev->source.type != VIR_DOMAIN_CHR_TYPE_FILE)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if ((fd = open(dev->source.data.file.path,
|
|
|
|
O_CREAT | O_APPEND, S_IRUSR|S_IWUSR)) < 0) {
|
|
|
|
virReportSystemError(errno,
|
|
|
|
_("Unable to pre-create chardev file '%s'"),
|
|
|
|
dev->source.data.file.path);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
VIR_FORCE_CLOSE(fd);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-04-05 12:17:28 +00:00
|
|
|
static int
|
|
|
|
qemuProcessLimits(struct qemud_driver *driver)
|
|
|
|
{
|
2011-12-22 11:22:31 +00:00
|
|
|
struct rlimit rlim;
|
2011-04-05 12:17:28 +00:00
|
|
|
|
2011-12-22 11:22:31 +00:00
|
|
|
if (driver->maxProcesses > 0) {
|
2011-04-05 12:17:28 +00:00
|
|
|
rlim.rlim_cur = rlim.rlim_max = driver->maxProcesses;
|
|
|
|
if (setrlimit(RLIMIT_NPROC, &rlim) < 0) {
|
|
|
|
virReportSystemError(errno,
|
|
|
|
_("cannot limit number of processes to %d"),
|
|
|
|
driver->maxProcesses);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-12-22 11:22:31 +00:00
|
|
|
if (driver->maxFiles > 0) {
|
|
|
|
/* Max number of opened files is one greater than
|
|
|
|
* actual limit. See man setrlimit */
|
|
|
|
rlim.rlim_cur = rlim.rlim_max = driver->maxFiles + 1;
|
|
|
|
if (setrlimit(RLIMIT_NOFILE, &rlim) < 0) {
|
|
|
|
virReportSystemError(errno,
|
|
|
|
_("cannot set max opened files to %d"),
|
|
|
|
driver->maxFiles);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-04-05 12:17:28 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
/* Bundle of state handed to qemuProcessHook(), which runs in the
 * child between fork() and exec() of QEMU. */
struct qemuProcessHookData {
    virConnectPtr conn;          /* connection used for host queries */
    virDomainObjPtr vm;          /* the domain being started */
    struct qemud_driver *driver; /* driver state (security mgr, cgroups, ...) */
    virBitmapPtr nodemask;       /* NUMA nodes for memory/CPU placement */
};
|
|
|
|
|
|
|
|
/* Hook run in the child process between fork() and exec() of QEMU.
 * Acquires the domain lock, applies resource limits, cgroup placement,
 * CPU affinity, NUMA memory policy and the security process label.
 * The ordering of these steps is deliberate -- see the inline
 * comments. Returns 0 on success, -1 on failure (which aborts the
 * domain start). */
static int qemuProcessHook(void *data)
{
    struct qemuProcessHookData *h = data;
    int ret = -1;
    int fd;

    /* Some later calls want pid present */
    h->vm->pid = getpid();

    VIR_DEBUG("Obtaining domain lock");
    /*
     * Since we're going to leak the returned FD to QEMU,
     * we need to make sure it gets a sensible label.
     * This mildly sucks, because there could be other
     * sockets the lock driver opens that we don't want
     * labelled. So far we're ok though.
     */
    if (virSecurityManagerSetSocketLabel(h->driver->securityManager, h->vm->def) < 0)
        goto cleanup;
    if (virDomainLockProcessStart(h->driver->lockManager,
                                  h->vm,
                                  /* QEMU is always paused initially */
                                  true,
                                  &fd) < 0)
        goto cleanup;
    /* drop the socket label again now the lock FD exists */
    if (virSecurityManagerClearSocketLabel(h->driver->securityManager, h->vm->def) < 0)
        goto cleanup;

    if (qemuProcessLimits(h->driver) < 0)
        goto cleanup;

    /* This must take place before exec(), so that all QEMU
     * memory allocation is on the correct NUMA node
     */
    VIR_DEBUG("Moving process to cgroup");
    if (qemuAddToCgroup(h->driver, h->vm->def) < 0)
        goto cleanup;

    /* This must be done after cgroup placement to avoid resetting CPU
     * affinity */
    if (qemuProcessInitCpuAffinity(h->driver, h->vm, h->nodemask) < 0)
        goto cleanup;

    /* memory policy must also be set pre-exec, same NUMA reasoning */
    if (qemuProcessInitNumaMemoryPolicy(h->vm, h->nodemask) < 0)
        goto cleanup;

    /* last: switch to the confined process label before exec() */
    VIR_DEBUG("Setting up security labelling");
    if (virSecurityManagerSetProcessLabel(h->driver->securityManager, h->vm->def) < 0)
        goto cleanup;

    ret = 0;

cleanup:
    VIR_DEBUG("Hook complete ret=%d", ret);
    return ret;
}
|
|
|
|
|
|
|
|
int
|
|
|
|
qemuProcessPrepareMonitorChr(struct qemud_driver *driver,
|
|
|
|
virDomainChrSourceDefPtr monConfig,
|
|
|
|
const char *vm)
|
|
|
|
{
|
|
|
|
monConfig->type = VIR_DOMAIN_CHR_TYPE_UNIX;
|
|
|
|
monConfig->data.nix.listen = true;
|
|
|
|
|
|
|
|
if (virAsprintf(&monConfig->data.nix.path, "%s/%s.monitor",
|
|
|
|
driver->libDir, vm) < 0) {
|
|
|
|
virReportOOMError();
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-06-15 16:49:58 +00:00
|
|
|
/*
 * Resume (start) the guest vCPUs via the qemu monitor.
 *
 * Precondition: Both driver and vm must be locked,
 * and a job must be active. This method will call
 * {Enter,Exit}MonitorWithDriver
 *
 * Returns 0 on success; -1 (or the monitor's error code) on failure,
 * in which case the domain state is left untouched and the lock
 * manager lease state is preserved for a later retry.
 */
int
qemuProcessStartCPUs(struct qemud_driver *driver, virDomainObjPtr vm,
                     virConnectPtr conn, virDomainRunningReason reason,
                     enum qemuDomainAsyncJob asyncJob)
{
    int ret;
    qemuDomainObjPrivatePtr priv = vm->privateData;

    /* Re-acquire any lock manager leases before letting the CPUs run */
    VIR_DEBUG("Using lock state '%s'", NULLSTR(priv->lockState));
    if (virDomainLockProcessResume(driver->lockManager, vm, priv->lockState) < 0) {
        /* Don't free priv->lockState on error, because we need
         * to make sure we have state still present if the user
         * tries to resume again
         */
        return -1;
    }
    /* Leases are held again; the saved lock state is no longer needed */
    VIR_FREE(priv->lockState);

    /* Enter the monitor as part of @asyncJob (nested job if needed) */
    ret = qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob);
    if (ret == 0) {
        ret = qemuMonitorStartCPUs(priv->mon, conn);
        qemuDomainObjExitMonitorWithDriver(driver, vm);
    }

    if (ret == 0) {
        /* CPUs are running: record the new domain state and why */
        virDomainObjSetState(vm, VIR_DOMAIN_RUNNING, reason);
    } else {
        /* Failed to start CPUs: give the leases back and stash the
         * lock state so a later resume attempt can restore it */
        if (virDomainLockProcessPause(driver->lockManager, vm, &priv->lockState) < 0)
            VIR_WARN("Unable to release lease on %s", vm->def->name);
        VIR_DEBUG("Preserving lock state '%s'", NULLSTR(priv->lockState));
    }

    return ret;
}
|
|
|
|
|
|
|
|
|
2011-05-04 09:07:01 +00:00
|
|
|
/*
 * Pause (stop) the guest vCPUs via the qemu monitor.
 *
 * The domain state is optimistically set to PAUSED/@reason before
 * entering the monitor, and rolled back to the previous state/reason
 * if the monitor command fails. On success, lock manager leases are
 * released and the lock state preserved for a later resume.
 *
 * Returns 0 on success; non-zero on failure.
 */
int qemuProcessStopCPUs(struct qemud_driver *driver, virDomainObjPtr vm,
                        virDomainPausedReason reason,
                        enum qemuDomainAsyncJob asyncJob)
{
    int ret;
    int oldState;
    int oldReason;
    qemuDomainObjPrivatePtr priv = vm->privateData;

    /* Any previously saved lock state is stale once we pause again */
    VIR_FREE(priv->lockState);
    /* Remember the current state so we can roll back on failure */
    oldState = virDomainObjGetState(vm, &oldReason);
    virDomainObjSetState(vm, VIR_DOMAIN_PAUSED, reason);

    /* Enter the monitor as part of @asyncJob (nested job if needed) */
    ret = qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob);
    if (ret == 0) {
        ret = qemuMonitorStopCPUs(priv->mon);
        qemuDomainObjExitMonitorWithDriver(driver, vm);
    }

    if (ret == 0) {
        /* CPUs are paused: release leases, keeping the lock state so
         * qemuProcessStartCPUs can restore it later */
        if (virDomainLockProcessPause(driver->lockManager, vm, &priv->lockState) < 0)
            VIR_WARN("Unable to release lease on %s", vm->def->name);
        VIR_DEBUG("Preserving lock state '%s'", NULLSTR(priv->lockState));
    } else {
        /* Monitor command failed: restore the previous domain state */
        virDomainObjSetState(vm, oldState, oldReason);
    }

    return ret;
}
|
|
|
|
|
|
|
|
|
|
|
|
|
2011-07-04 06:27:12 +00:00
|
|
|
static int
|
|
|
|
qemuProcessNotifyNets(virDomainDefPtr def)
|
|
|
|
{
|
|
|
|
int ii;
|
|
|
|
|
|
|
|
for (ii = 0 ; ii < def->nnets ; ii++) {
|
|
|
|
virDomainNetDefPtr net = def->nets[ii];
|
|
|
|
if (networkNotifyActualDevice(net) < 0)
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
static int
|
|
|
|
qemuProcessFiltersInstantiate(virConnectPtr conn,
|
|
|
|
virDomainDefPtr def)
|
|
|
|
{
|
|
|
|
int err = 0;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
if (!conn)
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
for (i = 0 ; i < def->nnets ; i++) {
|
|
|
|
virDomainNetDefPtr net = def->nets[i];
|
|
|
|
if ((net->filter) && (net->ifname)) {
|
2011-12-09 02:35:20 +00:00
|
|
|
if (virDomainConfNWFilterInstantiate(conn, def->uuid, net) < 0) {
|
2011-02-14 16:09:39 +00:00
|
|
|
err = 1;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2011-05-05 11:50:25 +00:00
|
|
|
/*
 * qemuProcessUpdateState:
 *
 * Query the monitor for the actual run state of @vm and reconcile
 * libvirt's recorded domain state with it. Used when reconnecting to
 * a qemu process whose state may have changed while the monitor was
 * disconnected (e.g. across a libvirtd restart).
 *
 * Returns 0 on success (whether or not a state change was needed),
 * -1 if the monitor query fails, the domain is no longer active, or
 * an OOM occurs while building the debug message.
 */
static int
qemuProcessUpdateState(struct qemud_driver *driver, virDomainObjPtr vm)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    virDomainState state;
    virDomainPausedReason reason;
    /* NOSTATE acts as a sentinel meaning "no transition needed" */
    virDomainState newState = VIR_DOMAIN_NOSTATE;
    int newReason;
    bool running;
    char *msg = NULL;
    int ret;

    /* Ask qemu whether the vCPUs are currently running, and if
     * paused, why */
    qemuDomainObjEnterMonitorWithDriver(driver, vm);
    ret = qemuMonitorGetStatus(priv->mon, &running, &reason);
    qemuDomainObjExitMonitorWithDriver(driver, vm);

    if (ret < 0 || !virDomainObjIsActive(vm))
        return -1;

    state = virDomainObjGetState(vm, NULL);

    /* Compare recorded state against what qemu reports and pick the
     * transition (if any) plus a human-readable description */
    if (state == VIR_DOMAIN_PAUSED && running) {
        newState = VIR_DOMAIN_RUNNING;
        newReason = VIR_DOMAIN_RUNNING_UNPAUSED;
        msg = strdup("was unpaused");
    } else if (state == VIR_DOMAIN_RUNNING && !running) {
        if (reason == VIR_DOMAIN_PAUSED_SHUTTING_DOWN) {
            newState = VIR_DOMAIN_SHUTDOWN;
            newReason = VIR_DOMAIN_SHUTDOWN_UNKNOWN;
            msg = strdup("shutdown");
        } else {
            newState = VIR_DOMAIN_PAUSED;
            newReason = reason;
            ignore_value(virAsprintf(&msg, "was paused (%s)",
                                     virDomainPausedReasonTypeToString(reason)));
        }
    } else if (state == VIR_DOMAIN_SHUTOFF && running) {
        newState = VIR_DOMAIN_RUNNING;
        newReason = VIR_DOMAIN_RUNNING_BOOTED;
        msg = strdup("finished booting");
    }

    if (newState != VIR_DOMAIN_NOSTATE) {
        /* msg is only NULL here if strdup/virAsprintf hit OOM */
        if (!msg) {
            virReportOOMError();
            return -1;
        }

        VIR_DEBUG("Domain %s %s while its monitor was disconnected;"
                  " changing state to %s (%s)",
                  vm->def->name,
                  msg,
                  virDomainStateTypeToString(newState),
                  virDomainStateReasonToString(newState, newReason));
        VIR_FREE(msg);
        virDomainObjSetState(vm, newState, newReason);
    }

    return 0;
}
|
|
|
|
|
2011-07-19 00:27:33 +00:00
|
|
|
/*
 * qemuProcessRecoverMigration:
 *
 * Decide how to recover a domain whose migration @job was interrupted
 * (e.g. by a libvirtd restart), based on which migration @phase had
 * been reached. Depending on the phase, the domain is either left
 * alone, resumed, or condemned.
 *
 * @state/@reason describe the domain state recorded before recovery;
 * they gate whether a resume is appropriate.
 *
 * Returns 0 if the domain should be kept (possibly after being
 * resumed here), -1 if the caller should kill the domain.
 */
static int
qemuProcessRecoverMigration(struct qemud_driver *driver,
                            virDomainObjPtr vm,
                            virConnectPtr conn,
                            enum qemuDomainAsyncJob job,
                            enum qemuMigrationJobPhase phase,
                            virDomainState state,
                            int reason)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;

    if (job == QEMU_ASYNC_JOB_MIGRATION_IN) {
        switch (phase) {
        /* Phases that cannot occur on the incoming side, or where
         * nothing needs to be done */
        case QEMU_MIGRATION_PHASE_NONE:
        case QEMU_MIGRATION_PHASE_PERFORM2:
        case QEMU_MIGRATION_PHASE_BEGIN3:
        case QEMU_MIGRATION_PHASE_PERFORM3:
        case QEMU_MIGRATION_PHASE_PERFORM3_DONE:
        case QEMU_MIGRATION_PHASE_CONFIRM3_CANCELLED:
        case QEMU_MIGRATION_PHASE_CONFIRM3:
        case QEMU_MIGRATION_PHASE_LAST:
            break;

        case QEMU_MIGRATION_PHASE_PREPARE:
            VIR_DEBUG("Killing unfinished incoming migration for domain %s",
                      vm->def->name);
            return -1;

        case QEMU_MIGRATION_PHASE_FINISH2:
            /* source domain is already killed so let's just resume the domain
             * and hope we are all set */
            VIR_DEBUG("Incoming migration finished, resuming domain %s",
                      vm->def->name);
            if (qemuProcessStartCPUs(driver, vm, conn,
                                     VIR_DOMAIN_RUNNING_UNPAUSED,
                                     QEMU_ASYNC_JOB_NONE) < 0) {
                /* best effort: keep the domain even if resume fails */
                VIR_WARN("Could not resume domain %s", vm->def->name);
            }
            break;

        case QEMU_MIGRATION_PHASE_FINISH3:
            /* migration finished, we started resuming the domain but didn't
             * confirm success or failure yet; killing it seems safest */
            VIR_DEBUG("Killing migrated domain %s", vm->def->name);
            return -1;
        }
    } else if (job == QEMU_ASYNC_JOB_MIGRATION_OUT) {
        switch (phase) {
        /* Phases that cannot occur on the outgoing side, or where
         * nothing needs to be done */
        case QEMU_MIGRATION_PHASE_NONE:
        case QEMU_MIGRATION_PHASE_PREPARE:
        case QEMU_MIGRATION_PHASE_FINISH2:
        case QEMU_MIGRATION_PHASE_FINISH3:
        case QEMU_MIGRATION_PHASE_LAST:
            break;

        case QEMU_MIGRATION_PHASE_BEGIN3:
            /* nothing has happened so far, just forget we were about to
             * migrate the domain */
            break;

        case QEMU_MIGRATION_PHASE_PERFORM2:
        case QEMU_MIGRATION_PHASE_PERFORM3:
            /* migration is still in progress, let's cancel it and resume the
             * domain */
            VIR_DEBUG("Canceling unfinished outgoing migration of domain %s",
                      vm->def->name);
            qemuDomainObjEnterMonitor(driver, vm);
            ignore_value(qemuMonitorMigrateCancel(priv->mon));
            qemuDomainObjExitMonitor(driver, vm);
            /* resume the domain but only if it was paused as a result of
             * migration */
            if (state == VIR_DOMAIN_PAUSED &&
                (reason == VIR_DOMAIN_PAUSED_MIGRATION ||
                 reason == VIR_DOMAIN_PAUSED_UNKNOWN)) {
                if (qemuProcessStartCPUs(driver, vm, conn,
                                         VIR_DOMAIN_RUNNING_UNPAUSED,
                                         QEMU_ASYNC_JOB_NONE) < 0) {
                    VIR_WARN("Could not resume domain %s", vm->def->name);
                }
            }
            break;

        case QEMU_MIGRATION_PHASE_PERFORM3_DONE:
            /* migration finished but we didn't have a chance to get the result
             * of Finish3 step; third party needs to check what to do next
             */
            break;

        case QEMU_MIGRATION_PHASE_CONFIRM3_CANCELLED:
            /* Finish3 failed, we need to resume the domain */
            VIR_DEBUG("Resuming domain %s after failed migration",
                      vm->def->name);
            if (state == VIR_DOMAIN_PAUSED &&
                (reason == VIR_DOMAIN_PAUSED_MIGRATION ||
                 reason == VIR_DOMAIN_PAUSED_UNKNOWN)) {
                if (qemuProcessStartCPUs(driver, vm, conn,
                                         VIR_DOMAIN_RUNNING_UNPAUSED,
                                         QEMU_ASYNC_JOB_NONE) < 0) {
                    VIR_WARN("Could not resume domain %s", vm->def->name);
                }
            }
            break;

        case QEMU_MIGRATION_PHASE_CONFIRM3:
            /* migration completed, we need to kill the domain here */
            return -1;
        }
    }

    return 0;
}
|
|
|
|
|
2011-07-04 21:33:39 +00:00
|
|
|
static int
|
|
|
|
qemuProcessRecoverJob(struct qemud_driver *driver,
|
|
|
|
virDomainObjPtr vm,
|
|
|
|
virConnectPtr conn,
|
|
|
|
const struct qemuDomainJobObj *job)
|
|
|
|
{
|
2011-07-19 00:27:39 +00:00
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
2011-07-04 21:33:39 +00:00
|
|
|
virDomainState state;
|
|
|
|
int reason;
|
|
|
|
|
|
|
|
state = virDomainObjGetState(vm, &reason);
|
|
|
|
|
|
|
|
switch (job->asyncJob) {
|
|
|
|
case QEMU_ASYNC_JOB_MIGRATION_OUT:
|
|
|
|
case QEMU_ASYNC_JOB_MIGRATION_IN:
|
2011-07-19 00:27:33 +00:00
|
|
|
if (qemuProcessRecoverMigration(driver, vm, conn, job->asyncJob,
|
|
|
|
job->phase, state, reason) < 0)
|
|
|
|
return -1;
|
2011-07-04 21:33:39 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case QEMU_ASYNC_JOB_SAVE:
|
|
|
|
case QEMU_ASYNC_JOB_DUMP:
|
qemu: fix crash when mixing sync and async monitor jobs
Currently, we attempt to run sync job and async job at the same time. It
means that the monitor commands for two jobs can be run in any order.
In the function qemuDomainObjEnterMonitorInternal():
if (priv->job.active == QEMU_JOB_NONE && priv->job.asyncJob) {
if (qemuDomainObjBeginNestedJob(driver, obj) < 0)
We check whether the caller is an async job by priv->job.active and
priv->job.asynJob. But when an async job is running, and a sync job is
also running at the time of the check, then priv->job.active is not
QEMU_JOB_NONE. So we cannot check whether the caller is an async job
in the function qemuDomainObjEnterMonitorInternal(), and must instead
put the burden on the caller to tell us when an async command wants
to do a nested job.
Once the burden is on the caller, then only async monitor enters need
to worry about whether the VM is still running; for sync monitor enter,
the internal return is always 0, so lots of ignore_value can be dropped.
* src/qemu/THREADS.txt: Reflect new rules.
* src/qemu/qemu_domain.h (qemuDomainObjEnterMonitorAsync): New
prototype.
* src/qemu/qemu_process.h (qemuProcessStartCPUs)
(qemuProcessStopCPUs): Add parameter.
* src/qemu/qemu_migration.h (qemuMigrationToFile): Likewise.
(qemuMigrationWaitForCompletion): Make static.
* src/qemu/qemu_domain.c (qemuDomainObjEnterMonitorInternal): Add
parameter.
(qemuDomainObjEnterMonitorAsync): New function.
(qemuDomainObjEnterMonitor, qemuDomainObjEnterMonitorWithDriver):
Update callers.
* src/qemu/qemu_driver.c (qemuDomainSaveInternal)
(qemudDomainCoreDump, doCoreDump, processWatchdogEvent)
(qemudDomainSuspend, qemudDomainResume, qemuDomainSaveImageStartVM)
(qemuDomainSnapshotCreateActive, qemuDomainRevertToSnapshot):
Likewise.
* src/qemu/qemu_process.c (qemuProcessStopCPUs)
(qemuProcessFakeReboot, qemuProcessRecoverMigration)
(qemuProcessRecoverJob, qemuProcessStart): Likewise.
* src/qemu/qemu_migration.c (qemuMigrationToFile)
(qemuMigrationWaitForCompletion, qemuMigrationUpdateJobStatus)
(qemuMigrationJobStart, qemuDomainMigrateGraphicsRelocate)
(doNativeMigrate, doTunnelMigrate, qemuMigrationPerformJob)
(qemuMigrationPerformPhase, qemuMigrationFinish)
(qemuMigrationConfirm): Likewise.
* src/qemu/qemu_hotplug.c: Drop unneeded ignore_value.
2011-07-28 23:18:24 +00:00
|
|
|
qemuDomainObjEnterMonitor(driver, vm);
|
2011-07-19 00:27:39 +00:00
|
|
|
ignore_value(qemuMonitorMigrateCancel(priv->mon));
|
|
|
|
qemuDomainObjExitMonitor(driver, vm);
|
2011-07-04 21:33:39 +00:00
|
|
|
/* resume the domain but only if it was paused as a result of
|
qemu: fix crash when mixing sync and async monitor jobs
Currently, we attempt to run sync job and async job at the same time. It
means that the monitor commands for two jobs can be run in any order.
In the function qemuDomainObjEnterMonitorInternal():
if (priv->job.active == QEMU_JOB_NONE && priv->job.asyncJob) {
if (qemuDomainObjBeginNestedJob(driver, obj) < 0)
We check whether the caller is an async job by priv->job.active and
priv->job.asynJob. But when an async job is running, and a sync job is
also running at the time of the check, then priv->job.active is not
QEMU_JOB_NONE. So we cannot check whether the caller is an async job
in the function qemuDomainObjEnterMonitorInternal(), and must instead
put the burden on the caller to tell us when an async command wants
to do a nested job.
Once the burden is on the caller, then only async monitor enters need
to worry about whether the VM is still running; for sync monitor enter,
the internal return is always 0, so lots of ignore_value can be dropped.
* src/qemu/THREADS.txt: Reflect new rules.
* src/qemu/qemu_domain.h (qemuDomainObjEnterMonitorAsync): New
prototype.
* src/qemu/qemu_process.h (qemuProcessStartCPUs)
(qemuProcessStopCPUs): Add parameter.
* src/qemu/qemu_migration.h (qemuMigrationToFile): Likewise.
(qemuMigrationWaitForCompletion): Make static.
* src/qemu/qemu_domain.c (qemuDomainObjEnterMonitorInternal): Add
parameter.
(qemuDomainObjEnterMonitorAsync): New function.
(qemuDomainObjEnterMonitor, qemuDomainObjEnterMonitorWithDriver):
Update callers.
* src/qemu/qemu_driver.c (qemuDomainSaveInternal)
(qemudDomainCoreDump, doCoreDump, processWatchdogEvent)
(qemudDomainSuspend, qemudDomainResume, qemuDomainSaveImageStartVM)
(qemuDomainSnapshotCreateActive, qemuDomainRevertToSnapshot):
Likewise.
* src/qemu/qemu_process.c (qemuProcessStopCPUs)
(qemuProcessFakeReboot, qemuProcessRecoverMigration)
(qemuProcessRecoverJob, qemuProcessStart): Likewise.
* src/qemu/qemu_migration.c (qemuMigrationToFile)
(qemuMigrationWaitForCompletion, qemuMigrationUpdateJobStatus)
(qemuMigrationJobStart, qemuDomainMigrateGraphicsRelocate)
(doNativeMigrate, doTunnelMigrate, qemuMigrationPerformJob)
(qemuMigrationPerformPhase, qemuMigrationFinish)
(qemuMigrationConfirm): Likewise.
* src/qemu/qemu_hotplug.c: Drop unneeded ignore_value.
2011-07-28 23:18:24 +00:00
|
|
|
* running save/dump operation. Although we are recovering an
|
|
|
|
* async job, this function is run at startup and must resume
|
|
|
|
* things using sync monitor connections. */
|
2011-07-04 21:33:39 +00:00
|
|
|
if (state == VIR_DOMAIN_PAUSED &&
|
|
|
|
((job->asyncJob == QEMU_ASYNC_JOB_DUMP &&
|
|
|
|
reason == VIR_DOMAIN_PAUSED_DUMP) ||
|
|
|
|
(job->asyncJob == QEMU_ASYNC_JOB_SAVE &&
|
|
|
|
reason == VIR_DOMAIN_PAUSED_SAVE) ||
|
|
|
|
reason == VIR_DOMAIN_PAUSED_UNKNOWN)) {
|
|
|
|
if (qemuProcessStartCPUs(driver, vm, conn,
|
qemu: fix crash when mixing sync and async monitor jobs
Currently, we attempt to run sync job and async job at the same time. It
means that the monitor commands for two jobs can be run in any order.
In the function qemuDomainObjEnterMonitorInternal():
if (priv->job.active == QEMU_JOB_NONE && priv->job.asyncJob) {
if (qemuDomainObjBeginNestedJob(driver, obj) < 0)
We check whether the caller is an async job by priv->job.active and
priv->job.asynJob. But when an async job is running, and a sync job is
also running at the time of the check, then priv->job.active is not
QEMU_JOB_NONE. So we cannot check whether the caller is an async job
in the function qemuDomainObjEnterMonitorInternal(), and must instead
put the burden on the caller to tell us when an async command wants
to do a nested job.
Once the burden is on the caller, then only async monitor enters need
to worry about whether the VM is still running; for sync monitor enter,
the internal return is always 0, so lots of ignore_value can be dropped.
* src/qemu/THREADS.txt: Reflect new rules.
* src/qemu/qemu_domain.h (qemuDomainObjEnterMonitorAsync): New
prototype.
* src/qemu/qemu_process.h (qemuProcessStartCPUs)
(qemuProcessStopCPUs): Add parameter.
* src/qemu/qemu_migration.h (qemuMigrationToFile): Likewise.
(qemuMigrationWaitForCompletion): Make static.
* src/qemu/qemu_domain.c (qemuDomainObjEnterMonitorInternal): Add
parameter.
(qemuDomainObjEnterMonitorAsync): New function.
(qemuDomainObjEnterMonitor, qemuDomainObjEnterMonitorWithDriver):
Update callers.
* src/qemu/qemu_driver.c (qemuDomainSaveInternal)
(qemudDomainCoreDump, doCoreDump, processWatchdogEvent)
(qemudDomainSuspend, qemudDomainResume, qemuDomainSaveImageStartVM)
(qemuDomainSnapshotCreateActive, qemuDomainRevertToSnapshot):
Likewise.
* src/qemu/qemu_process.c (qemuProcessStopCPUs)
(qemuProcessFakeReboot, qemuProcessRecoverMigration)
(qemuProcessRecoverJob, qemuProcessStart): Likewise.
* src/qemu/qemu_migration.c (qemuMigrationToFile)
(qemuMigrationWaitForCompletion, qemuMigrationUpdateJobStatus)
(qemuMigrationJobStart, qemuDomainMigrateGraphicsRelocate)
(doNativeMigrate, doTunnelMigrate, qemuMigrationPerformJob)
(qemuMigrationPerformPhase, qemuMigrationFinish)
(qemuMigrationConfirm): Likewise.
* src/qemu/qemu_hotplug.c: Drop unneeded ignore_value.
2011-07-28 23:18:24 +00:00
|
|
|
VIR_DOMAIN_RUNNING_UNPAUSED,
|
|
|
|
QEMU_ASYNC_JOB_NONE) < 0) {
|
2011-07-04 21:33:39 +00:00
|
|
|
VIR_WARN("Could not resume domain %s after", vm->def->name);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
|
|
|
case QEMU_ASYNC_JOB_NONE:
|
|
|
|
case QEMU_ASYNC_JOB_LAST:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!virDomainObjIsActive(vm))
|
|
|
|
return -1;
|
|
|
|
|
2012-04-06 17:42:34 +00:00
|
|
|
/* In case any special handling is added for job type that has been ignored
|
|
|
|
* before, QEMU_DOMAIN_TRACK_JOBS (from qemu_domain.h) needs to be updated
|
|
|
|
* for the job to be properly tracked in domain state XML.
|
|
|
|
*/
|
2011-07-04 21:33:39 +00:00
|
|
|
switch (job->active) {
|
|
|
|
case QEMU_JOB_QUERY:
|
|
|
|
/* harmless */
|
|
|
|
break;
|
|
|
|
|
|
|
|
case QEMU_JOB_DESTROY:
|
|
|
|
VIR_DEBUG("Domain %s should have already been destroyed",
|
|
|
|
vm->def->name);
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
case QEMU_JOB_SUSPEND:
|
|
|
|
/* mostly harmless */
|
|
|
|
break;
|
|
|
|
|
|
|
|
case QEMU_JOB_MODIFY:
|
|
|
|
/* XXX depending on the command we may be in an inconsistent state and
|
|
|
|
* we should probably fall back to "monitor error" state and refuse to
|
|
|
|
*/
|
|
|
|
break;
|
|
|
|
|
2011-07-19 00:27:36 +00:00
|
|
|
case QEMU_JOB_MIGRATION_OP:
|
2011-07-19 00:27:39 +00:00
|
|
|
case QEMU_JOB_ABORT:
|
2011-07-04 21:33:39 +00:00
|
|
|
case QEMU_JOB_ASYNC:
|
|
|
|
case QEMU_JOB_ASYNC_NESTED:
|
|
|
|
/* async job was already handled above */
|
|
|
|
case QEMU_JOB_NONE:
|
|
|
|
case QEMU_JOB_LAST:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
struct qemuProcessReconnectData {
|
|
|
|
virConnectPtr conn;
|
|
|
|
struct qemud_driver *driver;
|
2011-08-16 10:51:36 +00:00
|
|
|
void *payload;
|
|
|
|
struct qemuDomainJobObj oldjob;
|
2011-02-14 16:09:39 +00:00
|
|
|
};
|
|
|
|
/*
|
|
|
|
* Open an existing VM's monitor, re-detect VCPU threads
|
|
|
|
* and re-reserve the security labels in use
|
2011-09-16 13:44:43 +00:00
|
|
|
*
|
|
|
|
* We own the virConnectPtr we are passed here - whoever started
|
|
|
|
* this thread function has increased the reference counter to it
|
|
|
|
* so that we now have to close it.
|
2011-02-14 16:09:39 +00:00
|
|
|
*/
|
|
|
|
static void
|
2011-08-16 10:51:36 +00:00
|
|
|
qemuProcessReconnect(void *opaque)
|
2011-02-14 16:09:39 +00:00
|
|
|
{
|
|
|
|
struct qemuProcessReconnectData *data = opaque;
|
|
|
|
struct qemud_driver *driver = data->driver;
|
2011-08-16 10:51:36 +00:00
|
|
|
virDomainObjPtr obj = data->payload;
|
2011-02-14 16:09:39 +00:00
|
|
|
qemuDomainObjPrivatePtr priv;
|
|
|
|
virConnectPtr conn = data->conn;
|
2011-07-04 21:33:39 +00:00
|
|
|
struct qemuDomainJobObj oldjob;
|
2011-09-27 12:56:17 +00:00
|
|
|
int state;
|
|
|
|
int reason;
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2011-08-16 10:51:36 +00:00
|
|
|
memcpy(&oldjob, &data->oldjob, sizeof(oldjob));
|
|
|
|
|
|
|
|
VIR_FREE(data);
|
|
|
|
|
|
|
|
qemuDriverLock(driver);
|
2011-02-14 16:09:39 +00:00
|
|
|
virDomainObjLock(obj);
|
|
|
|
|
2011-07-04 21:33:39 +00:00
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
VIR_DEBUG("Reconnect monitor to %p '%s'", obj, obj->def->name);
|
|
|
|
|
|
|
|
priv = obj->privateData;
|
|
|
|
|
2012-04-06 16:55:46 +00:00
|
|
|
/* Job was started by the caller for us */
|
|
|
|
qemuDomainObjTransferJob(obj);
|
2011-06-30 09:23:50 +00:00
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
/* Hold an extra reference because we can't allow 'vm' to be
|
|
|
|
* deleted if qemuConnectMonitor() failed */
|
2012-07-11 13:35:46 +00:00
|
|
|
virObjectRef(obj);
|
2011-02-14 16:09:39 +00:00
|
|
|
|
|
|
|
/* XXX check PID liveliness & EXE path */
|
|
|
|
if (qemuConnectMonitor(driver, obj) < 0)
|
|
|
|
goto error;
|
|
|
|
|
2011-10-05 17:31:54 +00:00
|
|
|
/* Failure to connect to agent shouldn't be fatal */
|
|
|
|
if (qemuConnectAgent(driver, obj) < 0) {
|
|
|
|
VIR_WARN("Cannot connect to QEMU guest agent for %s",
|
|
|
|
obj->def->name);
|
|
|
|
virResetLastError();
|
|
|
|
priv->agentError = true;
|
|
|
|
}
|
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
if (qemuUpdateActivePciHostdevs(driver, obj->def) < 0) {
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
2012-03-26 14:44:19 +00:00
|
|
|
if (qemuUpdateActiveUsbHostdevs(driver, obj->def) < 0)
|
|
|
|
goto error;
|
|
|
|
|
2011-05-05 11:50:25 +00:00
|
|
|
if (qemuProcessUpdateState(driver, obj) < 0)
|
|
|
|
goto error;
|
|
|
|
|
2011-09-27 12:56:17 +00:00
|
|
|
state = virDomainObjGetState(obj, &reason);
|
|
|
|
if (state == VIR_DOMAIN_SHUTOFF) {
|
2011-07-12 09:45:16 +00:00
|
|
|
VIR_DEBUG("Domain '%s' wasn't fully started yet, killing it",
|
|
|
|
obj->def->name);
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
2011-05-04 11:55:38 +00:00
|
|
|
/* If upgrading from old libvirtd we won't have found any
|
|
|
|
* caps in the domain status, so re-query them
|
|
|
|
*/
|
2012-08-20 16:44:14 +00:00
|
|
|
if (!priv->caps &&
|
2011-05-04 11:55:38 +00:00
|
|
|
qemuCapsExtractVersionInfo(obj->def->emulator, obj->def->os.arch,
|
2012-07-04 09:44:58 +00:00
|
|
|
false,
|
2011-02-14 16:09:39 +00:00
|
|
|
NULL,
|
2012-08-20 16:44:14 +00:00
|
|
|
&priv->caps) < 0)
|
2011-05-04 11:55:38 +00:00
|
|
|
goto error;
|
|
|
|
|
2011-11-30 14:31:45 +00:00
|
|
|
/* In case the domain shutdown while we were not running,
|
2011-09-27 12:56:17 +00:00
|
|
|
* we need to finish the shutdown process. And we need to do it after
|
|
|
|
* we have qemuCaps filled in.
|
|
|
|
*/
|
2011-11-30 14:31:45 +00:00
|
|
|
if (state == VIR_DOMAIN_SHUTDOWN ||
|
|
|
|
(state == VIR_DOMAIN_PAUSED &&
|
|
|
|
reason == VIR_DOMAIN_PAUSED_SHUTTING_DOWN)) {
|
|
|
|
VIR_DEBUG("Finishing shutdown sequence for domain %s",
|
|
|
|
obj->def->name);
|
|
|
|
qemuProcessShutdownOrReboot(driver, obj);
|
2011-09-27 12:56:17 +00:00
|
|
|
goto endjob;
|
|
|
|
}
|
|
|
|
|
2012-08-20 16:44:14 +00:00
|
|
|
if (qemuCapsGet(priv->caps, QEMU_CAPS_DEVICE))
|
|
|
|
if ((qemuDomainAssignAddresses(obj->def, priv->caps, obj)) < 0)
|
2012-08-08 07:06:33 +00:00
|
|
|
goto error;
|
2011-02-14 16:09:39 +00:00
|
|
|
|
Change security driver APIs to use virDomainDefPtr instead of virDomainObjPtr
When sVirt is integrated with the LXC driver, it will be neccessary
to invoke the security driver APIs using only a virDomainDefPtr
since the lxc_container.c code has no virDomainObjPtr available.
Aside from two functions which want obj->pid, every bit of the
security driver code only touches obj->def. So we don't need to
pass a virDomainObjPtr into the security drivers, a virDomainDefPtr
is sufficient. Two functions also gain a 'pid_t pid' argument.
* src/qemu/qemu_driver.c, src/qemu/qemu_hotplug.c,
src/qemu/qemu_migration.c, src/qemu/qemu_process.c,
src/security/security_apparmor.c,
src/security/security_dac.c,
src/security/security_driver.h,
src/security/security_manager.c,
src/security/security_manager.h,
src/security/security_nop.c,
src/security/security_selinux.c,
src/security/security_stack.c: Change all security APIs to use a
virDomainDefPtr instead of virDomainObjPtr
2011-07-14 13:32:06 +00:00
|
|
|
if (virSecurityManagerReserveLabel(driver->securityManager, obj->def, obj->pid) < 0)
|
2011-02-14 16:09:39 +00:00
|
|
|
goto error;
|
|
|
|
|
2011-07-04 06:27:12 +00:00
|
|
|
if (qemuProcessNotifyNets(obj->def) < 0)
|
|
|
|
goto error;
|
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
if (qemuProcessFiltersInstantiate(conn, obj->def))
|
|
|
|
goto error;
|
|
|
|
|
2012-04-02 13:55:08 +00:00
|
|
|
if (qemuDomainCheckEjectableMedia(driver, obj, QEMU_ASYNC_JOB_NONE) < 0)
|
2011-09-13 13:49:50 +00:00
|
|
|
goto error;
|
|
|
|
|
2011-07-04 21:33:39 +00:00
|
|
|
if (qemuProcessRecoverJob(driver, obj, conn, &oldjob) < 0)
|
|
|
|
goto error;
|
|
|
|
|
2011-05-05 11:50:25 +00:00
|
|
|
/* update domain state XML with possibly updated state in virDomainObj */
|
|
|
|
if (virDomainSaveStatus(driver->caps, driver->stateDir, obj) < 0)
|
|
|
|
goto error;
|
|
|
|
|
Add some missing hook functions
A core use case of the hook scripts is to be able to do things
to a guest's network configuration. It is possible to hook into
the 'start' operation for a QEMU guest which runs just before
the guest is started. The TAP devices will exist at this point,
but the QEMU process will not. It can be desirable to have a
'started' hook too, which runs once QEMU has started.
If libvirtd is restarted it will re-populate firewall rules,
but there is no QEMU hook to trigger for existing domains.
This is solved with a 'reconnect' hook.
Finally, if attaching to an external QEMU process there needs
to be an 'attach' hook script.
This all also applies to the LXC driver
* docs/hooks.html.in: Document new operations
* src/util/hooks.c, src/util/hooks.c: Add 'started', 'reconnect'
and 'attach' operations for QEMU. Add 'prepare', 'started',
'release' and 'reconnect' operations for LXC
* src/lxc/lxc_driver.c: Add hooks for 'prepare', 'started',
'release' and 'reconnect' operations
* src/qemu/qemu_process.c: Add hooks for 'started', 'reconnect'
and 'reconnect' operations
2012-05-28 14:04:31 +00:00
|
|
|
/* Run an hook to allow admins to do some magic */
|
|
|
|
if (virHookPresent(VIR_HOOK_DRIVER_QEMU)) {
|
|
|
|
char *xml = qemuDomainDefFormatXML(driver, obj->def, 0, false);
|
|
|
|
int hookret;
|
|
|
|
|
|
|
|
hookret = virHookCall(VIR_HOOK_DRIVER_QEMU, obj->def->name,
|
|
|
|
VIR_HOOK_QEMU_OP_RECONNECT, VIR_HOOK_SUBOP_BEGIN,
|
|
|
|
NULL, xml, NULL);
|
|
|
|
VIR_FREE(xml);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If the script raised an error abort the launch
|
|
|
|
*/
|
|
|
|
if (hookret < 0)
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
if (obj->def->id >= driver->nextvmid)
|
|
|
|
driver->nextvmid = obj->def->id + 1;
|
|
|
|
|
2011-09-27 12:56:17 +00:00
|
|
|
endjob:
|
2012-07-11 13:35:46 +00:00
|
|
|
if (!qemuDomainObjEndJob(driver, obj))
|
2011-08-16 10:51:36 +00:00
|
|
|
obj = NULL;
|
|
|
|
|
2012-07-11 13:35:46 +00:00
|
|
|
if (obj && virObjectUnref(obj))
|
2011-09-27 12:56:17 +00:00
|
|
|
virDomainObjUnlock(obj);
|
|
|
|
|
2011-08-16 10:51:36 +00:00
|
|
|
qemuDriverUnlock(driver);
|
|
|
|
|
2011-09-16 13:44:43 +00:00
|
|
|
virConnectClose(conn);
|
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
return;
|
|
|
|
|
|
|
|
error:
|
2012-07-11 13:35:46 +00:00
|
|
|
if (!qemuDomainObjEndJob(driver, obj))
|
2011-08-16 10:51:36 +00:00
|
|
|
obj = NULL;
|
|
|
|
|
2011-09-21 19:02:44 +00:00
|
|
|
if (obj) {
|
|
|
|
if (!virDomainObjIsActive(obj)) {
|
2012-07-11 13:35:46 +00:00
|
|
|
if (virObjectUnref(obj))
|
2011-09-21 19:02:44 +00:00
|
|
|
virDomainObjUnlock(obj);
|
|
|
|
qemuDriverUnlock(driver);
|
|
|
|
return;
|
|
|
|
}
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2012-07-11 13:35:46 +00:00
|
|
|
if (virObjectUnref(obj)) {
|
2011-09-21 19:02:44 +00:00
|
|
|
/* We can't get the monitor back, so must kill the VM
|
2011-09-22 06:02:03 +00:00
|
|
|
* to remove danger of it ending up running twice if
|
|
|
|
* user tries to start it again later
|
|
|
|
*/
|
2012-08-20 16:44:14 +00:00
|
|
|
if (qemuCapsGet(priv->caps, QEMU_CAPS_NO_SHUTDOWN)) {
|
2012-06-06 14:43:41 +00:00
|
|
|
/* If we couldn't get the monitor and qemu supports
|
|
|
|
* no-shutdown, we can safely say that the domain
|
|
|
|
* crashed ... */
|
|
|
|
state = VIR_DOMAIN_SHUTOFF_CRASHED;
|
|
|
|
} else {
|
|
|
|
/* ... but if it doesn't we can't say what the state
|
|
|
|
* really is and FAILED means "failed to start" */
|
|
|
|
state = VIR_DOMAIN_SHUTOFF_UNKNOWN;
|
|
|
|
}
|
2012-06-11 13:20:44 +00:00
|
|
|
qemuProcessStop(driver, obj, state, 0);
|
2011-09-21 19:02:44 +00:00
|
|
|
if (!obj->persistent)
|
2011-09-22 06:02:03 +00:00
|
|
|
qemuDomainRemoveInactive(driver, obj);
|
2011-09-21 19:02:44 +00:00
|
|
|
else
|
|
|
|
virDomainObjUnlock(obj);
|
|
|
|
}
|
2011-02-14 16:09:39 +00:00
|
|
|
}
|
2011-08-16 10:51:36 +00:00
|
|
|
qemuDriverUnlock(driver);
|
2011-09-16 13:44:43 +00:00
|
|
|
|
|
|
|
virConnectClose(conn);
|
2011-08-16 10:51:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
qemuProcessReconnectHelper(void *payload,
|
|
|
|
const void *name ATTRIBUTE_UNUSED,
|
|
|
|
void *opaque)
|
|
|
|
{
|
|
|
|
virThread thread;
|
|
|
|
struct qemuProcessReconnectData *src = opaque;
|
|
|
|
struct qemuProcessReconnectData *data;
|
|
|
|
virDomainObjPtr obj = payload;
|
|
|
|
|
|
|
|
if (VIR_ALLOC(data) < 0) {
|
|
|
|
virReportOOMError();
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
memcpy(data, src, sizeof(*data));
|
|
|
|
data->payload = payload;
|
|
|
|
|
|
|
|
/* This iterator is called with driver being locked.
|
|
|
|
* We create a separate thread to run qemuProcessReconnect in it.
|
|
|
|
* However, qemuProcessReconnect needs to:
|
|
|
|
* 1. lock driver
|
|
|
|
* 2. just before monitor reconnect do lightweight MonitorEnter
|
|
|
|
* (increase VM refcount, unlock VM & driver)
|
|
|
|
* 3. reconnect to monitor
|
|
|
|
* 4. do lightweight MonitorExit (lock driver & VM)
|
|
|
|
* 5. continue reconnect process
|
|
|
|
* 6. EndJob
|
|
|
|
* 7. unlock driver
|
|
|
|
*
|
|
|
|
* It is necessary to NOT hold driver lock for the entire run
|
|
|
|
* of reconnect, otherwise we will get blocked if there is
|
|
|
|
* unresponsive qemu.
|
|
|
|
* However, iterating over hash table MUST be done on locked
|
|
|
|
* driver.
|
|
|
|
*
|
|
|
|
* NB, we can't do normal MonitorEnter & MonitorExit because
|
|
|
|
* these two lock the monitor lock, which does not exists in
|
|
|
|
* this early phase.
|
|
|
|
*/
|
|
|
|
|
|
|
|
virDomainObjLock(obj);
|
|
|
|
|
|
|
|
qemuDomainObjRestoreJob(obj, &data->oldjob);
|
|
|
|
|
|
|
|
if (qemuDomainObjBeginJobWithDriver(src->driver, obj, QEMU_JOB_MODIFY) < 0)
|
|
|
|
goto error;
|
|
|
|
|
2011-09-16 13:44:43 +00:00
|
|
|
/* Since we close the connection later on, we have to make sure
|
|
|
|
* that the threads we start see a valid connection throughout their
|
|
|
|
* lifetime. We simply increase the reference counter here.
|
|
|
|
*/
|
|
|
|
virConnectRef(data->conn);
|
|
|
|
|
2011-08-16 10:51:36 +00:00
|
|
|
if (virThreadCreate(&thread, true, qemuProcessReconnect, data) < 0) {
|
2011-09-16 13:44:43 +00:00
|
|
|
|
|
|
|
virConnectClose(data->conn);
|
|
|
|
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
|
|
|
|
_("Could not create thread. QEMU initialization "
|
|
|
|
"might be incomplete"));
|
2012-07-11 13:35:46 +00:00
|
|
|
if (!qemuDomainObjEndJob(src->driver, obj)) {
|
2011-08-16 10:51:36 +00:00
|
|
|
obj = NULL;
|
2012-07-11 13:35:46 +00:00
|
|
|
} else if (virObjectUnref(obj)) {
|
2011-08-16 10:51:36 +00:00
|
|
|
/* We can't spawn a thread and thus connect to monitor.
|
|
|
|
* Kill qemu */
|
2012-06-11 13:20:44 +00:00
|
|
|
qemuProcessStop(src->driver, obj, VIR_DOMAIN_SHUTOFF_FAILED, 0);
|
2011-08-16 10:51:36 +00:00
|
|
|
if (!obj->persistent)
|
2011-09-22 06:02:03 +00:00
|
|
|
qemuDomainRemoveInactive(src->driver, obj);
|
2011-08-16 10:51:36 +00:00
|
|
|
else
|
|
|
|
virDomainObjUnlock(obj);
|
|
|
|
}
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
|
|
|
virDomainObjUnlock(obj);
|
|
|
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
error:
|
|
|
|
VIR_FREE(data);
|
2011-02-14 16:09:39 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* qemuProcessReconnectAll
|
|
|
|
*
|
|
|
|
* Try to re-open the resources for live VMs that we care
|
|
|
|
* about.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
qemuProcessReconnectAll(virConnectPtr conn, struct qemud_driver *driver)
|
|
|
|
{
|
2012-03-26 16:39:03 +00:00
|
|
|
struct qemuProcessReconnectData data = {.conn = conn, .driver = driver};
|
2011-08-16 10:51:36 +00:00
|
|
|
virHashForEach(driver->domains.objs, qemuProcessReconnectHelper, &data);
|
2011-02-14 16:09:39 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
int qemuProcessStart(virConnectPtr conn,
|
|
|
|
struct qemud_driver *driver,
|
|
|
|
virDomainObjPtr vm,
|
|
|
|
const char *migrateFrom,
|
|
|
|
int stdin_fd,
|
|
|
|
const char *stdin_path,
|
2011-08-25 20:44:48 +00:00
|
|
|
virDomainSnapshotObjPtr snapshot,
|
2012-03-08 13:20:19 +00:00
|
|
|
enum virNetDevVPortProfileOp vmop,
|
|
|
|
unsigned int flags)
|
2011-02-14 16:09:39 +00:00
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
off_t pos = -1;
|
|
|
|
char ebuf[1024];
|
|
|
|
int logfile = -1;
|
|
|
|
char *timestamp;
|
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
|
|
|
virCommandPtr cmd = NULL;
|
|
|
|
struct qemuProcessHookData hookData;
|
2011-03-15 02:20:53 +00:00
|
|
|
unsigned long cur_balloon;
|
2012-01-31 04:52:00 +00:00
|
|
|
int i;
|
2012-05-12 12:53:15 +00:00
|
|
|
char *nodeset = NULL;
|
2012-09-14 07:47:00 +00:00
|
|
|
virBitmapPtr nodemask = NULL;
|
2012-06-11 13:57:19 +00:00
|
|
|
unsigned int stop_flags;
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2012-03-08 13:20:19 +00:00
|
|
|
/* Okay, these are just internal flags,
|
|
|
|
* but doesn't hurt to check */
|
|
|
|
virCheckFlags(VIR_QEMU_PROCESS_START_COLD |
|
|
|
|
VIR_QEMU_PROCESS_START_PAUSED |
|
|
|
|
VIR_QEMU_PROCESS_START_AUTODESROY, -1);
|
|
|
|
|
2012-06-11 13:57:19 +00:00
|
|
|
/* From now on until domain security labeling is done:
|
|
|
|
* if any operation fails and we goto cleanup, we must not
|
|
|
|
* restore any security label as we would overwrite labels
|
|
|
|
* we did not set. */
|
|
|
|
stop_flags = VIR_QEMU_PROCESS_STOP_NO_RELABEL;
|
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
hookData.conn = conn;
|
|
|
|
hookData.vm = vm;
|
|
|
|
hookData.driver = driver;
|
|
|
|
|
2011-05-09 09:24:09 +00:00
|
|
|
VIR_DEBUG("Beginning VM startup process");
|
2011-02-14 16:09:39 +00:00
|
|
|
|
|
|
|
if (virDomainObjIsActive(vm)) {
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_OPERATION_INVALID,
|
|
|
|
"%s", _("VM is already active"));
|
2011-02-14 16:09:39 +00:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Do this upfront, so any part of the startup process can add
|
|
|
|
* runtime state to vm->def that won't be persisted. This let's us
|
|
|
|
* report implicit runtime defaults in the XML, like vnc listen/socket
|
|
|
|
*/
|
2011-05-09 09:24:09 +00:00
|
|
|
VIR_DEBUG("Setting current domain def as transient");
|
2011-02-14 16:09:39 +00:00
|
|
|
if (virDomainObjSetDefTransient(driver->caps, vm, true) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
vm->def->id = driver->nextvmid++;
|
2011-09-28 10:10:13 +00:00
|
|
|
qemuDomainSetFakeReboot(driver, vm, false);
|
2011-07-12 09:45:16 +00:00
|
|
|
virDomainObjSetState(vm, VIR_DOMAIN_SHUTOFF, VIR_DOMAIN_SHUTOFF_UNKNOWN);
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2011-03-23 20:50:29 +00:00
|
|
|
/* Run an early hook to set-up missing devices */
|
2011-03-22 13:12:36 +00:00
|
|
|
if (virHookPresent(VIR_HOOK_DRIVER_QEMU)) {
|
2012-05-04 19:23:17 +00:00
|
|
|
char *xml = qemuDomainDefFormatXML(driver, vm->def, 0, false);
|
2011-03-22 13:12:36 +00:00
|
|
|
int hookret;
|
|
|
|
|
|
|
|
hookret = virHookCall(VIR_HOOK_DRIVER_QEMU, vm->def->name,
|
2012-02-27 16:06:22 +00:00
|
|
|
VIR_HOOK_QEMU_OP_PREPARE, VIR_HOOK_SUBOP_BEGIN,
|
|
|
|
NULL, xml, NULL);
|
2011-03-22 13:12:36 +00:00
|
|
|
VIR_FREE(xml);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If the script raised an error abort the launch
|
|
|
|
*/
|
|
|
|
if (hookret < 0)
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
/* Must be run before security labelling */
|
2011-05-09 09:24:09 +00:00
|
|
|
VIR_DEBUG("Preparing host devices");
|
2011-02-14 16:09:39 +00:00
|
|
|
if (qemuPrepareHostDevices(driver, vm->def) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
2011-05-09 09:24:09 +00:00
|
|
|
VIR_DEBUG("Preparing chr devices");
|
2011-02-14 16:09:39 +00:00
|
|
|
if (virDomainChrDefForeach(vm->def,
|
|
|
|
true,
|
|
|
|
qemuProcessPrepareChardevDevice,
|
|
|
|
NULL) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
/* If you are using a SecurityDriver with dynamic labelling,
|
|
|
|
then generate a security label for isolation */
|
2011-05-09 09:24:09 +00:00
|
|
|
VIR_DEBUG("Generating domain security label (if required)");
|
Change security driver APIs to use virDomainDefPtr instead of virDomainObjPtr
When sVirt is integrated with the LXC driver, it will be neccessary
to invoke the security driver APIs using only a virDomainDefPtr
since the lxc_container.c code has no virDomainObjPtr available.
Aside from two functions which want obj->pid, every bit of the
security driver code only touches obj->def. So we don't need to
pass a virDomainObjPtr into the security drivers, a virDomainDefPtr
is sufficient. Two functions also gain a 'pid_t pid' argument.
* src/qemu/qemu_driver.c, src/qemu/qemu_hotplug.c,
src/qemu/qemu_migration.c, src/qemu/qemu_process.c,
src/security/security_apparmor.c,
src/security/security_dac.c,
src/security/security_driver.h,
src/security/security_manager.c,
src/security/security_manager.h,
src/security/security_nop.c,
src/security/security_selinux.c,
src/security/security_stack.c: Change all security APIs to use a
virDomainDefPtr instead of virDomainObjPtr
2011-07-14 13:32:06 +00:00
|
|
|
if (virSecurityManagerGenLabel(driver->securityManager, vm->def) < 0) {
|
Move qemu_audit.h helpers into shared code
The LXC and UML drivers can both make use of auditing. Move
the qemu_audit.{c,h} files to src/conf/domain_audit.{c,h}
* src/conf/domain_audit.c: Rename from src/qemu/qemu_audit.c
* src/conf/domain_audit.h: Rename from src/qemu/qemu_audit.h
* src/Makefile.am: Remove qemu_audit.{c,h}, add domain_audit.{c,h}
* src/qemu/qemu_audit.h, src/qemu/qemu_cgroup.c,
src/qemu/qemu_command.c, src/qemu/qemu_driver.c,
src/qemu/qemu_hotplug.c, src/qemu/qemu_migration.c,
src/qemu/qemu_process.c: Update for changed audit API names
2011-07-04 10:56:13 +00:00
|
|
|
virDomainAuditSecurityLabel(vm, false);
|
2011-02-14 16:09:39 +00:00
|
|
|
goto cleanup;
|
|
|
|
}
|
Move qemu_audit.h helpers into shared code
The LXC and UML drivers can both make use of auditing. Move
the qemu_audit.{c,h} files to src/conf/domain_audit.{c,h}
* src/conf/domain_audit.c: Rename from src/qemu/qemu_audit.c
* src/conf/domain_audit.h: Rename from src/qemu/qemu_audit.h
* src/Makefile.am: Remove qemu_audit.{c,h}, add domain_audit.{c,h}
* src/qemu/qemu_audit.h, src/qemu/qemu_cgroup.c,
src/qemu/qemu_command.c, src/qemu/qemu_driver.c,
src/qemu/qemu_hotplug.c, src/qemu/qemu_migration.c,
src/qemu/qemu_process.c: Update for changed audit API names
2011-07-04 10:56:13 +00:00
|
|
|
virDomainAuditSecurityLabel(vm, true);
|
2011-02-14 16:09:39 +00:00
|
|
|
|
|
|
|
/* Ensure no historical cgroup for this VM is lying around bogus
|
|
|
|
* settings */
|
2011-05-09 09:24:09 +00:00
|
|
|
VIR_DEBUG("Ensuring no historical cgroup is lying around");
|
2011-02-14 16:09:39 +00:00
|
|
|
qemuRemoveCgroup(driver, vm, 1);
|
|
|
|
|
|
|
|
if (vm->def->ngraphics == 1) {
|
|
|
|
if (vm->def->graphics[0]->type == VIR_DOMAIN_GRAPHICS_TYPE_VNC &&
|
|
|
|
!vm->def->graphics[0]->data.vnc.socket &&
|
|
|
|
vm->def->graphics[0]->data.vnc.autoport) {
|
2012-08-31 13:33:26 +00:00
|
|
|
int port = qemuProcessNextFreePort(driver, driver->remotePortMin);
|
2011-02-14 16:09:39 +00:00
|
|
|
if (port < 0) {
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
2012-06-18 09:07:17 +00:00
|
|
|
"%s", _("Unable to find an unused port for VNC"));
|
2011-02-14 16:09:39 +00:00
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
vm->def->graphics[0]->data.vnc.port = port;
|
2012-03-12 15:50:39 +00:00
|
|
|
} else if (vm->def->graphics[0]->type == VIR_DOMAIN_GRAPHICS_TYPE_SPICE) {
|
|
|
|
int port = -1;
|
|
|
|
if (vm->def->graphics[0]->data.spice.autoport ||
|
|
|
|
vm->def->graphics[0]->data.spice.port == -1) {
|
2012-08-31 13:33:26 +00:00
|
|
|
port = qemuProcessNextFreePort(driver, driver->remotePortMin);
|
2012-03-12 15:50:39 +00:00
|
|
|
|
|
|
|
if (port < 0) {
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
2012-06-18 09:07:17 +00:00
|
|
|
"%s", _("Unable to find an unused port for SPICE"));
|
2012-03-12 15:50:39 +00:00
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
vm->def->graphics[0]->data.spice.port = port;
|
2011-02-14 16:09:39 +00:00
|
|
|
}
|
2012-03-12 15:50:39 +00:00
|
|
|
if (driver->spiceTLS &&
|
|
|
|
(vm->def->graphics[0]->data.spice.autoport ||
|
|
|
|
vm->def->graphics[0]->data.spice.tlsPort == -1)) {
|
|
|
|
int tlsPort = qemuProcessNextFreePort(driver,
|
|
|
|
vm->def->graphics[0]->data.spice.port + 1);
|
2011-02-14 16:09:39 +00:00
|
|
|
if (tlsPort < 0) {
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
2012-06-18 09:07:17 +00:00
|
|
|
"%s", _("Unable to find an unused port for SPICE TLS"));
|
2011-02-14 16:09:39 +00:00
|
|
|
qemuProcessReturnPort(driver, port);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2012-08-27 07:41:36 +00:00
|
|
|
vm->def->graphics[0]->data.spice.tlsPort = tlsPort;
|
2012-03-12 15:50:39 +00:00
|
|
|
}
|
2011-02-14 16:09:39 +00:00
|
|
|
}
|
2012-06-25 11:50:52 +00:00
|
|
|
|
|
|
|
if (vm->def->graphics[0]->type == VIR_DOMAIN_GRAPHICS_TYPE_VNC ||
|
|
|
|
vm->def->graphics[0]->type == VIR_DOMAIN_GRAPHICS_TYPE_SPICE) {
|
|
|
|
virDomainGraphicsDefPtr graphics = vm->def->graphics[0];
|
|
|
|
if (graphics->nListens == 0) {
|
|
|
|
if (VIR_EXPAND_N(graphics->listens, graphics->nListens, 1) < 0) {
|
|
|
|
virReportOOMError();
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
graphics->listens[0].type = VIR_DOMAIN_GRAPHICS_LISTEN_TYPE_ADDRESS;
|
|
|
|
if (vm->def->graphics[0]->type == VIR_DOMAIN_GRAPHICS_TYPE_VNC)
|
|
|
|
graphics->listens[0].address = strdup(driver->vncListen);
|
|
|
|
else
|
|
|
|
graphics->listens[0].address = strdup(driver->spiceListen);
|
|
|
|
if (!graphics->listens[0].address) {
|
|
|
|
VIR_SHRINK_N(graphics->listens, graphics->nListens, 1);
|
|
|
|
virReportOOMError();
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2011-02-14 16:09:39 +00:00
|
|
|
}
|
|
|
|
|
2011-07-05 21:02:53 +00:00
|
|
|
if (virFileMakePath(driver->logDir) < 0) {
|
2011-02-14 16:09:39 +00:00
|
|
|
virReportSystemError(errno,
|
|
|
|
_("cannot create log directory %s"),
|
|
|
|
driver->logDir);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2011-05-09 09:24:09 +00:00
|
|
|
VIR_DEBUG("Creating domain log file");
|
2011-05-05 11:38:04 +00:00
|
|
|
if ((logfile = qemuDomainCreateLog(driver, vm, false)) < 0)
|
2011-02-14 16:09:39 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
2012-01-09 19:54:14 +00:00
|
|
|
if (vm->def->virtType == VIR_DOMAIN_VIRT_KVM) {
|
|
|
|
VIR_DEBUG("Checking for KVM availability");
|
|
|
|
if (access("/dev/kvm", F_OK) != 0) {
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
|
|
|
|
_("Domain requires KVM, but it is not available. "
|
|
|
|
"Check that virtualization is enabled in the host BIOS, "
|
|
|
|
"and host configuration is setup to load the kvm modules."));
|
2012-01-09 19:54:14 +00:00
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-05-09 09:24:09 +00:00
|
|
|
VIR_DEBUG("Determining emulator version");
|
2012-08-20 16:44:14 +00:00
|
|
|
virObjectUnref(priv->caps);
|
|
|
|
priv->caps = NULL;
|
2011-02-14 16:09:39 +00:00
|
|
|
if (qemuCapsExtractVersionInfo(vm->def->emulator, vm->def->os.arch,
|
2012-07-04 09:44:58 +00:00
|
|
|
true,
|
2011-02-14 16:09:39 +00:00
|
|
|
NULL,
|
2012-08-20 16:44:14 +00:00
|
|
|
&priv->caps) < 0)
|
2011-02-14 16:09:39 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
2012-08-20 16:44:14 +00:00
|
|
|
if (qemuAssignDeviceAliases(vm->def, priv->caps) < 0)
|
2011-10-18 14:15:42 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
VIR_DEBUG("Checking for CDROM and floppy presence");
|
2012-03-08 13:20:19 +00:00
|
|
|
if (qemuDomainCheckDiskPresence(driver, vm,
|
|
|
|
flags & VIR_QEMU_PROCESS_START_COLD) < 0)
|
2011-10-18 14:15:42 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
2012-05-12 12:53:15 +00:00
|
|
|
/* Get the advisory nodeset from numad if 'placement' of
|
|
|
|
* either <vcpu> or <numatune> is 'auto'.
|
|
|
|
*/
|
|
|
|
if ((vm->def->placement_mode ==
|
|
|
|
VIR_DOMAIN_CPU_PLACEMENT_MODE_AUTO) ||
|
|
|
|
(vm->def->numatune.memory.placement_mode ==
|
|
|
|
VIR_DOMAIN_NUMATUNE_MEM_PLACEMENT_MODE_AUTO)) {
|
|
|
|
nodeset = qemuGetNumadAdvice(vm->def);
|
|
|
|
if (!nodeset)
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
VIR_DEBUG("Nodeset returned from numad: %s", nodeset);
|
|
|
|
|
2012-09-14 07:47:00 +00:00
|
|
|
if (virBitmapParse(nodeset, 0, &nodemask,
|
|
|
|
VIR_DOMAIN_CPUMASK_LEN) < 0)
|
2012-05-12 12:53:15 +00:00
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
hookData.nodemask = nodemask;
|
|
|
|
|
2011-05-09 09:24:09 +00:00
|
|
|
VIR_DEBUG("Setting up domain cgroup (if required)");
|
2012-05-12 12:53:15 +00:00
|
|
|
if (qemuSetupCgroup(driver, vm, nodemask) < 0)
|
2011-02-14 16:09:39 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
if (VIR_ALLOC(priv->monConfig) < 0) {
|
|
|
|
virReportOOMError();
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2011-05-09 09:24:09 +00:00
|
|
|
VIR_DEBUG("Preparing monitor state");
|
2011-02-14 16:09:39 +00:00
|
|
|
if (qemuProcessPrepareMonitorChr(driver, priv->monConfig, vm->def->name) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
2012-08-20 16:44:14 +00:00
|
|
|
if (qemuCapsGet(priv->caps, QEMU_CAPS_MONITOR_JSON))
|
2011-02-14 16:09:39 +00:00
|
|
|
priv->monJSON = 1;
|
|
|
|
else
|
|
|
|
priv->monJSON = 0;
|
|
|
|
|
2011-05-31 16:34:20 +00:00
|
|
|
priv->monError = false;
|
|
|
|
priv->monStart = 0;
|
2011-02-14 16:09:39 +00:00
|
|
|
priv->gotShutdown = false;
|
|
|
|
|
2011-06-17 13:43:54 +00:00
|
|
|
VIR_FREE(priv->pidfile);
|
2011-08-05 13:13:12 +00:00
|
|
|
if (!(priv->pidfile = virPidFileBuildPath(driver->stateDir, vm->def->name))) {
|
2011-06-17 13:43:54 +00:00
|
|
|
virReportSystemError(errno,
|
|
|
|
"%s", _("Failed to build pidfile path."));
|
2011-02-14 16:09:39 +00:00
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2011-06-17 13:43:54 +00:00
|
|
|
if (unlink(priv->pidfile) < 0 &&
|
|
|
|
errno != ENOENT) {
|
2011-02-14 16:09:39 +00:00
|
|
|
virReportSystemError(errno,
|
2011-06-17 13:43:54 +00:00
|
|
|
_("Cannot remove stale PID file %s"),
|
|
|
|
priv->pidfile);
|
2011-02-14 16:09:39 +00:00
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Normally PCI addresses are assigned in the virDomainCreate
|
|
|
|
* or virDomainDefine methods. We might still need to assign
|
|
|
|
* some here to cope with the question of upgrades. Regardless
|
|
|
|
* we also need to populate the PCi address set cache for later
|
|
|
|
* use in hotplug
|
|
|
|
*/
|
2012-08-20 16:44:14 +00:00
|
|
|
if (qemuCapsGet(priv->caps, QEMU_CAPS_DEVICE)) {
|
2011-05-09 09:24:09 +00:00
|
|
|
VIR_DEBUG("Assigning domain PCI addresses");
|
2012-08-20 16:44:14 +00:00
|
|
|
if ((qemuDomainAssignAddresses(vm->def, priv->caps, vm)) < 0)
|
2012-08-08 07:06:33 +00:00
|
|
|
goto cleanup;
|
2011-02-14 16:09:39 +00:00
|
|
|
}
|
|
|
|
|
2011-05-09 09:24:09 +00:00
|
|
|
VIR_DEBUG("Building emulator command line");
|
2011-02-14 16:09:39 +00:00
|
|
|
if (!(cmd = qemuBuildCommandLine(conn, driver, vm->def, priv->monConfig,
|
2012-08-20 16:44:14 +00:00
|
|
|
priv->monJSON != 0, priv->caps,
|
2011-08-25 20:44:48 +00:00
|
|
|
migrateFrom, stdin_fd, snapshot, vmop)))
|
2011-02-14 16:09:39 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
/* now that we know it is about to start call the hook if present */
|
|
|
|
if (virHookPresent(VIR_HOOK_DRIVER_QEMU)) {
|
2012-05-04 19:23:17 +00:00
|
|
|
char *xml = qemuDomainDefFormatXML(driver, vm->def, 0, false);
|
2011-02-14 16:09:39 +00:00
|
|
|
int hookret;
|
|
|
|
|
|
|
|
hookret = virHookCall(VIR_HOOK_DRIVER_QEMU, vm->def->name,
|
2012-02-27 16:06:22 +00:00
|
|
|
VIR_HOOK_QEMU_OP_START, VIR_HOOK_SUBOP_BEGIN,
|
|
|
|
NULL, xml, NULL);
|
2011-02-14 16:09:39 +00:00
|
|
|
VIR_FREE(xml);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If the script raised an error abort the launch
|
|
|
|
*/
|
|
|
|
if (hookret < 0)
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2011-11-29 12:33:23 +00:00
|
|
|
if ((timestamp = virTimeStringNow()) == NULL) {
|
2011-02-14 16:09:39 +00:00
|
|
|
virReportOOMError();
|
|
|
|
goto cleanup;
|
|
|
|
} else {
|
|
|
|
if (safewrite(logfile, timestamp, strlen(timestamp)) < 0 ||
|
|
|
|
safewrite(logfile, START_POSTFIX, strlen(START_POSTFIX)) < 0) {
|
|
|
|
VIR_WARN("Unable to write timestamp to logfile: %s",
|
2012-03-29 09:52:04 +00:00
|
|
|
virStrerror(errno, ebuf, sizeof(ebuf)));
|
2011-02-14 16:09:39 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
VIR_FREE(timestamp);
|
|
|
|
}
|
|
|
|
|
|
|
|
virCommandWriteArgLog(cmd, logfile);
|
|
|
|
|
2011-05-05 11:48:07 +00:00
|
|
|
qemuDomainObjCheckTaint(driver, vm, logfile);
|
2011-05-04 10:59:20 +00:00
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
if ((pos = lseek(logfile, 0, SEEK_END)) < 0)
|
|
|
|
VIR_WARN("Unable to seek to end of logfile: %s",
|
2012-03-29 09:52:04 +00:00
|
|
|
virStrerror(errno, ebuf, sizeof(ebuf)));
|
2011-02-14 16:09:39 +00:00
|
|
|
|
|
|
|
VIR_DEBUG("Clear emulator capabilities: %d",
|
|
|
|
driver->clearEmulatorCapabilities);
|
|
|
|
if (driver->clearEmulatorCapabilities)
|
|
|
|
virCommandClearCaps(cmd);
|
|
|
|
|
2012-01-31 04:52:00 +00:00
|
|
|
/* in case a certain disk is desirous of CAP_SYS_RAWIO, add this */
|
|
|
|
for (i = 0; i < vm->def->ndisks; i++) {
|
|
|
|
if (vm->def->disks[i]->rawio == 1)
|
|
|
|
virCommandAllowCap(cmd, CAP_SYS_RAWIO);
|
|
|
|
}
|
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
virCommandSetPreExecHook(cmd, qemuProcessHook, &hookData);
|
|
|
|
|
|
|
|
virCommandSetOutputFD(cmd, &logfile);
|
|
|
|
virCommandSetErrorFD(cmd, &logfile);
|
|
|
|
virCommandNonblockingFDs(cmd);
|
2011-06-17 13:43:54 +00:00
|
|
|
virCommandSetPidFile(cmd, priv->pidfile);
|
2011-02-14 16:09:39 +00:00
|
|
|
virCommandDaemonize(cmd);
|
2010-10-26 14:04:46 +00:00
|
|
|
virCommandRequireHandshake(cmd);
|
2011-02-14 16:09:39 +00:00
|
|
|
|
|
|
|
ret = virCommandRun(cmd, NULL);
|
|
|
|
|
2011-04-11 22:25:25 +00:00
|
|
|
/* wait for qemu process to show up */
|
2011-02-14 16:09:39 +00:00
|
|
|
if (ret == 0) {
|
2011-08-05 13:13:12 +00:00
|
|
|
if (virPidFileReadPath(priv->pidfile, &vm->pid) < 0) {
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
_("Domain %s didn't show up"), vm->def->name);
|
2011-02-14 16:09:39 +00:00
|
|
|
ret = -1;
|
|
|
|
}
|
|
|
|
#if 0
|
|
|
|
} else if (ret == -2) {
|
|
|
|
/*
|
|
|
|
* XXX this is bogus. It isn't safe to set vm->pid = child
|
|
|
|
* because the child no longer exists.
|
|
|
|
*/
|
|
|
|
|
2011-05-06 20:10:40 +00:00
|
|
|
/* The virCommand process that launches the daemon failed. Pending on
|
2011-02-14 16:09:39 +00:00
|
|
|
* when it failed (we can't determine for sure), there may be
|
|
|
|
* extra info in the domain log (if the hook failed for example).
|
|
|
|
*
|
|
|
|
* Pretend like things succeeded, and let 'WaitForMonitor' report
|
|
|
|
* the log contents for us.
|
|
|
|
*/
|
|
|
|
vm->pid = child;
|
|
|
|
ret = 0;
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
2011-07-12 09:45:16 +00:00
|
|
|
VIR_DEBUG("Writing early domain status to disk");
|
|
|
|
if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0) {
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2010-10-26 14:04:46 +00:00
|
|
|
VIR_DEBUG("Waiting for handshake from child");
|
|
|
|
if (virCommandHandshakeWait(cmd) < 0) {
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
VIR_DEBUG("Setting domain security labels");
|
|
|
|
if (virSecurityManagerSetAllLabel(driver->securityManager,
|
Change security driver APIs to use virDomainDefPtr instead of virDomainObjPtr
When sVirt is integrated with the LXC driver, it will be neccessary
to invoke the security driver APIs using only a virDomainDefPtr
since the lxc_container.c code has no virDomainObjPtr available.
Aside from two functions which want obj->pid, every bit of the
security driver code only touches obj->def. So we don't need to
pass a virDomainObjPtr into the security drivers, a virDomainDefPtr
is sufficient. Two functions also gain a 'pid_t pid' argument.
* src/qemu/qemu_driver.c, src/qemu/qemu_hotplug.c,
src/qemu/qemu_migration.c, src/qemu/qemu_process.c,
src/security/security_apparmor.c,
src/security/security_dac.c,
src/security/security_driver.h,
src/security/security_manager.c,
src/security/security_manager.h,
src/security/security_nop.c,
src/security/security_selinux.c,
src/security/security_stack.c: Change all security APIs to use a
virDomainDefPtr instead of virDomainObjPtr
2011-07-14 13:32:06 +00:00
|
|
|
vm->def, stdin_path) < 0)
|
2010-10-26 14:04:46 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
2012-06-11 13:57:19 +00:00
|
|
|
/* Security manager labeled all devices, therefore
|
|
|
|
* if any operation from now on fails and we goto cleanup,
|
|
|
|
* where virSecurityManagerRestoreAllLabel() is called
|
|
|
|
* (hidden under qemuProcessStop) we need to restore labels. */
|
|
|
|
stop_flags &= ~VIR_QEMU_PROCESS_STOP_NO_RELABEL;
|
|
|
|
|
2010-10-26 14:04:46 +00:00
|
|
|
if (stdin_fd != -1) {
|
|
|
|
/* if there's an fd to migrate from, and it's a pipe, put the
|
|
|
|
* proper security label on it
|
|
|
|
*/
|
|
|
|
struct stat stdin_sb;
|
|
|
|
|
|
|
|
VIR_DEBUG("setting security label on pipe used for migration");
|
|
|
|
|
|
|
|
if (fstat(stdin_fd, &stdin_sb) < 0) {
|
|
|
|
virReportSystemError(errno,
|
|
|
|
_("cannot stat fd %d"), stdin_fd);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
if (S_ISFIFO(stdin_sb.st_mode) &&
|
Change security driver APIs to use virDomainDefPtr instead of virDomainObjPtr
When sVirt is integrated with the LXC driver, it will be neccessary
to invoke the security driver APIs using only a virDomainDefPtr
since the lxc_container.c code has no virDomainObjPtr available.
Aside from two functions which want obj->pid, every bit of the
security driver code only touches obj->def. So we don't need to
pass a virDomainObjPtr into the security drivers, a virDomainDefPtr
is sufficient. Two functions also gain a 'pid_t pid' argument.
* src/qemu/qemu_driver.c, src/qemu/qemu_hotplug.c,
src/qemu/qemu_migration.c, src/qemu/qemu_process.c,
src/security/security_apparmor.c,
src/security/security_dac.c,
src/security/security_driver.h,
src/security/security_manager.c,
src/security/security_manager.h,
src/security/security_nop.c,
src/security/security_selinux.c,
src/security/security_stack.c: Change all security APIs to use a
virDomainDefPtr instead of virDomainObjPtr
2011-07-14 13:32:06 +00:00
|
|
|
virSecurityManagerSetImageFDLabel(driver->securityManager, vm->def, stdin_fd) < 0)
|
2010-10-26 14:04:46 +00:00
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
VIR_DEBUG("Labelling done, completing handshake to child");
|
|
|
|
if (virCommandHandshakeNotify(cmd) < 0) {
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
VIR_DEBUG("Handshake complete, child running");
|
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
if (migrateFrom)
|
2012-03-08 13:20:19 +00:00
|
|
|
flags |= VIR_QEMU_PROCESS_START_PAUSED;
|
2011-02-14 16:09:39 +00:00
|
|
|
|
|
|
|
if (ret == -1) /* The VM failed to start; tear filters before taps */
|
|
|
|
virDomainConfVMNWFilterTeardown(vm);
|
|
|
|
|
|
|
|
if (ret == -1) /* The VM failed to start */
|
|
|
|
goto cleanup;
|
|
|
|
|
2011-05-09 09:24:09 +00:00
|
|
|
VIR_DEBUG("Waiting for monitor to show up");
|
2012-08-20 16:44:14 +00:00
|
|
|
if (qemuProcessWaitForMonitor(driver, vm, priv->caps, pos) < 0)
|
2011-02-14 16:09:39 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
2011-10-05 17:31:54 +00:00
|
|
|
/* Failure to connect to agent shouldn't be fatal */
|
|
|
|
if (qemuConnectAgent(driver, vm) < 0) {
|
|
|
|
VIR_WARN("Cannot connect to QEMU guest agent for %s",
|
|
|
|
vm->def->name);
|
|
|
|
virResetLastError();
|
|
|
|
priv->agentError = true;
|
|
|
|
}
|
|
|
|
|
2011-05-09 09:24:09 +00:00
|
|
|
VIR_DEBUG("Detecting VCPU PIDs");
|
2011-02-14 16:09:39 +00:00
|
|
|
if (qemuProcessDetectVcpuPIDs(driver, vm) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
2012-08-21 09:18:26 +00:00
|
|
|
VIR_DEBUG("Setting cgroup for each VCPU (if required)");
|
2011-07-21 02:10:31 +00:00
|
|
|
if (qemuSetupCgroupForVcpu(driver, vm) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
2012-08-21 09:18:26 +00:00
|
|
|
VIR_DEBUG("Setting cgroup for emulator (if required)");
|
|
|
|
if (qemuSetupCgroupForEmulator(driver, vm) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
2011-05-09 09:24:09 +00:00
|
|
|
VIR_DEBUG("Setting VCPU affinities");
|
2011-03-29 13:41:25 +00:00
|
|
|
if (qemuProcessSetVcpuAffinites(conn, vm) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
2012-08-21 09:18:34 +00:00
|
|
|
VIR_DEBUG("Setting affinity of emulator threads");
|
|
|
|
if (qemuProcessSetEmulatorAffinites(conn, vm) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
2011-05-09 09:24:09 +00:00
|
|
|
VIR_DEBUG("Setting any required VM passwords");
|
2011-05-04 11:55:38 +00:00
|
|
|
if (qemuProcessInitPasswords(conn, driver, vm) < 0)
|
2011-02-14 16:09:39 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
/* If we have -device, then addresses are assigned explicitly.
|
|
|
|
* If not, then we have to detect dynamic ones here */
|
2012-08-20 16:44:14 +00:00
|
|
|
if (!qemuCapsGet(priv->caps, QEMU_CAPS_DEVICE)) {
|
2011-05-09 09:24:09 +00:00
|
|
|
VIR_DEBUG("Determining domain device PCI addresses");
|
2011-02-14 16:09:39 +00:00
|
|
|
if (qemuProcessInitPCIAddresses(driver, vm) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2011-09-06 08:23:47 +00:00
|
|
|
/* set default link states */
|
|
|
|
/* qemu doesn't support setting this on the command line, so
|
|
|
|
* enter the monitor */
|
|
|
|
VIR_DEBUG("Setting network link states");
|
|
|
|
qemuDomainObjEnterMonitorWithDriver(driver, vm);
|
|
|
|
if (qemuProcessSetLinkStates(vm) < 0) {
|
|
|
|
qemuDomainObjExitMonitorWithDriver(driver, vm);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
qemuDomainObjExitMonitorWithDriver(driver, vm);
|
|
|
|
|
qemu: fix crash when mixing sync and async monitor jobs
Currently, we attempt to run sync job and async job at the same time. It
means that the monitor commands for two jobs can be run in any order.
In the function qemuDomainObjEnterMonitorInternal():
if (priv->job.active == QEMU_JOB_NONE && priv->job.asyncJob) {
if (qemuDomainObjBeginNestedJob(driver, obj) < 0)
We check whether the caller is an async job by priv->job.active and
priv->job.asynJob. But when an async job is running, and a sync job is
also running at the time of the check, then priv->job.active is not
QEMU_JOB_NONE. So we cannot check whether the caller is an async job
in the function qemuDomainObjEnterMonitorInternal(), and must instead
put the burden on the caller to tell us when an async command wants
to do a nested job.
Once the burden is on the caller, then only async monitor enters need
to worry about whether the VM is still running; for sync monitor enter,
the internal return is always 0, so lots of ignore_value can be dropped.
* src/qemu/THREADS.txt: Reflect new rules.
* src/qemu/qemu_domain.h (qemuDomainObjEnterMonitorAsync): New
prototype.
* src/qemu/qemu_process.h (qemuProcessStartCPUs)
(qemuProcessStopCPUs): Add parameter.
* src/qemu/qemu_migration.h (qemuMigrationToFile): Likewise.
(qemuMigrationWaitForCompletion): Make static.
* src/qemu/qemu_domain.c (qemuDomainObjEnterMonitorInternal): Add
parameter.
(qemuDomainObjEnterMonitorAsync): New function.
(qemuDomainObjEnterMonitor, qemuDomainObjEnterMonitorWithDriver):
Update callers.
* src/qemu/qemu_driver.c (qemuDomainSaveInternal)
(qemudDomainCoreDump, doCoreDump, processWatchdogEvent)
(qemudDomainSuspend, qemudDomainResume, qemuDomainSaveImageStartVM)
(qemuDomainSnapshotCreateActive, qemuDomainRevertToSnapshot):
Likewise.
* src/qemu/qemu_process.c (qemuProcessStopCPUs)
(qemuProcessFakeReboot, qemuProcessRecoverMigration)
(qemuProcessRecoverJob, qemuProcessStart): Likewise.
* src/qemu/qemu_migration.c (qemuMigrationToFile)
(qemuMigrationWaitForCompletion, qemuMigrationUpdateJobStatus)
(qemuMigrationJobStart, qemuDomainMigrateGraphicsRelocate)
(doNativeMigrate, doTunnelMigrate, qemuMigrationPerformJob)
(qemuMigrationPerformPhase, qemuMigrationFinish)
(qemuMigrationConfirm): Likewise.
* src/qemu/qemu_hotplug.c: Drop unneeded ignore_value.
2011-07-28 23:18:24 +00:00
|
|
|
/* Technically, qemuProcessStart can be called from inside
|
|
|
|
* QEMU_ASYNC_JOB_MIGRATION_IN, but we are okay treating this like
|
|
|
|
* a sync job since no other job can call into the domain until
|
|
|
|
* migration completes. */
|
2011-05-09 09:24:09 +00:00
|
|
|
VIR_DEBUG("Setting initial memory amount");
|
2011-03-15 02:20:53 +00:00
|
|
|
cur_balloon = vm->def->mem.cur_balloon;
|
2012-03-02 20:27:39 +00:00
|
|
|
if (cur_balloon != vm->def->mem.cur_balloon) {
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_OVERFLOW,
|
|
|
|
_("unable to set balloon to %lld"),
|
|
|
|
vm->def->mem.cur_balloon);
|
2012-03-02 20:27:39 +00:00
|
|
|
goto cleanup;
|
|
|
|
}
|
qemu: fix crash when mixing sync and async monitor jobs
Currently, we attempt to run sync job and async job at the same time. It
means that the monitor commands for two jobs can be run in any order.
In the function qemuDomainObjEnterMonitorInternal():
if (priv->job.active == QEMU_JOB_NONE && priv->job.asyncJob) {
if (qemuDomainObjBeginNestedJob(driver, obj) < 0)
We check whether the caller is an async job by priv->job.active and
priv->job.asynJob. But when an async job is running, and a sync job is
also running at the time of the check, then priv->job.active is not
QEMU_JOB_NONE. So we cannot check whether the caller is an async job
in the function qemuDomainObjEnterMonitorInternal(), and must instead
put the burden on the caller to tell us when an async command wants
to do a nested job.
Once the burden is on the caller, then only async monitor enters need
to worry about whether the VM is still running; for sync monitor enter,
the internal return is always 0, so lots of ignore_value can be dropped.
* src/qemu/THREADS.txt: Reflect new rules.
* src/qemu/qemu_domain.h (qemuDomainObjEnterMonitorAsync): New
prototype.
* src/qemu/qemu_process.h (qemuProcessStartCPUs)
(qemuProcessStopCPUs): Add parameter.
* src/qemu/qemu_migration.h (qemuMigrationToFile): Likewise.
(qemuMigrationWaitForCompletion): Make static.
* src/qemu/qemu_domain.c (qemuDomainObjEnterMonitorInternal): Add
parameter.
(qemuDomainObjEnterMonitorAsync): New function.
(qemuDomainObjEnterMonitor, qemuDomainObjEnterMonitorWithDriver):
Update callers.
* src/qemu/qemu_driver.c (qemuDomainSaveInternal)
(qemudDomainCoreDump, doCoreDump, processWatchdogEvent)
(qemudDomainSuspend, qemudDomainResume, qemuDomainSaveImageStartVM)
(qemuDomainSnapshotCreateActive, qemuDomainRevertToSnapshot):
Likewise.
* src/qemu/qemu_process.c (qemuProcessStopCPUs)
(qemuProcessFakeReboot, qemuProcessRecoverMigration)
(qemuProcessRecoverJob, qemuProcessStart): Likewise.
* src/qemu/qemu_migration.c (qemuMigrationToFile)
(qemuMigrationWaitForCompletion, qemuMigrationUpdateJobStatus)
(qemuMigrationJobStart, qemuDomainMigrateGraphicsRelocate)
(doNativeMigrate, doTunnelMigrate, qemuMigrationPerformJob)
(qemuMigrationPerformPhase, qemuMigrationFinish)
(qemuMigrationConfirm): Likewise.
* src/qemu/qemu_hotplug.c: Drop unneeded ignore_value.
2011-07-28 23:18:24 +00:00
|
|
|
qemuDomainObjEnterMonitorWithDriver(driver, vm);
|
2011-03-15 02:20:53 +00:00
|
|
|
if (qemuMonitorSetBalloon(priv->mon, cur_balloon) < 0) {
|
2011-02-14 16:09:39 +00:00
|
|
|
qemuDomainObjExitMonitorWithDriver(driver, vm);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
qemuDomainObjExitMonitorWithDriver(driver, vm);
|
|
|
|
|
2012-03-08 13:20:19 +00:00
|
|
|
if (!(flags & VIR_QEMU_PROCESS_START_PAUSED)) {
|
2011-05-09 09:24:09 +00:00
|
|
|
VIR_DEBUG("Starting domain CPUs");
|
2011-02-14 16:09:39 +00:00
|
|
|
/* Allow the CPUS to start executing */
|
2011-05-04 09:07:01 +00:00
|
|
|
if (qemuProcessStartCPUs(driver, vm, conn,
|
qemu: fix crash when mixing sync and async monitor jobs
Currently, we attempt to run sync job and async job at the same time. It
means that the monitor commands for two jobs can be run in any order.
In the function qemuDomainObjEnterMonitorInternal():
if (priv->job.active == QEMU_JOB_NONE && priv->job.asyncJob) {
if (qemuDomainObjBeginNestedJob(driver, obj) < 0)
We check whether the caller is an async job by priv->job.active and
priv->job.asynJob. But when an async job is running, and a sync job is
also running at the time of the check, then priv->job.active is not
QEMU_JOB_NONE. So we cannot check whether the caller is an async job
in the function qemuDomainObjEnterMonitorInternal(), and must instead
put the burden on the caller to tell us when an async command wants
to do a nested job.
Once the burden is on the caller, then only async monitor enters need
to worry about whether the VM is still running; for sync monitor enter,
the internal return is always 0, so lots of ignore_value can be dropped.
* src/qemu/THREADS.txt: Reflect new rules.
* src/qemu/qemu_domain.h (qemuDomainObjEnterMonitorAsync): New
prototype.
* src/qemu/qemu_process.h (qemuProcessStartCPUs)
(qemuProcessStopCPUs): Add parameter.
* src/qemu/qemu_migration.h (qemuMigrationToFile): Likewise.
(qemuMigrationWaitForCompletion): Make static.
* src/qemu/qemu_domain.c (qemuDomainObjEnterMonitorInternal): Add
parameter.
(qemuDomainObjEnterMonitorAsync): New function.
(qemuDomainObjEnterMonitor, qemuDomainObjEnterMonitorWithDriver):
Update callers.
* src/qemu/qemu_driver.c (qemuDomainSaveInternal)
(qemudDomainCoreDump, doCoreDump, processWatchdogEvent)
(qemudDomainSuspend, qemudDomainResume, qemuDomainSaveImageStartVM)
(qemuDomainSnapshotCreateActive, qemuDomainRevertToSnapshot):
Likewise.
* src/qemu/qemu_process.c (qemuProcessStopCPUs)
(qemuProcessFakeReboot, qemuProcessRecoverMigration)
(qemuProcessRecoverJob, qemuProcessStart): Likewise.
* src/qemu/qemu_migration.c (qemuMigrationToFile)
(qemuMigrationWaitForCompletion, qemuMigrationUpdateJobStatus)
(qemuMigrationJobStart, qemuDomainMigrateGraphicsRelocate)
(doNativeMigrate, doTunnelMigrate, qemuMigrationPerformJob)
(qemuMigrationPerformPhase, qemuMigrationFinish)
(qemuMigrationConfirm): Likewise.
* src/qemu/qemu_hotplug.c: Drop unneeded ignore_value.
2011-07-28 23:18:24 +00:00
|
|
|
VIR_DOMAIN_RUNNING_BOOTED,
|
|
|
|
QEMU_ASYNC_JOB_NONE) < 0) {
|
2011-02-14 16:09:39 +00:00
|
|
|
if (virGetLastError() == NULL)
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
"%s", _("resume operation failed"));
|
2011-02-14 16:09:39 +00:00
|
|
|
goto cleanup;
|
|
|
|
}
|
2011-05-04 09:07:01 +00:00
|
|
|
} else {
|
|
|
|
virDomainObjSetState(vm, VIR_DOMAIN_PAUSED,
|
|
|
|
migrateFrom ?
|
|
|
|
VIR_DOMAIN_PAUSED_MIGRATION :
|
|
|
|
VIR_DOMAIN_PAUSED_USER);
|
2011-02-14 16:09:39 +00:00
|
|
|
}
|
|
|
|
|
2012-03-08 13:20:19 +00:00
|
|
|
if (flags & VIR_QEMU_PROCESS_START_AUTODESROY &&
|
2011-06-23 09:37:57 +00:00
|
|
|
qemuProcessAutoDestroyAdd(driver, vm, conn) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
2011-05-09 09:24:09 +00:00
|
|
|
VIR_DEBUG("Writing domain status to disk");
|
2011-02-14 16:09:39 +00:00
|
|
|
if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
Add some missing hook functions
A core use case of the hook scripts is to be able to do things
to a guest's network configuration. It is possible to hook into
the 'start' operation for a QEMU guest which runs just before
the guest is started. The TAP devices will exist at this point,
but the QEMU process will not. It can be desirable to have a
'started' hook too, which runs once QEMU has started.
If libvirtd is restarted it will re-populate firewall rules,
but there is no QEMU hook to trigger for existing domains.
This is solved with a 'reconnect' hook.
Finally, if attaching to an external QEMU process there needs
to be an 'attach' hook script.
This all also applies to the LXC driver
* docs/hooks.html.in: Document new operations
* src/util/hooks.c, src/util/hooks.c: Add 'started', 'reconnect'
and 'attach' operations for QEMU. Add 'prepare', 'started',
'release' and 'reconnect' operations for LXC
* src/lxc/lxc_driver.c: Add hooks for 'prepare', 'started',
'release' and 'reconnect' operations
* src/qemu/qemu_process.c: Add hooks for 'started', 'reconnect'
and 'reconnect' operations
2012-05-28 14:04:31 +00:00
|
|
|
/* finally we can call the 'started' hook script if any */
|
|
|
|
if (virHookPresent(VIR_HOOK_DRIVER_QEMU)) {
|
|
|
|
char *xml = qemuDomainDefFormatXML(driver, vm->def, 0, false);
|
|
|
|
int hookret;
|
|
|
|
|
|
|
|
hookret = virHookCall(VIR_HOOK_DRIVER_QEMU, vm->def->name,
|
|
|
|
VIR_HOOK_QEMU_OP_STARTED, VIR_HOOK_SUBOP_BEGIN,
|
|
|
|
NULL, xml, NULL);
|
|
|
|
VIR_FREE(xml);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If the script raised an error abort the launch
|
|
|
|
*/
|
|
|
|
if (hookret < 0)
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
virCommandFree(cmd);
|
|
|
|
VIR_FORCE_CLOSE(logfile);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
cleanup:
|
|
|
|
/* We jump here if we failed to start the VM for any reason, or
|
|
|
|
* if we failed to initialize the now running VM. kill it off and
|
|
|
|
* pretend we never started it */
|
2012-05-12 12:53:15 +00:00
|
|
|
VIR_FREE(nodeset);
|
2012-09-14 07:47:00 +00:00
|
|
|
virBitmapFree(nodemask);
|
2011-02-14 16:09:39 +00:00
|
|
|
virCommandFree(cmd);
|
|
|
|
VIR_FORCE_CLOSE(logfile);
|
2012-06-11 13:57:19 +00:00
|
|
|
qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED, stop_flags);
|
2011-02-14 16:09:39 +00:00
|
|
|
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
qemu: drop driver lock while trying to terminate qemu process
This patch is based on an earlier patch by Eric Blake which was never
committed:
https://www.redhat.com/archives/libvir-list/2011-November/msg00243.html
Aside from rebasing, this patch only drops the driver lock once (prior
to the first time the function sleeps), then leaves it dropped until
it returns (Eric's patch would drop and re-acquire the lock around
each call to sleep).
At the time Eric sent his patch, the response (from Dan Berrange) was
that, while it wasn't a good thing to be holding the driver lock while
sleeping, we really need to rethink locking wrt the driver object,
switching to a finer-grained approach that locks individual items
within the driver object separately to allow for greater concurrency.
This is a good plan, and at the time it made sense to not apply the
patch because there was no known bug related to the driver lock being
held in this function.
However, we now know that the length of the wait in qemuProcessKill is
sometimes too short to allow the qemu process to fully flush its disk
cache before SIGKILL is sent, so we need to lengthen the timeout (in
order to improve the situation with management applications until they
can be updated to use the new VIR_DOMAIN_DESTROY_GRACEFUL flag added
in commit 72f8a7f19753506ed957b78ad800c0f3892c9304). But, if we
lengthen the timeout, we also lengthen the amount of time that all
other threads in libvirtd are essentially blocked from doing anything
(since just about everything needs to acquire the driver lock, if only
for long enough to get a pointer to a domain).
The solution is to modify qemuProcessKill to drop the driver lock
while sleeping, as proposed in Eric's patch. Then we can increase the
timeout with a clear conscience, and thus at least lower the chances
that someone running with existing management software will suffer the
consequence's of qemu's disk cache not being flushed.
In the meantime, we still should work on Dan's proposal to make
locking within the driver object more fine grained.
(NB: although I couldn't find any instance where qemuProcessKill() was
called with no jobs active for the domain (or some other guarantee
that the current thread had at least one refcount on the domain
object), this patch still follows Eric's method of temporarily adding
a ref prior to unlocking the domain object, because I couldn't
convince myself 100% that this was the case.)
2012-02-07 16:13:57 +00:00
|
|
|
int
|
|
|
|
qemuProcessKill(struct qemud_driver *driver,
|
|
|
|
virDomainObjPtr vm, unsigned int flags)
|
2011-04-21 15:19:06 +00:00
|
|
|
{
|
qemu: drop driver lock while trying to terminate qemu process
This patch is based on an earlier patch by Eric Blake which was never
committed:
https://www.redhat.com/archives/libvir-list/2011-November/msg00243.html
Aside from rebasing, this patch only drops the driver lock once (prior
to the first time the function sleeps), then leaves it dropped until
it returns (Eric's patch would drop and re-acquire the lock around
each call to sleep).
At the time Eric sent his patch, the response (from Dan Berrange) was
that, while it wasn't a good thing to be holding the driver lock while
sleeping, we really need to rethink locking wrt the driver object,
switching to a finer-grained approach that locks individual items
within the driver object separately to allow for greater concurrency.
This is a good plan, and at the time it made sense to not apply the
patch because there was no known bug related to the driver lock being
held in this function.
However, we now know that the length of the wait in qemuProcessKill is
sometimes too short to allow the qemu process to fully flush its disk
cache before SIGKILL is sent, so we need to lengthen the timeout (in
order to improve the situation with management applications until they
can be updated to use the new VIR_DOMAIN_DESTROY_GRACEFUL flag added
in commit 72f8a7f19753506ed957b78ad800c0f3892c9304). But, if we
lengthen the timeout, we also lengthen the amount of time that all
other threads in libvirtd are essentially blocked from doing anything
(since just about everything needs to acquire the driver lock, if only
for long enough to get a pointer to a domain).
The solution is to modify qemuProcessKill to drop the driver lock
while sleeping, as proposed in Eric's patch. Then we can increase the
timeout with a clear conscience, and thus at least lower the chances
that someone running with existing management software will suffer the
consequence's of qemu's disk cache not being flushed.
In the meantime, we still should work on Dan's proposal to make
locking within the driver object more fine grained.
(NB: although I couldn't find any instance where qemuProcessKill() was
called with no jobs active for the domain (or some other guarantee
that the current thread had at least one refcount on the domain
object), this patch still follows Eric's method of temporarily adding
a ref prior to unlocking the domain object, because I couldn't
convince myself 100% that this was the case.)
2012-02-07 16:13:57 +00:00
|
|
|
int i, ret = -1;
|
qemu: new GRACEFUL flag for virDomainDestroy w/ QEMU support
When libvirt's virDomainDestroy API is shutting down the qemu process,
it first sends SIGTERM, then waits for 1.6 seconds and, if it sees the
process still there, sends a SIGKILL.
There have been reports that this behavior can lead to data loss
because the guest running in qemu doesn't have time to flush its disk
cache buffers before it's unceremoniously whacked.
This patch maintains that default behavior, but provides a new flag
VIR_DOMAIN_DESTROY_GRACEFUL to alter the behavior. If this flag is set
in the call to virDomainDestroyFlags, SIGKILL will never be sent to
the qemu process; instead, if the timeout is reached and the qemu
process still exists, virDomainDestroy will return an error.
Once this patch is in, the recommended method for applications to call
virDomainDestroyFlags will be with VIR_DOMAIN_DESTROY_GRACEFUL
included. If that fails, then the application can decide if and when
to call virDomainDestroyFlags again without
VIR_DOMAIN_DESTROY_GRACEFUL (to force the issue with SIGKILL).
(Note that this does not address the issue of existing applications
that have not yet been modified to use VIR_DOMAIN_DESTROY_GRACEFUL.
That is a separate patch.)
2012-01-27 18:28:23 +00:00
|
|
|
const char *signame = "TERM";
|
qemu: drop driver lock while trying to terminate qemu process
This patch is based on an earlier patch by Eric Blake which was never
committed:
https://www.redhat.com/archives/libvir-list/2011-November/msg00243.html
Aside from rebasing, this patch only drops the driver lock once (prior
to the first time the function sleeps), then leaves it dropped until
it returns (Eric's patch would drop and re-acquire the lock around
each call to sleep).
At the time Eric sent his patch, the response (from Dan Berrange) was
that, while it wasn't a good thing to be holding the driver lock while
sleeping, we really need to rethink locking wrt the driver object,
switching to a finer-grained approach that locks individual items
within the driver object separately to allow for greater concurrency.
This is a good plan, and at the time it made sense to not apply the
patch because there was no known bug related to the driver lock being
held in this function.
However, we now know that the length of the wait in qemuProcessKill is
sometimes too short to allow the qemu process to fully flush its disk
cache before SIGKILL is sent, so we need to lengthen the timeout (in
order to improve the situation with management applications until they
can be updated to use the new VIR_DOMAIN_DESTROY_GRACEFUL flag added
in commit 72f8a7f19753506ed957b78ad800c0f3892c9304). But, if we
lengthen the timeout, we also lengthen the amount of time that all
other threads in libvirtd are essentially blocked from doing anything
(since just about everything needs to acquire the driver lock, if only
for long enough to get a pointer to a domain).
The solution is to modify qemuProcessKill to drop the driver lock
while sleeping, as proposed in Eric's patch. Then we can increase the
timeout with a clear conscience, and thus at least lower the chances
that someone running with existing management software will suffer the
consequences of qemu's disk cache not being flushed.
In the meantime, we still should work on Dan's proposal to make
locking within the driver object more fine grained.
(NB: although I couldn't find any instance where qemuProcessKill() was
called with no jobs active for the domain (or some other guarantee
that the current thread had at least one refcount on the domain
object), this patch still follows Eric's method of temporarily adding
a ref prior to unlocking the domain object, because I couldn't
convince myself 100% that this was the case.)
2012-02-07 16:13:57 +00:00
|
|
|
bool driver_unlocked = false;
|
qemu: new GRACEFUL flag for virDomainDestroy w/ QEMU support
When libvirt's virDomainDestroy API is shutting down the qemu process,
it first sends SIGTERM, then waits for 1.6 seconds and, if it sees the
process still there, sends a SIGKILL.
There have been reports that this behavior can lead to data loss
because the guest running in qemu doesn't have time to flush its disk
cache buffers before it's unceremoniously whacked.
This patch maintains that default behavior, but provides a new flag
VIR_DOMAIN_DESTROY_GRACEFUL to alter the behavior. If this flag is set
in the call to virDomainDestroyFlags, SIGKILL will never be sent to
the qemu process; instead, if the timeout is reached and the qemu
process still exists, virDomainDestroy will return an error.
Once this patch is in, the recommended method for applications to call
virDomainDestroyFlags will be with VIR_DOMAIN_DESTROY_GRACEFUL
included. If that fails, then the application can decide if and when
to call virDomainDestroyFlags again without
VIR_DOMAIN_DESTROY_GRACEFUL (to force the issue with SIGKILL).
(Note that this does not address the issue of existing applications
that have not yet been modified to use VIR_DOMAIN_DESTROY_GRACEFUL.
That is a separate patch.)
2012-01-27 18:28:23 +00:00
|
|
|
|
|
|
|
VIR_DEBUG("vm=%s pid=%d flags=%x",
|
|
|
|
vm->def->name, vm->pid, flags);
|
2011-04-21 15:19:06 +00:00
|
|
|
|
2012-03-30 06:21:49 +00:00
|
|
|
if (!(flags & VIR_QEMU_PROCESS_KILL_NOCHECK)) {
|
|
|
|
if (!virDomainObjIsActive(vm)) {
|
|
|
|
VIR_DEBUG("VM '%s' not active", vm->def->name);
|
|
|
|
return 0;
|
|
|
|
}
|
2011-04-21 15:19:06 +00:00
|
|
|
}
|
|
|
|
|
qemu: new GRACEFUL flag for virDomainDestroy w/ QEMU support
When libvirt's virDomainDestroy API is shutting down the qemu process,
it first sends SIGTERM, then waits for 1.6 seconds and, if it sees the
process still there, sends a SIGKILL.
There have been reports that this behavior can lead to data loss
because the guest running in qemu doesn't have time to flush its disk
cache buffers before it's unceremoniously whacked.
This patch maintains that default behavior, but provides a new flag
VIR_DOMAIN_DESTROY_GRACEFUL to alter the behavior. If this flag is set
in the call to virDomainDestroyFlags, SIGKILL will never be sent to
the qemu process; instead, if the timeout is reached and the qemu
process still exists, virDomainDestroy will return an error.
Once this patch is in, the recommended method for applications to call
virDomainDestroyFlags will be with VIR_DOMAIN_DESTROY_GRACEFUL
included. If that fails, then the application can decide if and when
to call virDomainDestroyFlags again without
VIR_DOMAIN_DESTROY_GRACEFUL (to force the issue with SIGKILL).
(Note that this does not address the issue of existing applications
that have not yet been modified to use VIR_DOMAIN_DESTROY_GRACEFUL.
That is a separate patch.)
2012-01-27 18:28:23 +00:00
|
|
|
/* This loop sends SIGTERM (or SIGKILL if flags has
|
|
|
|
* VIR_QEMU_PROCESS_KILL_FORCE and VIR_QEMU_PROCESS_KILL_NOWAIT),
|
2012-02-02 17:09:44 +00:00
|
|
|
* then waits a few iterations (10 seconds) to see if it dies. If
|
|
|
|
* the qemu process still hasn't exited, and
|
|
|
|
* VIR_QEMU_PROCESS_KILL_FORCE is requested, a SIGKILL will then
|
|
|
|
* be sent, and qemuProcessKill will wait up to 5 seconds more for
|
|
|
|
* the process to exit before returning. Note that the FORCE mode
|
|
|
|
* could result in lost data in the guest, so it should only be
|
|
|
|
* used if the guest is hung and can't be destroyed in any other
|
|
|
|
* manner.
|
2011-04-21 15:19:06 +00:00
|
|
|
*/
|
2012-02-02 17:09:44 +00:00
|
|
|
for (i = 0 ; i < 75; i++) {
|
2011-04-21 15:19:06 +00:00
|
|
|
int signum;
|
qemu: new GRACEFUL flag for virDomainDestroy w/ QEMU support
When libvirt's virDomainDestroy API is shutting down the qemu process,
it first sends SIGTERM, then waits for 1.6 seconds and, if it sees the
process still there, sends a SIGKILL.
There have been reports that this behavior can lead to data loss
because the guest running in qemu doesn't have time to flush its disk
cache buffers before it's unceremoniously whacked.
This patch maintains that default behavior, but provides a new flag
VIR_DOMAIN_DESTROY_GRACEFUL to alter the behavior. If this flag is set
in the call to virDomainDestroyFlags, SIGKILL will never be sent to
the qemu process; instead, if the timeout is reached and the qemu
process still exists, virDomainDestroy will return an error.
Once this patch is in, the recommended method for applications to call
virDomainDestroyFlags will be with VIR_DOMAIN_DESTROY_GRACEFUL
included. If that fails, then the application can decide if and when
to call virDomainDestroyFlags again without
VIR_DOMAIN_DESTROY_GRACEFUL (to force the issue with SIGKILL).
(Note that this does not address the issue of existing applications
that have not yet been modified to use VIR_DOMAIN_DESTROY_GRACEFUL.
That is a separate patch.)
2012-01-27 18:28:23 +00:00
|
|
|
if (i == 0) {
|
|
|
|
if ((flags & VIR_QEMU_PROCESS_KILL_FORCE) &&
|
|
|
|
(flags & VIR_QEMU_PROCESS_KILL_NOWAIT)) {
|
|
|
|
signum = SIGKILL; /* kill it immediately */
|
|
|
|
signame="KILL";
|
|
|
|
} else {
|
|
|
|
signum = SIGTERM; /* kindly suggest it should exit */
|
|
|
|
}
|
2012-02-02 17:09:44 +00:00
|
|
|
            } else if ((i == 50) && (flags & VIR_QEMU_PROCESS_KILL_FORCE)) {
|
qemu: new GRACEFUL flag for virDomainDestroy w/ QEMU support
When libvirt's virDomainDestroy API is shutting down the qemu process,
it first sends SIGTERM, then waits for 1.6 seconds and, if it sees the
process still there, sends a SIGKILL.
There have been reports that this behavior can lead to data loss
because the guest running in qemu doesn't have time to flush its disk
cache buffers before it's unceremoniously whacked.
This patch maintains that default behavior, but provides a new flag
VIR_DOMAIN_DESTROY_GRACEFUL to alter the behavior. If this flag is set
in the call to virDomainDestroyFlags, SIGKILL will never be sent to
the qemu process; instead, if the timeout is reached and the qemu
process still exists, virDomainDestroy will return an error.
Once this patch is in, the recommended method for applications to call
virDomainDestroyFlags will be with VIR_DOMAIN_DESTROY_GRACEFUL
included. If that fails, then the application can decide if and when
to call virDomainDestroyFlags again without
VIR_DOMAIN_DESTROY_GRACEFUL (to force the issue with SIGKILL).
(Note that this does not address the issue of existing applications
that have not yet been modified to use VIR_DOMAIN_DESTROY_GRACEFUL.
That is a separate patch.)
2012-01-27 18:28:23 +00:00
|
|
|
VIR_WARN("Timed out waiting after SIG%s to process %d, "
|
|
|
|
"sending SIGKILL", signame, vm->pid);
|
|
|
|
signum = SIGKILL; /* kill it after a grace period */
|
|
|
|
signame="KILL";
|
|
|
|
} else {
|
2011-04-21 15:19:06 +00:00
|
|
|
signum = 0; /* Just check for existence */
|
qemu: new GRACEFUL flag for virDomainDestroy w/ QEMU support
When libvirt's virDomainDestroy API is shutting down the qemu process,
it first sends SIGTERM, then waits for 1.6 seconds and, if it sees the
process still there, sends a SIGKILL.
There have been reports that this behavior can lead to data loss
because the guest running in qemu doesn't have time to flush its disk
cache buffers before it's unceremoniously whacked.
This patch maintains that default behavior, but provides a new flag
VIR_DOMAIN_DESTROY_GRACEFUL to alter the behavior. If this flag is set
in the call to virDomainDestroyFlags, SIGKILL will never be sent to
the qemu process; instead, if the timeout is reached and the qemu
process still exists, virDomainDestroy will return an error.
Once this patch is in, the recommended method for applications to call
virDomainDestroyFlags will be with VIR_DOMAIN_DESTROY_GRACEFUL
included. If that fails, then the application can decide if and when
to call virDomainDestroyFlags again without
VIR_DOMAIN_DESTROY_GRACEFUL (to force the issue with SIGKILL).
(Note that this does not address the issue of existing applications
that have not yet been modified to use VIR_DOMAIN_DESTROY_GRACEFUL.
That is a separate patch.)
2012-01-27 18:28:23 +00:00
|
|
|
}
|
2011-04-21 15:19:06 +00:00
|
|
|
|
2011-06-04 10:05:28 +00:00
|
|
|
if (virKillProcess(vm->pid, signum) < 0) {
|
|
|
|
if (errno != ESRCH) {
|
2011-04-21 15:19:06 +00:00
|
|
|
char ebuf[1024];
|
qemu: new GRACEFUL flag for virDomainDestroy w/ QEMU support
When libvirt's virDomainDestroy API is shutting down the qemu process,
it first sends SIGTERM, then waits for 1.6 seconds and, if it sees the
process still there, sends a SIGKILL.
There have been reports that this behavior can lead to data loss
because the guest running in qemu doesn't have time to flush its disk
cache buffers before it's unceremoniously whacked.
This patch maintains that default behavior, but provides a new flag
VIR_DOMAIN_DESTROY_GRACEFUL to alter the behavior. If this flag is set
in the call to virDomainDestroyFlags, SIGKILL will never be sent to
the qemu process; instead, if the timeout is reached and the qemu
process still exists, virDomainDestroy will return an error.
Once this patch is in, the recommended method for applications to call
virDomainDestroyFlags will be with VIR_DOMAIN_DESTROY_GRACEFUL
included. If that fails, then the application can decide if and when
to call virDomainDestroyFlags again without
VIR_DOMAIN_DESTROY_GRACEFUL (to force the issue with SIGKILL).
(Note that this does not address the issue of existing applications
that have not yet been modified to use VIR_DOMAIN_DESTROY_GRACEFUL.
That is a separate patch.)
2012-01-27 18:28:23 +00:00
|
|
|
VIR_WARN("Failed to terminate process %d with SIG%s: %s",
|
|
|
|
vm->pid, signame,
|
2012-03-29 09:52:04 +00:00
|
|
|
virStrerror(errno, ebuf, sizeof(ebuf)));
|
qemu: drop driver lock while trying to terminate qemu process
This patch is based on an earlier patch by Eric Blake which was never
committed:
https://www.redhat.com/archives/libvir-list/2011-November/msg00243.html
Aside from rebasing, this patch only drops the driver lock once (prior
to the first time the function sleeps), then leaves it dropped until
it returns (Eric's patch would drop and re-acquire the lock around
each call to sleep).
At the time Eric sent his patch, the response (from Dan Berrange) was
that, while it wasn't a good thing to be holding the driver lock while
sleeping, we really need to rethink locking wrt the driver object,
switching to a finer-grained approach that locks individual items
within the driver object separately to allow for greater concurrency.
This is a good plan, and at the time it made sense to not apply the
patch because there was no known bug related to the driver lock being
held in this function.
However, we now know that the length of the wait in qemuProcessKill is
sometimes too short to allow the qemu process to fully flush its disk
cache before SIGKILL is sent, so we need to lengthen the timeout (in
order to improve the situation with management applications until they
can be updated to use the new VIR_DOMAIN_DESTROY_GRACEFUL flag added
in commit 72f8a7f19753506ed957b78ad800c0f3892c9304). But, if we
lengthen the timeout, we also lengthen the amount of time that all
other threads in libvirtd are essentially blocked from doing anything
(since just about everything needs to acquire the driver lock, if only
for long enough to get a pointer to a domain).
The solution is to modify qemuProcessKill to drop the driver lock
while sleeping, as proposed in Eric's patch. Then we can increase the
timeout with a clear conscience, and thus at least lower the chances
that someone running with existing management software will suffer the
consequences of qemu's disk cache not being flushed.
In the meantime, we still should work on Dan's proposal to make
locking within the driver object more fine grained.
(NB: although I couldn't find any instance where qemuProcessKill() was
called with no jobs active for the domain (or some other guarantee
that the current thread had at least one refcount on the domain
object), this patch still follows Eric's method of temporarily adding
a ref prior to unlocking the domain object, because I couldn't
convince myself 100% that this was the case.)
2012-02-07 16:13:57 +00:00
|
|
|
goto cleanup;
|
2011-04-21 15:19:06 +00:00
|
|
|
}
|
qemu: drop driver lock while trying to terminate qemu process
This patch is based on an earlier patch by Eric Blake which was never
committed:
https://www.redhat.com/archives/libvir-list/2011-November/msg00243.html
Aside from rebasing, this patch only drops the driver lock once (prior
to the first time the function sleeps), then leaves it dropped until
it returns (Eric's patch would drop and re-acquire the lock around
each call to sleep).
At the time Eric sent his patch, the response (from Dan Berrange) was
that, while it wasn't a good thing to be holding the driver lock while
sleeping, we really need to rethink locking wrt the driver object,
switching to a finer-grained approach that locks individual items
within the driver object separately to allow for greater concurrency.
This is a good plan, and at the time it made sense to not apply the
patch because there was no known bug related to the driver lock being
held in this function.
However, we now know that the length of the wait in qemuProcessKill is
sometimes too short to allow the qemu process to fully flush its disk
cache before SIGKILL is sent, so we need to lengthen the timeout (in
order to improve the situation with management applications until they
can be updated to use the new VIR_DOMAIN_DESTROY_GRACEFUL flag added
in commit 72f8a7f19753506ed957b78ad800c0f3892c9304). But, if we
lengthen the timeout, we also lengthen the amount of time that all
other threads in libvirtd are essentially blocked from doing anything
(since just about everything needs to acquire the driver lock, if only
for long enough to get a pointer to a domain).
The solution is to modify qemuProcessKill to drop the driver lock
while sleeping, as proposed in Eric's patch. Then we can increase the
timeout with a clear conscience, and thus at least lower the chances
that someone running with existing management software will suffer the
consequences of qemu's disk cache not being flushed.
In the meantime, we still should work on Dan's proposal to make
locking within the driver object more fine grained.
(NB: although I couldn't find any instance where qemuProcessKill() was
called with no jobs active for the domain (or some other guarantee
that the current thread had at least one refcount on the domain
object), this patch still follows Eric's method of temporarily adding
a ref prior to unlocking the domain object, because I couldn't
convince myself 100% that this was the case.)
2012-02-07 16:13:57 +00:00
|
|
|
ret = 0;
|
|
|
|
goto cleanup; /* process is dead */
|
2011-04-21 15:19:06 +00:00
|
|
|
}
|
|
|
|
|
qemu: drop driver lock while trying to terminate qemu process
This patch is based on an earlier patch by Eric Blake which was never
committed:
https://www.redhat.com/archives/libvir-list/2011-November/msg00243.html
Aside from rebasing, this patch only drops the driver lock once (prior
to the first time the function sleeps), then leaves it dropped until
it returns (Eric's patch would drop and re-acquire the lock around
each call to sleep).
At the time Eric sent his patch, the response (from Dan Berrange) was
that, while it wasn't a good thing to be holding the driver lock while
sleeping, we really need to rethink locking wrt the driver object,
switching to a finer-grained approach that locks individual items
within the driver object separately to allow for greater concurrency.
This is a good plan, and at the time it made sense to not apply the
patch because there was no known bug related to the driver lock being
held in this function.
However, we now know that the length of the wait in qemuProcessKill is
sometimes too short to allow the qemu process to fully flush its disk
cache before SIGKILL is sent, so we need to lengthen the timeout (in
order to improve the situation with management applications until they
can be updated to use the new VIR_DOMAIN_DESTROY_GRACEFUL flag added
in commit 72f8a7f19753506ed957b78ad800c0f3892c9304). But, if we
lengthen the timeout, we also lengthen the amount of time that all
other threads in libvirtd are essentially blocked from doing anything
(since just about everything needs to acquire the driver lock, if only
for long enough to get a pointer to a domain).
The solution is to modify qemuProcessKill to drop the driver lock
while sleeping, as proposed in Eric's patch. Then we can increase the
timeout with a clear conscience, and thus at least lower the chances
that someone running with existing management software will suffer the
consequence's of qemu's disk cache not being flushed.
In the meantime, we still should work on Dan's proposal to make
locking within the driver object more fine grained.
(NB: although I couldn't find any instance where qemuProcessKill() was
called with no jobs active for the domain (or some other guarantee
that the current thread had at least one refcount on the domain
object), this patch still follows Eric's method of temporarily adding
a ref prior to unlocking the domain object, because I couldn't
convince myself 100% that this was the case.)
2012-02-07 16:13:57 +00:00
|
|
|
if (i == 0 && (flags & VIR_QEMU_PROCESS_KILL_NOWAIT)) {
|
|
|
|
ret = 0;
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (driver && !driver_unlocked) {
|
|
|
|
/* THREADS.txt says we can't hold the driver lock while sleeping */
|
|
|
|
qemuDriverUnlock(driver);
|
|
|
|
driver_unlocked = true;
|
|
|
|
}
|
2011-09-13 16:11:26 +00:00
|
|
|
|
2011-04-21 15:19:06 +00:00
|
|
|
usleep(200 * 1000);
|
|
|
|
}
|
qemu: new GRACEFUL flag for virDomainDestroy w/ QEMU support
When libvirt's virDomainDestroy API is shutting down the qemu process,
it first sends SIGTERM, then waits for 1.6 seconds and, if it sees the
process still there, sends a SIGKILL.
There have been reports that this behavior can lead to data loss
because the guest running in qemu doesn't have time to flush its disk
cache buffers before it's unceremoniously whacked.
This patch maintains that default behavior, but provides a new flag
VIR_DOMAIN_DESTROY_GRACEFUL to alter the behavior. If this flag is set
in the call to virDomainDestroyFlags, SIGKILL will never be sent to
the qemu process; instead, if the timeout is reached and the qemu
process still exists, virDomainDestroy will return an error.
Once this patch is in, the recommended method for applications to call
virDomainDestroyFlags will be with VIR_DOMAIN_DESTROY_GRACEFUL
included. If that fails, then the application can decide if and when
to call virDomainDestroyFlags again without
VIR_DOMAIN_DESTROY_GRACEFUL (to force the issue with SIGKILL).
(Note that this does not address the issue of existing applications
that have not yet been modified to use VIR_DOMAIN_DESTROY_GRACEFUL.
That is a separate patch.)
2012-01-27 18:28:23 +00:00
|
|
|
VIR_WARN("Timed out waiting after SIG%s to process %d", signame, vm->pid);
|
qemu: drop driver lock while trying to terminate qemu process
This patch is based on an earlier patch by Eric Blake which was never
committed:
https://www.redhat.com/archives/libvir-list/2011-November/msg00243.html
Aside from rebasing, this patch only drops the driver lock once (prior
to the first time the function sleeps), then leaves it dropped until
it returns (Eric's patch would drop and re-acquire the lock around
each call to sleep).
At the time Eric sent his patch, the response (from Dan Berrange) was
that, while it wasn't a good thing to be holding the driver lock while
sleeping, we really need to rethink locking wrt the driver object,
switching to a finer-grained approach that locks individual items
within the driver object separately to allow for greater concurrency.
This is a good plan, and at the time it made sense to not apply the
patch because there was no known bug related to the driver lock being
held in this function.
However, we now know that the length of the wait in qemuProcessKill is
sometimes too short to allow the qemu process to fully flush its disk
cache before SIGKILL is sent, so we need to lengthen the timeout (in
order to improve the situation with management applications until they
can be updated to use the new VIR_DOMAIN_DESTROY_GRACEFUL flag added
in commit 72f8a7f19753506ed957b78ad800c0f3892c9304). But, if we
lengthen the timeout, we also lengthen the amount of time that all
other threads in libvirtd are essentially blocked from doing anything
(since just about everything needs to acquire the driver lock, if only
for long enough to get a pointer to a domain).
The solution is to modify qemuProcessKill to drop the driver lock
while sleeping, as proposed in Eric's patch. Then we can increase the
timeout with a clear conscience, and thus at least lower the chances
that someone running with existing management software will suffer the
consequences of qemu's disk cache not being flushed.
In the meantime, we still should work on Dan's proposal to make
locking within the driver object more fine grained.
(NB: although I couldn't find any instance where qemuProcessKill() was
called with no jobs active for the domain (or some other guarantee
that the current thread had at least one refcount on the domain
object), this patch still follows Eric's method of temporarily adding
a ref prior to unlocking the domain object, because I couldn't
convince myself 100% that this was the case.)
2012-02-07 16:13:57 +00:00
|
|
|
cleanup:
|
|
|
|
if (driver_unlocked) {
|
|
|
|
/* We had unlocked the driver, so re-lock it. THREADS.txt says
|
|
|
|
* we can't have the domain locked when locking the driver, so
|
|
|
|
* we must first unlock the domain. BUT, before we can unlock
|
|
|
|
* the domain, we need to add a ref to it in case there aren't
|
|
|
|
* any active jobs (analysis of all callers didn't reveal such
|
|
|
|
* a case, but there are too many to maintain certainty, so we
|
|
|
|
* will do this as a precaution).
|
|
|
|
*/
|
2012-07-11 13:35:46 +00:00
|
|
|
virObjectRef(vm);
|
qemu: drop driver lock while trying to terminate qemu process
This patch is based on an earlier patch by Eric Blake which was never
committed:
https://www.redhat.com/archives/libvir-list/2011-November/msg00243.html
Aside from rebasing, this patch only drops the driver lock once (prior
to the first time the function sleeps), then leaves it dropped until
it returns (Eric's patch would drop and re-acquire the lock around
each call to sleep).
At the time Eric sent his patch, the response (from Dan Berrange) was
that, while it wasn't a good thing to be holding the driver lock while
sleeping, we really need to rethink locking wrt the driver object,
switching to a finer-grained approach that locks individual items
within the driver object separately to allow for greater concurrency.
This is a good plan, and at the time it made sense to not apply the
patch because there was no known bug related to the driver lock being
held in this function.
However, we now know that the length of the wait in qemuProcessKill is
sometimes too short to allow the qemu process to fully flush its disk
cache before SIGKILL is sent, so we need to lengthen the timeout (in
order to improve the situation with management applications until they
can be updated to use the new VIR_DOMAIN_DESTROY_GRACEFUL flag added
in commit 72f8a7f19753506ed957b78ad800c0f3892c9304). But, if we
lengthen the timeout, we also lengthen the amount of time that all
other threads in libvirtd are essentially blocked from doing anything
(since just about everything needs to acquire the driver lock, if only
for long enough to get a pointer to a domain).
The solution is to modify qemuProcessKill to drop the driver lock
while sleeping, as proposed in Eric's patch. Then we can increase the
timeout with a clear conscience, and thus at least lower the chances
that someone running with existing management software will suffer the
consequences of qemu's disk cache not being flushed.
In the meantime, we still should work on Dan's proposal to make
locking within the driver object more fine grained.
(NB: although I couldn't find any instance where qemuProcessKill() was
called with no jobs active for the domain (or some other guarantee
that the current thread had at least one refcount on the domain
object), this patch still follows Eric's method of temporarily adding
a ref prior to unlocking the domain object, because I couldn't
convince myself 100% that this was the case.)
2012-02-07 16:13:57 +00:00
|
|
|
virDomainObjUnlock(vm);
|
|
|
|
qemuDriverLock(driver);
|
|
|
|
virDomainObjLock(vm);
|
2012-07-11 13:35:46 +00:00
|
|
|
virObjectUnref(vm);
|
qemu: drop driver lock while trying to terminate qemu process
This patch is based on an earlier patch by Eric Blake which was never
committed:
https://www.redhat.com/archives/libvir-list/2011-November/msg00243.html
Aside from rebasing, this patch only drops the driver lock once (prior
to the first time the function sleeps), then leaves it dropped until
it returns (Eric's patch would drop and re-acquire the lock around
each call to sleep).
At the time Eric sent his patch, the response (from Dan Berrange) was
that, while it wasn't a good thing to be holding the driver lock while
sleeping, we really need to rethink locking wrt the driver object,
switching to a finer-grained approach that locks individual items
within the driver object separately to allow for greater concurrency.
This is a good plan, and at the time it made sense to not apply the
patch because there was no known bug related to the driver lock being
held in this function.
However, we now know that the length of the wait in qemuProcessKill is
sometimes too short to allow the qemu process to fully flush its disk
cache before SIGKILL is sent, so we need to lengthen the timeout (in
order to improve the situation with management applications until they
can be updated to use the new VIR_DOMAIN_DESTROY_GRACEFUL flag added
in commit 72f8a7f19753506ed957b78ad800c0f3892c9304). But, if we
lengthen the timeout, we also lengthen the amount of time that all
other threads in libvirtd are essentially blocked from doing anything
(since just about everything needs to acquire the driver lock, if only
for long enough to get a pointer to a domain).
The solution is to modify qemuProcessKill to drop the driver lock
while sleeping, as proposed in Eric's patch. Then we can increase the
timeout with a clear conscience, and thus at least lower the chances
that someone running with existing management software will suffer the
consequences of qemu's disk cache not being flushed.
In the meantime, we still should work on Dan's proposal to make
locking within the driver object more fine grained.
(NB: although I couldn't find any instance where qemuProcessKill() was
called with no jobs active for the domain (or some other guarantee
that the current thread had at least one refcount on the domain
object), this patch still follows Eric's method of temporarily adding
a ref prior to unlocking the domain object, because I couldn't
convince myself 100% that this was the case.)
2012-02-07 16:13:57 +00:00
|
|
|
}
|
|
|
|
return ret;
|
2011-04-21 15:19:06 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
/*
 * qemuProcessStop:
 * @driver: qemu driver (must be locked by the caller)
 * @vm: domain object to shut down (must be locked by the caller)
 * @reason: shutoff reason recorded in the domain state
 * @flags: bitwise-OR of VIR_QEMU_PROCESS_STOP_* flags
 *
 * Tears down a running qemu process and all resources associated with it:
 * log timestamp, guest agent and monitor connections, the process itself,
 * hooks, security labels, host network devices, cgroups and reserved
 * graphics ports.  Routinely called from cleanup paths, so it preserves
 * any pre-existing libvirt error across its own (ignored) failures.
 * Returns nothing; if the domain is already inactive it is a no-op.
 */
void qemuProcessStop(struct qemud_driver *driver,
                     virDomainObjPtr vm,
                     virDomainShutoffReason reason,
                     unsigned int flags)
{
    int ret;
    int retries = 0;
    qemuDomainObjPrivatePtr priv = vm->privateData;
    virErrorPtr orig_err;
    virDomainDefPtr def;
    virNetDevVPortProfilePtr vport = NULL;
    int i;
    int logfile = -1;
    char *timestamp;
    char ebuf[1024];

    VIR_DEBUG("Shutting down VM '%s' pid=%d flags=%x",
              vm->def->name, vm->pid, flags);

    if (!virDomainObjIsActive(vm)) {
        VIR_DEBUG("VM '%s' not active", vm->def->name);
        return;
    }

    /*
     * We may unlock the driver and vm in qemuProcessKill(), and another thread
     * can lock driver and vm, and then call qemuProcessStop().  So we must
     * set vm->def->id to -1 here, which makes virDomainObjIsActive() above
     * return false in that second caller and avoids running this teardown
     * twice.
     */
    vm->def->id = -1;

    if ((logfile = qemuDomainCreateLog(driver, vm, true)) < 0) {
        /* To not break the normal domain shutdown process, skip the
         * timestamp log writing if opening the log file failed. */
        VIR_WARN("Unable to open logfile: %s",
                 virStrerror(errno, ebuf, sizeof(ebuf)));
    } else {
        if ((timestamp = virTimeStringNow()) == NULL) {
            virReportOOMError();
        } else {
            /* Append a "shutting down" timestamp marker to the domain log */
            if (safewrite(logfile, timestamp, strlen(timestamp)) < 0 ||
                safewrite(logfile, SHUTDOWN_POSTFIX,
                          strlen(SHUTDOWN_POSTFIX)) < 0) {
                VIR_WARN("Unable to write timestamp to logfile: %s",
                         virStrerror(errno, ebuf, sizeof(ebuf)));
            }

            VIR_FREE(timestamp);
        }

        if (VIR_CLOSE(logfile) < 0)
             VIR_WARN("Unable to close logfile: %s",
                      virStrerror(errno, ebuf, sizeof(ebuf)));
    }

    /* This method is routinely used in cleanup paths.  Save any current
     * error now and restore it at the end so we don't squash a legit
     * error with ones raised (and ignored) during teardown. */
    orig_err = virSaveLastError();

    virDomainConfVMNWFilterTeardown(vm);

    /* Drop the per-interface ebtables MAC rules if MAC filtering is on */
    if (driver->macFilter) {
        def = vm->def;
        for (i = 0 ; i < def->nnets ; i++) {
            virDomainNetDefPtr net = def->nets[i];
            if (net->ifname == NULL)
                continue;
            if ((errno = networkDisallowMacOnPort(driver, net->ifname,
                                                  &net->mac))) {
                virReportSystemError(errno,
             _("failed to remove ebtables rule to allow MAC address on '%s'"),
                                     net->ifname);
            }
        }
    }

    /* Close the guest agent connection and clear its error state */
    if (priv->agent) {
        qemuAgentClose(priv->agent);
        priv->agent = NULL;
        priv->agentError = false;
    }

    if (priv->mon)
        qemuMonitorClose(priv->mon);

    /* Remove the monitor socket from the filesystem before freeing config */
    if (priv->monConfig) {
        if (priv->monConfig->type == VIR_DOMAIN_CHR_TYPE_UNIX)
            unlink(priv->monConfig->data.nix.path);
        virDomainChrSourceDefFree(priv->monConfig);
        priv->monConfig = NULL;
    }

    /* shut it off for sure; NOCHECK since we already know it was running */
    ignore_value(qemuProcessKill(driver, vm, VIR_QEMU_PROCESS_KILL_FORCE|
                                             VIR_QEMU_PROCESS_KILL_NOCHECK));

    qemuDomainCleanupRun(driver, vm);

    /* Stop autodestroy in case guest is restarted */
    qemuProcessAutoDestroyRemove(driver, vm);

    /* now that we know it's stopped call the hook if present */
    if (virHookPresent(VIR_HOOK_DRIVER_QEMU)) {
        char *xml = qemuDomainDefFormatXML(driver, vm->def, 0, false);

        /* we can't stop the operation even if the script raised an error */
        virHookCall(VIR_HOOK_DRIVER_QEMU, vm->def->name,
                    VIR_HOOK_QEMU_OP_STOPPED, VIR_HOOK_SUBOP_END,
                    NULL, xml, NULL);
        VIR_FREE(xml);
    }

    /* Reset security labels unless the caller asked us not to */
    if (!(flags & VIR_QEMU_PROCESS_STOP_NO_RELABEL))
        virSecurityManagerRestoreAllLabel(driver->securityManager,
                                          vm->def,
                                          flags & VIR_QEMU_PROCESS_STOP_MIGRATED);
    virSecurityManagerReleaseLabel(driver->securityManager, vm->def);

    /* Clear out dynamically assigned labels */
    for (i = 0; i < vm->def->nseclabels; i++) {
        if (vm->def->seclabels[i]->type == VIR_DOMAIN_SECLABEL_DYNAMIC) {
            VIR_FREE(vm->def->seclabels[i]->label);
        }
        VIR_FREE(vm->def->seclabels[i]->imagelabel);
    }

    virDomainDefClearDeviceAliases(vm->def);
    /* PCI addresses are only kept across restarts when persistent */
    if (!priv->persistentAddrs) {
        virDomainDefClearPCIAddresses(vm->def);
        qemuDomainPCIAddressSetFree(priv->pciaddrs);
        priv->pciaddrs = NULL;
    }

    qemuDomainReAttachHostDevices(driver, vm->def);

    def = vm->def;
    for (i = 0; i < def->nnets; i++) {
        virDomainNetDefPtr net = def->nets[i];
        /* Tear down the macvtap device created for type='direct' interfaces */
        if (virDomainNetGetActualType(net) == VIR_DOMAIN_NET_TYPE_DIRECT) {
            ignore_value(virNetDevMacVLanDeleteWithVPortProfile(
                             net->ifname, &net->mac,
                             virDomainNetGetActualDirectDev(net),
                             virDomainNetGetActualDirectMode(net),
                             virDomainNetGetActualVirtPortProfile(net),
                             driver->stateDir));
            VIR_FREE(net->ifname);
        }
        /* release the physical device (or any other resources used by
         * this interface in the network driver)
         */
        vport = virDomainNetGetActualVirtPortProfile(net);
        if (vport && vport->virtPortType == VIR_NETDEV_VPORT_PROFILE_OPENVSWITCH)
            ignore_value(virNetDevOpenvswitchRemovePort(
                             virDomainNetGetActualBridgeName(net),
                             net->ifname));

        networkReleaseActualDevice(net);
    }

retry:
    /* Cgroup removal can race with the kernel reaping the process; retry
     * a few times on EBUSY before giving up with a warning. */
    if ((ret = qemuRemoveCgroup(driver, vm, 0)) < 0) {
        if (ret == -EBUSY && (retries++ < 5)) {
            usleep(200*1000);
            goto retry;
        }
        VIR_WARN("Failed to remove cgroup for %s",
                 vm->def->name);
    }

    qemuProcessRemoveDomainStatus(driver, vm);

    /* Remove VNC port from port reservation bitmap, but only if it was
       reserved by the driver (autoport=yes)
    */
    if ((vm->def->ngraphics == 1) &&
        vm->def->graphics[0]->type == VIR_DOMAIN_GRAPHICS_TYPE_VNC &&
        vm->def->graphics[0]->data.vnc.autoport) {
        qemuProcessReturnPort(driver, vm->def->graphics[0]->data.vnc.port);
    }
    /* Likewise for the SPICE port and TLS port */
    if ((vm->def->ngraphics == 1) &&
        vm->def->graphics[0]->type == VIR_DOMAIN_GRAPHICS_TYPE_SPICE &&
        vm->def->graphics[0]->data.spice.autoport) {
        qemuProcessReturnPort(driver, vm->def->graphics[0]->data.spice.port);
        qemuProcessReturnPort(driver, vm->def->graphics[0]->data.spice.tlsPort);
    }

    /* Reset per-run state now that the process is gone */
    vm->taint = 0;
    vm->pid = -1;
    virDomainObjSetState(vm, VIR_DOMAIN_SHUTOFF, reason);
    VIR_FREE(priv->vcpupids);
    priv->nvcpupids = 0;
    virObjectUnref(priv->caps);
    priv->caps = NULL;
    VIR_FREE(priv->pidfile);

    /* The "release" hook cleans up additional resources */
    if (virHookPresent(VIR_HOOK_DRIVER_QEMU)) {
        char *xml = qemuDomainDefFormatXML(driver, vm->def, 0, false);

        /* we can't stop the operation even if the script raised an error */
        virHookCall(VIR_HOOK_DRIVER_QEMU, vm->def->name,
                    VIR_HOOK_QEMU_OP_RELEASE, VIR_HOOK_SUBOP_END,
                    NULL, xml, NULL);
        VIR_FREE(xml);
    }

    /* Swap in any pending redefined config for the next boot */
    if (vm->newDef) {
        virDomainDefFree(vm->def);
        vm->def = vm->newDef;
        vm->def->id = -1;
        vm->newDef = NULL;
    }

    /* Restore the error that was pending before teardown started */
    if (orig_err) {
        virSetError(orig_err);
        virFreeError(orig_err);
    }
}
|
2011-06-23 09:37:57 +00:00
|
|
|
|
|
|
|
|
2011-05-05 16:32:21 +00:00
|
|
|
/*
 * qemuProcessAttach:
 * @conn: connection (unused)
 * @driver: qemu driver (must be locked by the caller)
 * @vm: inactive domain object to attach to (must be locked by the caller)
 * @pid: PID of the externally started qemu process
 * @pidfile: optional path of the pidfile to remember, may be NULL
 * @monConfig: monitor character device config; ownership is taken on
 *             success, freed by this function on failure
 * @monJSON: whether the monitor speaks JSON (QMP)
 *
 * Attaches libvirt to an already-running, externally launched qemu
 * process: records security labels, detects emulator capabilities,
 * connects to the monitor and agent, detects vcpu PIDs and PCI
 * addresses, synchronizes run state and saves the domain status.
 *
 * Returns 0 on success, -1 on failure (in which case the domain is
 * left as if it was never attached).
 */
int qemuProcessAttach(virConnectPtr conn ATTRIBUTE_UNUSED,
                      struct qemud_driver *driver,
                      virDomainObjPtr vm,
                      pid_t pid,
                      const char *pidfile,
                      virDomainChrSourceDefPtr monConfig,
                      bool monJSON)
{
    size_t i;
    char ebuf[1024];
    int logfile = -1;
    char *timestamp;
    qemuDomainObjPrivatePtr priv = vm->privateData;
    bool running = true;
    virDomainPausedReason reason;
    virSecurityLabelPtr seclabel = NULL;
    virSecurityLabelDefPtr seclabeldef = NULL;
    virSecurityManagerPtr* sec_managers = NULL;
    const char *model;

    VIR_DEBUG("Beginning VM attach process");

    if (virDomainObjIsActive(vm)) {
        virReportError(VIR_ERR_OPERATION_INVALID,
                       "%s", _("VM is already active"));
        return -1;
    }

    /* Do this upfront, so any part of the startup process can add
     * runtime state to vm->def that won't be persisted. This lets us
     * report implicit runtime defaults in the XML, like vnc listen/socket
     */
    VIR_DEBUG("Setting current domain def as transient");
    if (virDomainObjSetDefTransient(driver->caps, vm, true) < 0)
        goto cleanup;

    vm->def->id = driver->nextvmid++;

    if (virFileMakePath(driver->logDir) < 0) {
        virReportSystemError(errno,
                             _("cannot create log directory %s"),
                             driver->logDir);
        goto cleanup;
    }

    VIR_FREE(priv->pidfile);
    if (pidfile &&
        !(priv->pidfile = strdup(pidfile)))
        goto no_memory;

    /* Record the security label of the running process for each nested
     * security driver, as a static label on the domain definition */
    VIR_DEBUG("Detect security driver config");
    sec_managers = virSecurityManagerGetNested(driver->securityManager);
    if (sec_managers == NULL) {
        goto cleanup;
    }

    for (i = 0; sec_managers[i]; i++) {
        model = virSecurityManagerGetModel(sec_managers[i]);
        seclabeldef = virDomainDefGetSecurityLabelDef(vm->def, model);
        if (seclabeldef == NULL) {
            goto cleanup;
        }
        seclabeldef->type = VIR_DOMAIN_SECLABEL_STATIC;
        if (VIR_ALLOC(seclabel) < 0)
            goto no_memory;
        if (virSecurityManagerGetProcessLabel(driver->securityManager,
                                              vm->def, vm->pid, seclabel) < 0)
            goto cleanup;

        if (!(seclabeldef->model = strdup(model)))
            goto no_memory;

        if (!(seclabeldef->label = strdup(seclabel->label)))
            goto no_memory;
        VIR_FREE(seclabel);
    }

    VIR_DEBUG("Creating domain log file");
    if ((logfile = qemuDomainCreateLog(driver, vm, false)) < 0)
        goto cleanup;

    VIR_DEBUG("Determining emulator version");
    virObjectUnref(priv->caps);
    priv->caps = NULL;
    if (qemuCapsExtractVersionInfo(vm->def->emulator,
                                   vm->def->os.arch,
                                   false,
                                   NULL,
                                   &priv->caps) < 0)
        goto cleanup;

    VIR_DEBUG("Preparing monitor state");
    /* Take ownership of monConfig; NULL it so the failure path below
     * doesn't double-free it once it belongs to priv */
    priv->monConfig = monConfig;
    monConfig = NULL;
    priv->monJSON = monJSON;

    priv->gotShutdown = false;

    /*
     * Normally PCI addresses are assigned in the virDomainCreate
     * or virDomainDefine methods. We might still need to assign
     * some here to cope with the question of upgrades. Regardless
     * we also need to populate the PCI address set cache for later
     * use in hotplug
     */
    if (qemuCapsGet(priv->caps, QEMU_CAPS_DEVICE)) {
        VIR_DEBUG("Assigning domain PCI addresses");
        if ((qemuDomainAssignAddresses(vm->def, priv->caps, vm)) < 0)
            goto cleanup;
    }

    /* Append an "attach" timestamp marker to the domain log */
    if ((timestamp = virTimeStringNow()) == NULL) {
        virReportOOMError();
        goto cleanup;
    } else {
        if (safewrite(logfile, timestamp, strlen(timestamp)) < 0 ||
            safewrite(logfile, ATTACH_POSTFIX, strlen(ATTACH_POSTFIX)) < 0) {
            VIR_WARN("Unable to write timestamp to logfile: %s",
                     virStrerror(errno, ebuf, sizeof(ebuf)));
        }

        VIR_FREE(timestamp);
    }

    /* Mark the domain tainted: libvirt did not launch this process */
    qemuDomainObjTaint(driver, vm, VIR_DOMAIN_TAINT_EXTERNAL_LAUNCH, logfile);

    vm->pid = pid;

    VIR_DEBUG("Waiting for monitor to show up");
    if (qemuProcessWaitForMonitor(driver, vm, priv->caps, -1) < 0)
        goto cleanup;

    /* Failure to connect to agent shouldn't be fatal */
    if (qemuConnectAgent(driver, vm) < 0) {
        VIR_WARN("Cannot connect to QEMU guest agent for %s",
                 vm->def->name);
        virResetLastError();
        priv->agentError = true;
    }

    VIR_DEBUG("Detecting VCPU PIDs");
    if (qemuProcessDetectVcpuPIDs(driver, vm) < 0)
        goto cleanup;

    /* If we have -device, then addresses are assigned explicitly.
     * If not, then we have to detect dynamic ones here */
    if (!qemuCapsGet(priv->caps, QEMU_CAPS_DEVICE)) {
        VIR_DEBUG("Determining domain device PCI addresses");
        if (qemuProcessInitPCIAddresses(driver, vm) < 0)
            goto cleanup;
    }

    /* Query balloon, run state and virt type from the monitor; every
     * early exit must leave the monitor section first */
    VIR_DEBUG("Getting initial memory amount");
    qemuDomainObjEnterMonitorWithDriver(driver, vm);
    if (qemuMonitorGetBalloonInfo(priv->mon, &vm->def->mem.cur_balloon) < 0) {
        qemuDomainObjExitMonitorWithDriver(driver, vm);
        goto cleanup;
    }
    if (qemuMonitorGetStatus(priv->mon, &running, &reason) < 0) {
        qemuDomainObjExitMonitorWithDriver(driver, vm);
        goto cleanup;
    }
    if (qemuMonitorGetVirtType(priv->mon, &vm->def->virtType) < 0) {
        qemuDomainObjExitMonitorWithDriver(driver, vm);
        goto cleanup;
    }
    qemuDomainObjExitMonitorWithDriver(driver, vm);

    /* The domain may have died while we were in the monitor */
    if (!virDomainObjIsActive(vm))
        goto cleanup;

    if (running)
        virDomainObjSetState(vm, VIR_DOMAIN_RUNNING,
                             VIR_DOMAIN_RUNNING_UNPAUSED);
    else
        virDomainObjSetState(vm, VIR_DOMAIN_PAUSED, reason);

    VIR_DEBUG("Writing domain status to disk");
    if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0)
        goto cleanup;

    /* Run a hook to allow admins to do some magic */
    if (virHookPresent(VIR_HOOK_DRIVER_QEMU)) {
        char *xml = qemuDomainDefFormatXML(driver, vm->def, 0, false);
        int hookret;

        hookret = virHookCall(VIR_HOOK_DRIVER_QEMU, vm->def->name,
                              VIR_HOOK_QEMU_OP_ATTACH, VIR_HOOK_SUBOP_BEGIN,
                              NULL, xml, NULL);
        VIR_FREE(xml);

        /*
         * If the script raised an error abort the launch
         */
        if (hookret < 0)
            goto cleanup;
    }

    VIR_FORCE_CLOSE(logfile);
    VIR_FREE(seclabel);
    VIR_FREE(sec_managers);

    return 0;

no_memory:
    virReportOOMError();
cleanup:
    /* We jump here if we failed to start the VM for any reason, or
     * if we failed to initialize the now running VM. kill it off and
     * pretend we never started it */
    VIR_FORCE_CLOSE(logfile);
    VIR_FREE(seclabel);
    VIR_FREE(sec_managers);
    virDomainChrSourceDefFree(monConfig);
    return -1;
}
|
|
|
|
|
|
|
|
|
2012-03-19 13:31:41 +00:00
|
|
|
/*
 * qemuProcessAutoDestroy:
 * @driver: qemu driver (must be locked by the caller)
 * @dom: domain object registered for auto-destroy
 * @conn: the closing connection that triggered the callback
 *
 * Close callback invoked when the connection that registered a domain
 * for auto-destroy is closed: cancels any long-running async job,
 * destroys the qemu process, emits a STOPPED event, and removes the
 * domain if it is transient.
 *
 * Returns @dom, or NULL if the domain object was disposed of while
 * ending the job (caller must not touch it in that case).
 */
static virDomainObjPtr
qemuProcessAutoDestroy(struct qemud_driver *driver,
                       virDomainObjPtr dom,
                       virConnectPtr conn)
{
    qemuDomainObjPrivatePtr priv = dom->privateData;
    virDomainEventPtr event = NULL;

    VIR_DEBUG("vm=%s, conn=%p", dom->def->name, conn);

    /* An active async job (e.g. migration) must be discarded before we
     * can grab the destroy job below */
    if (priv->job.asyncJob) {
        VIR_DEBUG("vm=%s has long-term job active, cancelling",
                  dom->def->name);
        qemuDomainObjDiscardAsyncJob(driver, dom);
    }

    if (qemuDomainObjBeginJobWithDriver(driver, dom,
                                        QEMU_JOB_DESTROY) < 0)
        goto cleanup;

    VIR_DEBUG("Killing domain");
    qemuProcessStop(driver, dom, VIR_DOMAIN_SHUTOFF_DESTROYED,
                    VIR_QEMU_PROCESS_STOP_MIGRATED);
    virDomainAuditStop(dom, "destroyed");
    event = virDomainEventNewFromObj(dom,
                                     VIR_DOMAIN_EVENT_STOPPED,
                                     VIR_DOMAIN_EVENT_STOPPED_DESTROYED);

    /* EndJob returning false means the object was freed; stop using it */
    if (!qemuDomainObjEndJob(driver, dom))
        dom = NULL;
    if (dom && !dom->persistent)
        qemuDomainRemoveInactive(driver, dom);
    if (event)
        qemuDomainEventQueue(driver, event);

cleanup:
    return dom;
}
|
|
|
|
|
|
|
|
int qemuProcessAutoDestroyAdd(struct qemud_driver *driver,
|
|
|
|
virDomainObjPtr vm,
|
|
|
|
virConnectPtr conn)
|
|
|
|
{
|
2012-03-19 13:31:41 +00:00
|
|
|
VIR_DEBUG("vm=%s, conn=%p", vm->def->name, conn);
|
|
|
|
return qemuDriverCloseCallbackSet(driver, vm, conn,
|
|
|
|
qemuProcessAutoDestroy);
|
2011-06-23 09:37:57 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
int qemuProcessAutoDestroyRemove(struct qemud_driver *driver,
|
|
|
|
virDomainObjPtr vm)
|
|
|
|
{
|
2012-03-19 13:31:41 +00:00
|
|
|
VIR_DEBUG("vm=%s", vm->def->name);
|
|
|
|
return qemuDriverCloseCallbackUnset(driver, vm, qemuProcessAutoDestroy);
|
2011-06-23 09:37:57 +00:00
|
|
|
}
|
2011-06-23 10:41:57 +00:00
|
|
|
|
|
|
|
bool qemuProcessAutoDestroyActive(struct qemud_driver *driver,
|
|
|
|
virDomainObjPtr vm)
|
|
|
|
{
|
2012-03-19 13:31:41 +00:00
|
|
|
qemuDriverCloseCallback cb;
|
|
|
|
VIR_DEBUG("vm=%s", vm->def->name);
|
|
|
|
cb = qemuDriverCloseCallbackGet(driver, vm, NULL);
|
|
|
|
return cb == qemuProcessAutoDestroy;
|
2011-06-23 10:41:57 +00:00
|
|
|
}
|