/*
 * qemu_process.c: QEMU process management
 *
 * Copyright (C) 2006-2016 Red Hat, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library. If not, see
 * <http://www.gnu.org/licenses/>.
 *
 */

#include <config.h>

#include <fcntl.h>
#include <unistd.h>
#include <signal.h>
#include <sys/stat.h>
#if defined(__linux__)
# include <linux/capability.h>
#elif defined(__FreeBSD__)
# include <sys/param.h>
# include <sys/cpuset.h>
#endif

#include "qemu_process.h"
#include "qemu_processpriv.h"
#include "qemu_alias.h"
#include "qemu_block.h"
#include "qemu_domain.h"
#include "qemu_domain_address.h"
#include "qemu_cgroup.h"
#include "qemu_capabilities.h"
#include "qemu_monitor.h"
#include "qemu_command.h"
#include "qemu_hostdev.h"
#include "qemu_hotplug.h"
#include "qemu_migration.h"
#include "qemu_interface.h"
#include "qemu_security.h"

#include "cpu/cpu.h"
#include "datatypes.h"
#include "virlog.h"
#include "virerror.h"
#include "viralloc.h"
#include "virhook.h"
#include "virfile.h"
#include "virpidfile.h"
#include "virhostcpu.h"
#include "domain_audit.h"
#include "domain_nwfilter.h"
#include "locking/domain_lock.h"
#include "network/bridge_driver.h"
#include "viruuid.h"
#include "virprocess.h"
#include "virtime.h"
#include "virnetdevtap.h"
#include "virnetdevopenvswitch.h"
#include "virnetdevmidonet.h"
#include "virbitmap.h"
#include "viratomic.h"
#include "virnuma.h"
#include "virstring.h"
#include "virhostdev.h"
#include "secret_util.h"
#include "storage/storage_driver.h"
#include "configmake.h"
#include "nwfilter_conf.h"
#include "netdev_bandwidth_conf.h"

#define VIR_FROM_THIS VIR_FROM_QEMU

VIR_LOG_INIT("qemu.qemu_process");

/**
 * qemuProcessRemoveDomainStatus
 *
 * remove all state files of a domain from statedir
 *
 * Returns 0 on success
 */
static int
qemuProcessRemoveDomainStatus(virQEMUDriverPtr driver,
                              virDomainObjPtr vm)
{
    char ebuf[1024];
    char *file = NULL;
    qemuDomainObjPrivatePtr priv = vm->privateData;
    virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
    int ret = -1;

    if (virAsprintf(&file, "%s/%s.xml", cfg->stateDir, vm->def->name) < 0)
        goto cleanup;

    if (unlink(file) < 0 && errno != ENOENT && errno != ENOTDIR)
        VIR_WARN("Failed to remove domain XML for %s: %s",
                 vm->def->name, virStrerror(errno, ebuf, sizeof(ebuf)));
    VIR_FREE(file);

    if (priv->pidfile &&
        unlink(priv->pidfile) < 0 &&
        errno != ENOENT)
        VIR_WARN("Failed to remove PID file for %s: %s",
                 vm->def->name, virStrerror(errno, ebuf, sizeof(ebuf)));

    ret = 0;
 cleanup:
    virObjectUnref(cfg);
    return ret;
}

/*
 * This is a callback registered with a qemuAgentPtr instance,
 * and to be invoked when the agent console hits an end of file
 * condition, or error, thus indicating VM shutdown should be
 * performed
 */
static void
qemuProcessHandleAgentEOF(qemuAgentPtr agent,
                          virDomainObjPtr vm)
{
    qemuDomainObjPrivatePtr priv;

    VIR_DEBUG("Received EOF from agent on %p '%s'", vm, vm->def->name);

    virObjectLock(vm);

    priv = vm->privateData;

    if (!priv->agent) {
        VIR_DEBUG("Agent freed already");
        goto unlock;
    }

    if (priv->beingDestroyed) {
        VIR_DEBUG("Domain is being destroyed, agent EOF is expected");
        goto unlock;
    }

    qemuAgentClose(agent);
    priv->agent = NULL;
    priv->agentError = false;

    virObjectUnlock(vm);
    return;

 unlock:
    virObjectUnlock(vm);
    return;
}

/*
 * This is invoked when there is some kind of error
 * parsing data to/from the agent. The VM can continue
 * to run, but no further agent commands will be
 * allowed
 */
static void
qemuProcessHandleAgentError(qemuAgentPtr agent ATTRIBUTE_UNUSED,
                            virDomainObjPtr vm)
{
    qemuDomainObjPrivatePtr priv;

    VIR_DEBUG("Received error from agent on %p '%s'", vm, vm->def->name);

    virObjectLock(vm);

    priv = vm->privateData;

    priv->agentError = true;

    virObjectUnlock(vm);
}

static void qemuProcessHandleAgentDestroy(qemuAgentPtr agent,
                                          virDomainObjPtr vm)
{
    VIR_DEBUG("Received destroy agent=%p vm=%p", agent, vm);

    virObjectUnref(vm);
}


static qemuAgentCallbacks agentCallbacks = {
    .destroy = qemuProcessHandleAgentDestroy,
    .eofNotify = qemuProcessHandleAgentEOF,
    .errorNotify = qemuProcessHandleAgentError,
};
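
/*
 * Open a connection to the guest agent channel of @vm, if one is
 * configured, and store the resulting qemuAgentPtr in the domain's
 * private data. Failures to connect are logged and recorded in
 * priv->agentError but still return 0; -1 is returned only if the
 * domain died while the connection was being established.
 */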
int
qemuConnectAgent(virQEMUDriverPtr driver, virDomainObjPtr vm)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    qemuAgentPtr agent = NULL;
    virDomainChrDefPtr config = qemuFindAgentConfig(vm->def);

    if (!config)
        return 0;

    if (priv->agent)
        return 0;

    if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_VSERPORT_CHANGE) &&
        config->state != VIR_DOMAIN_CHR_DEVICE_STATE_CONNECTED) {
        VIR_DEBUG("Deferring connecting to guest agent");
        return 0;
    }

    if (qemuSecuritySetDaemonSocketLabel(driver->securityManager, vm->def) < 0) {
        VIR_ERROR(_("Failed to set security context for agent for %s"),
                  vm->def->name);
        goto cleanup;
    }

    /* Hold an extra reference because we can't allow 'vm' to be
     * deleted while the agent is active */
    virObjectRef(vm);

    virObjectUnlock(vm);

    agent = qemuAgentOpen(vm,
                          config->source,
                          &agentCallbacks);

    virObjectLock(vm);

    if (agent == NULL)
        virObjectUnref(vm);

    if (!virDomainObjIsActive(vm)) {
        qemuAgentClose(agent);
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("guest crashed while connecting to the guest agent"));
        return -1;
    }

    if (qemuSecurityClearSocketLabel(driver->securityManager, vm->def) < 0) {
        VIR_ERROR(_("Failed to clear security context for agent for %s"),
                  vm->def->name);
        qemuAgentClose(agent);
        goto cleanup;
    }

    priv->agent = agent;
    if (!priv->agent)
        VIR_INFO("Failed to connect agent for %s", vm->def->name);

 cleanup:
    if (!priv->agent) {
        VIR_WARN("Cannot connect to QEMU guest agent for %s", vm->def->name);
        priv->agentError = true;
        virResetLastError();
    }

    return 0;
}

/*
 * This is a callback registered with a qemuMonitorPtr instance,
 * and to be invoked when the monitor console hits an end of file
 * condition, or error, thus indicating VM shutdown should be
 * performed
 */
static void
qemuProcessHandleMonitorEOF(qemuMonitorPtr mon,
                            virDomainObjPtr vm,
                            void *opaque)
{
    virQEMUDriverPtr driver = opaque;
    qemuDomainObjPrivatePtr priv;
    struct qemuProcessEvent *processEvent;

    virObjectLock(vm);

    VIR_DEBUG("Received EOF on %p '%s'", vm, vm->def->name);

    priv = vm->privateData;
    if (priv->beingDestroyed) {
        VIR_DEBUG("Domain is being destroyed, EOF is expected");
        goto cleanup;
    }

    if (VIR_ALLOC(processEvent) < 0)
        goto cleanup;

    processEvent->eventType = QEMU_PROCESS_EVENT_MONITOR_EOF;
    processEvent->vm = vm;

    virObjectRef(vm);
    if (virThreadPoolSendJob(driver->workerPool, 0, processEvent) < 0) {
        ignore_value(virObjectUnref(vm));
        VIR_FREE(processEvent);
        goto cleanup;
    }

    /* We don't want this EOF handler to be called over and over while the
     * thread is waiting for a job.
     */
    qemuMonitorUnregister(mon);

    /* We don't want any cleanup from EOF handler (or any other
     * thread) to enter qemu namespace. */
    qemuDomainDestroyNamespace(driver, vm);

 cleanup:
    virObjectUnlock(vm);
}

/*
 * This is invoked when there is some kind of error
 * parsing data to/from the monitor. The VM can continue
 * to run, but no further monitor commands will be
 * allowed
 */
static void
qemuProcessHandleMonitorError(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
                              virDomainObjPtr vm,
                              void *opaque)
{
    virQEMUDriverPtr driver = opaque;
    virObjectEventPtr event = NULL;

    VIR_DEBUG("Received error on %p '%s'", vm, vm->def->name);

    virObjectLock(vm);

    ((qemuDomainObjPrivatePtr) vm->privateData)->monError = true;
    event = virDomainEventControlErrorNewFromObj(vm);
    qemuDomainEventQueue(driver, event);

    virObjectUnlock(vm);
}
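
/*
 * Look up the disk definition in @vm whose device alias matches @alias
 * (ignoring any "drive-" prefix); reports an error and returns NULL if
 * no such disk exists.
 */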
virDomainDiskDefPtr
qemuProcessFindDomainDiskByAlias(virDomainObjPtr vm,
                                 const char *alias)
{
    size_t i;

    alias = qemuAliasDiskDriveSkipPrefix(alias);

    for (i = 0; i < vm->def->ndisks; i++) {
        virDomainDiskDefPtr disk;

        disk = vm->def->disks[i];
        if (disk->info.alias != NULL && STREQ(disk->info.alias, alias))
            return disk;
    }

    virReportError(VIR_ERR_INTERNAL_ERROR,
                   _("no disk found with alias %s"),
                   alias);
    return NULL;
}
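
/*
 * Fetch the qcow passphrase for @disk from the secret driver and
 * return it as a NUL-terminated string in @secretRet/@secretLen.
 * Only the legacy qcow encryption format with a single passphrase
 * secret is accepted.
 */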
static int
qemuProcessGetVolumeQcowPassphrase(virConnectPtr conn,
                                   virDomainDiskDefPtr disk,
                                   char **secretRet,
                                   size_t *secretLen)
{
    char *passphrase;
    unsigned char *data;
    size_t size;
    int ret = -1;
    virStorageEncryptionPtr enc;

    if (!disk->src->encryption) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       _("disk %s does not have any encryption information"),
                       disk->src->path);
        return -1;
    }
    enc = disk->src->encryption;

    if (!conn) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       "%s", _("cannot find secrets without a connection"));
        goto cleanup;
    }

    if (conn->secretDriver == NULL ||
        conn->secretDriver->secretLookupByUUID == NULL ||
        conn->secretDriver->secretGetValue == NULL) {
        virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                       _("secret storage not supported"));
        goto cleanup;
    }

    if (enc->format != VIR_STORAGE_ENCRYPTION_FORMAT_QCOW ||
        enc->nsecrets != 1 ||
        enc->secrets[0]->type !=
        VIR_STORAGE_ENCRYPTION_SECRET_TYPE_PASSPHRASE) {
        virReportError(VIR_ERR_XML_ERROR,
                       _("invalid <encryption> for volume %s"),
                       virDomainDiskGetSource(disk));
        goto cleanup;
    }

    if (virSecretGetSecretString(conn, &enc->secrets[0]->seclookupdef,
                                 VIR_SECRET_USAGE_TYPE_VOLUME,
                                 &data, &size) < 0)
        goto cleanup;

    if (memchr(data, '\0', size) != NULL) {
        memset(data, 0, size);
        VIR_FREE(data);
        virReportError(VIR_ERR_XML_ERROR,
                       _("format='qcow' passphrase for %s must not contain a "
                         "'\\0'"), virDomainDiskGetSource(disk));
        goto cleanup;
    }

    if (VIR_ALLOC_N(passphrase, size + 1) < 0) {
        memset(data, 0, size);
        VIR_FREE(data);
        goto cleanup;
    }
    memcpy(passphrase, data, size);
    passphrase[size] = '\0';

    memset(data, 0, size);
    VIR_FREE(data);

    *secretRet = passphrase;
    *secretLen = size;

    ret = 0;

 cleanup:
    return ret;
}
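
/*
 * Monitor callback: resolve @path to a disk of @vm and hand back its
 * qcow passphrase via qemuProcessGetVolumeQcowPassphrase.
 */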
static int
qemuProcessFindVolumeQcowPassphrase(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
                                    virConnectPtr conn,
                                    virDomainObjPtr vm,
                                    const char *path,
                                    char **secretRet,
                                    size_t *secretLen,
                                    void *opaque ATTRIBUTE_UNUSED)
{
    virDomainDiskDefPtr disk;
    int ret = -1;

    virObjectLock(vm);
    if (!(disk = virDomainDiskByName(vm->def, path, true))) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       _("no disk found with path %s"),
                       path);
        goto cleanup;
    }

    ret = qemuProcessGetVolumeQcowPassphrase(conn, disk, secretRet, secretLen);

 cleanup:
    virObjectUnlock(vm);
    return ret;
}
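
/*
 * Monitor callback for the RESET event: emit a reboot lifecycle event,
 * notify the guest agent and refresh the saved domain status.
 */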
static int
qemuProcessHandleReset(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
                       virDomainObjPtr vm,
                       void *opaque)
{
    virQEMUDriverPtr driver = opaque;
    virObjectEventPtr event;
    qemuDomainObjPrivatePtr priv;
    virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);

    virObjectLock(vm);

    event = virDomainEventRebootNewFromObj(vm);
    priv = vm->privateData;
    if (priv->agent)
        qemuAgentNotifyEvent(priv->agent, QEMU_AGENT_EVENT_RESET);

    if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm, driver->caps) < 0)
        VIR_WARN("Failed to save status on vm %s", vm->def->name);

    virObjectUnlock(vm);

    qemuDomainEventQueue(driver, event);

    virObjectUnref(cfg);
    return 0;
}

/*
 * Since we have the '-no-shutdown' flag set, the
 * QEMU process will currently have guest OS shutdown
 * and the CPUs stopped. To fake the reboot, we thus
 * want to do a reset of the virtual hardware, followed
 * by restart of the CPUs. This should result in the
 * guest OS booting up again
 */
static void
qemuProcessFakeReboot(void *opaque)
{
    virDomainObjPtr vm = opaque;
    qemuDomainObjPrivatePtr priv = vm->privateData;
    virQEMUDriverPtr driver = priv->driver;
    virObjectEventPtr event = NULL;
    virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
    virDomainRunningReason reason = VIR_DOMAIN_RUNNING_BOOTED;
    int ret = -1, rc;

    VIR_DEBUG("vm=%p", vm);
    virObjectLock(vm);
    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
        goto cleanup;

    if (!virDomainObjIsActive(vm)) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("guest unexpectedly quit"));
        goto endjob;
    }

    qemuDomainObjEnterMonitor(driver, vm);
    rc = qemuMonitorSystemReset(priv->mon);

    if (qemuDomainObjExitMonitor(driver, vm) < 0)
        goto endjob;

    if (rc < 0)
        goto endjob;

    if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_CRASHED)
        reason = VIR_DOMAIN_RUNNING_CRASHED;

    if (qemuProcessStartCPUs(driver, vm, NULL,
                             reason,
                             QEMU_ASYNC_JOB_NONE) < 0) {
        if (virGetLastError() == NULL)
            virReportError(VIR_ERR_INTERNAL_ERROR,
                           "%s", _("resume operation failed"));
        goto endjob;
    }
    priv->gotShutdown = false;
    event = virDomainEventLifecycleNewFromObj(vm,
                                              VIR_DOMAIN_EVENT_RESUMED,
                                              VIR_DOMAIN_EVENT_RESUMED_UNPAUSED);

    if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm, driver->caps) < 0) {
        VIR_WARN("Unable to save status on vm %s after state change",
                 vm->def->name);
    }

    ret = 0;

 endjob:
    qemuDomainObjEndJob(driver, vm);

 cleanup:
    if (ret == -1)
        ignore_value(qemuProcessKill(vm, VIR_QEMU_PROCESS_KILL_FORCE));
    virDomainObjEndAPI(&vm);
    qemuDomainEventQueue(driver, event);
    virObjectUnref(cfg);
}
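
/*
 * Decide what to do once the guest OS has shut down: either spawn a
 * thread that fakes a reboot (when a fake reboot was requested) or
 * kill the QEMU process.
 */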
void
qemuProcessShutdownOrReboot(virQEMUDriverPtr driver,
                            virDomainObjPtr vm)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;

    if (priv->fakeReboot) {
        qemuDomainSetFakeReboot(driver, vm, false);
        virObjectRef(vm);
        virThread th;
        if (virThreadCreate(&th,
                            false,
                            qemuProcessFakeReboot,
                            vm) < 0) {
            VIR_ERROR(_("Failed to create reboot thread, killing domain"));
            ignore_value(qemuProcessKill(vm, VIR_QEMU_PROCESS_KILL_NOWAIT));
            virObjectUnref(vm);
        }
    } else {
        ignore_value(qemuProcessKill(vm, VIR_QEMU_PROCESS_KILL_NOWAIT));
    }
}
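
/*
 * Generic monitor callback: forward any QMP event to listeners as a
 * virDomainQemuMonitorEvent.
 */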
static int
qemuProcessHandleEvent(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
                       virDomainObjPtr vm,
                       const char *eventName,
                       long long seconds,
                       unsigned int micros,
                       const char *details,
                       void *opaque)
{
    virQEMUDriverPtr driver = opaque;
    virObjectEventPtr event = NULL;

    VIR_DEBUG("vm=%p", vm);

    virObjectLock(vm);
    event = virDomainQemuMonitorEventNew(vm->def->id, vm->def->name,
                                         vm->def->uuid, eventName,
                                         seconds, micros, details);

    virObjectUnlock(vm);
    qemuDomainEventQueue(driver, event);

    return 0;
}
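
/*
 * Monitor callback for the SHUTDOWN event: mark the domain as shutting
 * down, emit a SHUTDOWN lifecycle event (distinguishing guest- from
 * host-initiated shutdown), notify the guest agent and kick off
 * qemuProcessShutdownOrReboot.
 */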
static int
qemuProcessHandleShutdown(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
                          virDomainObjPtr vm,
                          virTristateBool guest_initiated,
                          void *opaque)
{
    virQEMUDriverPtr driver = opaque;
    qemuDomainObjPrivatePtr priv;
    virObjectEventPtr event = NULL;
    virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
    int detail = 0;

    VIR_DEBUG("vm=%p", vm);

    virObjectLock(vm);

    priv = vm->privateData;
    if (priv->gotShutdown) {
        VIR_DEBUG("Ignoring repeated SHUTDOWN event from domain %s",
                  vm->def->name);
        goto unlock;
    } else if (!virDomainObjIsActive(vm)) {
        VIR_DEBUG("Ignoring SHUTDOWN event from inactive domain %s",
                  vm->def->name);
        goto unlock;
    }
    priv->gotShutdown = true;

    VIR_DEBUG("Transitioned guest %s to shutdown state",
              vm->def->name);
    virDomainObjSetState(vm,
                         VIR_DOMAIN_SHUTDOWN,
                         VIR_DOMAIN_SHUTDOWN_UNKNOWN);

    switch (guest_initiated) {
    case VIR_TRISTATE_BOOL_YES:
        detail = VIR_DOMAIN_EVENT_SHUTDOWN_GUEST;
        break;

    case VIR_TRISTATE_BOOL_NO:
        detail = VIR_DOMAIN_EVENT_SHUTDOWN_HOST;
        break;

    default:
        detail = VIR_DOMAIN_EVENT_SHUTDOWN_FINISHED;
        break;
    }

    event = virDomainEventLifecycleNewFromObj(vm,
                                              VIR_DOMAIN_EVENT_SHUTDOWN,
                                              detail);

    if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm, driver->caps) < 0) {
        VIR_WARN("Unable to save status on vm %s after state change",
                 vm->def->name);
    }

    if (priv->agent)
        qemuAgentNotifyEvent(priv->agent, QEMU_AGENT_EVENT_SHUTDOWN);

    qemuProcessShutdownOrReboot(driver, vm);

 unlock:
    virObjectUnlock(vm);
    qemuDomainEventQueue(driver, event);
    virObjectUnref(cfg);

    return 0;
}
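
/*
 * Monitor callback for the STOP event: move a running domain to the
 * paused state, release any lease held by the lock manager and emit a
 * SUSPENDED lifecycle event with a reason derived from the current job
 * (e.g. migration or post-copy).
 */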
static int
qemuProcessHandleStop(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
                      virDomainObjPtr vm,
                      void *opaque)
{
    virQEMUDriverPtr driver = opaque;
    virObjectEventPtr event = NULL;
    virDomainPausedReason reason = VIR_DOMAIN_PAUSED_UNKNOWN;
    virDomainEventSuspendedDetailType detail = VIR_DOMAIN_EVENT_SUSPENDED_PAUSED;
    virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);

    virObjectLock(vm);
    if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
        qemuDomainObjPrivatePtr priv = vm->privateData;

        if (priv->gotShutdown) {
            VIR_DEBUG("Ignoring STOP event after SHUTDOWN");
            goto unlock;
        }

        if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT) {
            if (priv->job.current->stats.status ==
                QEMU_MONITOR_MIGRATION_STATUS_POSTCOPY) {
                reason = VIR_DOMAIN_PAUSED_POSTCOPY;
                detail = VIR_DOMAIN_EVENT_SUSPENDED_POSTCOPY;
            } else {
                reason = VIR_DOMAIN_PAUSED_MIGRATION;
                detail = VIR_DOMAIN_EVENT_SUSPENDED_MIGRATED;
            }
        }

        VIR_DEBUG("Transitioned guest %s to paused state, reason %s",
                  vm->def->name, virDomainPausedReasonTypeToString(reason));

        if (priv->job.current)
            ignore_value(virTimeMillisNow(&priv->job.current->stopped));

        if (priv->signalStop)
            virDomainObjBroadcast(vm);

        virDomainObjSetState(vm, VIR_DOMAIN_PAUSED, reason);
        event = virDomainEventLifecycleNewFromObj(vm,
                                                  VIR_DOMAIN_EVENT_SUSPENDED,
                                                  detail);

        VIR_FREE(priv->lockState);
        if (virDomainLockProcessPause(driver->lockManager, vm, &priv->lockState) < 0)
            VIR_WARN("Unable to release lease on %s", vm->def->name);
        VIR_DEBUG("Preserving lock state '%s'", NULLSTR(priv->lockState));

        if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm, driver->caps) < 0) {
            VIR_WARN("Unable to save status on vm %s after state change",
                     vm->def->name);
        }
    }

 unlock:
    virObjectUnlock(vm);
    qemuDomainEventQueue(driver, event);
    virObjectUnref(cfg);

    return 0;
}
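
/*
 * Monitor callback for the RESUME event: transition a paused domain
 * back to the running state and emit a RESUMED lifecycle event.
 */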
static int
qemuProcessHandleResume(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
                        virDomainObjPtr vm,
                        void *opaque)
{
    virQEMUDriverPtr driver = opaque;
    virObjectEventPtr event = NULL;
    virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);

    virObjectLock(vm);
    if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED) {
        qemuDomainObjPrivatePtr priv = vm->privateData;

        if (priv->gotShutdown) {
            VIR_DEBUG("Ignoring RESUME event after SHUTDOWN");
            goto unlock;
        }

        VIR_DEBUG("Transitioned guest %s out of paused into resumed state",
                  vm->def->name);

        virDomainObjSetState(vm, VIR_DOMAIN_RUNNING,
                             VIR_DOMAIN_RUNNING_UNPAUSED);
        event = virDomainEventLifecycleNewFromObj(vm,
                                                  VIR_DOMAIN_EVENT_RESUMED,
                                                  VIR_DOMAIN_EVENT_RESUMED_UNPAUSED);

        if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm, driver->caps) < 0) {
            VIR_WARN("Unable to save status on vm %s after state change",
                     vm->def->name);
        }
    }

 unlock:
    virObjectUnlock(vm);
    qemuDomainEventQueue(driver, event);
    virObjectUnref(cfg);
    return 0;
}
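
/*
 * Monitor callback for the RTC_CHANGE event: update the stored clock
 * adjustment for variable-offset clocks and emit an RTC change event.
 */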
static int
qemuProcessHandleRTCChange(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
                           virDomainObjPtr vm,
                           long long offset,
                           void *opaque)
{
    virQEMUDriverPtr driver = opaque;
    virObjectEventPtr event = NULL;
    virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);

    virObjectLock(vm);

    if (vm->def->clock.offset == VIR_DOMAIN_CLOCK_OFFSET_VARIABLE) {
        /* when a basedate is manually given on the qemu commandline
         * rather than simply "-rtc base=utc", the offset sent by qemu
         * in this event is *not* the new offset from UTC, but is
         * instead the new offset from the *original basedate* +
         * uptime. For example, if the original offset was 3600 and
         * the guest clock has been advanced by 10 seconds, qemu will
         * send "10" in the event - this means that the new offset
         * from UTC is 3610, *not* 10. If the guest clock is advanced
         * by another 10 seconds, qemu will now send "20" - i.e. each
         * event is the sum of the most recent change and all previous
         * changes since the domain was started. Fortunately, we have
         * saved the initial offset in "adjustment0", so to arrive at
         * the proper new "adjustment", we just add the most recent
         * offset to adjustment0.
         */
        offset += vm->def->clock.data.variable.adjustment0;
        vm->def->clock.data.variable.adjustment = offset;

        if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm, driver->caps) < 0)
            VIR_WARN("unable to save domain status with RTC change");
    }

    event = virDomainEventRTCChangeNewFromObj(vm, offset);

    virObjectUnlock(vm);

    qemuDomainEventQueue(driver, event);
    virObjectUnref(cfg);
    return 0;
}
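
/*
 * Monitor callback for the WATCHDOG event: emit a watchdog event,
 * pause the domain when the watchdog action is "pause", and queue a
 * worker job to perform the configured dump when the action is "dump".
 */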
static int
qemuProcessHandleWatchdog(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
                          virDomainObjPtr vm,
                          int action,
                          void *opaque)
{
    virQEMUDriverPtr driver = opaque;
    virObjectEventPtr watchdogEvent = NULL;
    virObjectEventPtr lifecycleEvent = NULL;
    virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);

    virObjectLock(vm);
    watchdogEvent = virDomainEventWatchdogNewFromObj(vm, action);

    if (action == VIR_DOMAIN_EVENT_WATCHDOG_PAUSE &&
        virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
        qemuDomainObjPrivatePtr priv = vm->privateData;
        VIR_DEBUG("Transitioned guest %s to paused state due to watchdog", vm->def->name);

        virDomainObjSetState(vm, VIR_DOMAIN_PAUSED, VIR_DOMAIN_PAUSED_WATCHDOG);
        lifecycleEvent = virDomainEventLifecycleNewFromObj(vm,
                                                           VIR_DOMAIN_EVENT_SUSPENDED,
                                                           VIR_DOMAIN_EVENT_SUSPENDED_WATCHDOG);

        VIR_FREE(priv->lockState);
        if (virDomainLockProcessPause(driver->lockManager, vm, &priv->lockState) < 0)
            VIR_WARN("Unable to release lease on %s", vm->def->name);
        VIR_DEBUG("Preserving lock state '%s'", NULLSTR(priv->lockState));

        if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm, driver->caps) < 0) {
            VIR_WARN("Unable to save status on vm %s after watchdog event",
                     vm->def->name);
        }
    }

    if (vm->def->watchdog->action == VIR_DOMAIN_WATCHDOG_ACTION_DUMP) {
        struct qemuProcessEvent *processEvent;
        if (VIR_ALLOC(processEvent) == 0) {
            processEvent->eventType = QEMU_PROCESS_EVENT_WATCHDOG;
            processEvent->action = VIR_DOMAIN_WATCHDOG_ACTION_DUMP;
            processEvent->vm = vm;
            /* Hold an extra reference because we can't allow 'vm' to be
             * deleted before handling watchdog event is finished.
             */
            virObjectRef(vm);
            if (virThreadPoolSendJob(driver->workerPool, 0, processEvent) < 0) {
                if (!virObjectUnref(vm))
                    vm = NULL;
                VIR_FREE(processEvent);
            }
        }
    }

    if (vm)
        virObjectUnlock(vm);
    qemuDomainEventQueue(driver, watchdogEvent);
    qemuDomainEventQueue(driver, lifecycleEvent);

    virObjectUnref(cfg);
    return 0;
}
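
/*
 * Monitor callback for the IO_ERROR event: emit IO error events for
 * the affected disk and, when the action is to pause, move the domain
 * to the paused state and release its lease.
 */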
|
|
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
qemuProcessHandleIOError(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
|
|
|
|
virDomainObjPtr vm,
|
|
|
|
const char *diskAlias,
|
|
|
|
int action,
|
2013-07-25 17:26:15 +00:00
|
|
|
const char *reason,
|
|
|
|
void *opaque)
|
2011-02-14 16:09:39 +00:00
|
|
|
{
|
2013-07-25 17:26:15 +00:00
|
|
|
virQEMUDriverPtr driver = opaque;
|
2013-11-22 14:38:05 +00:00
|
|
|
virObjectEventPtr ioErrorEvent = NULL;
|
|
|
|
virObjectEventPtr ioErrorEvent2 = NULL;
|
|
|
|
virObjectEventPtr lifecycleEvent = NULL;
|
2011-02-14 16:09:39 +00:00
|
|
|
const char *srcPath;
|
|
|
|
const char *devAlias;
|
|
|
|
virDomainDiskDefPtr disk;
|
2013-01-10 21:03:14 +00:00
|
|
|
virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2013-01-09 21:00:32 +00:00
|
|
|
virObjectLock(vm);
|
2011-02-14 16:09:39 +00:00
|
|
|
disk = qemuProcessFindDomainDiskByAlias(vm, diskAlias);
|
|
|
|
|
|
|
|
if (disk) {
|
conf: use disk source accessors in qemu/
Part of a series of cleanups to use new accessor methods.
* src/qemu/qemu_conf.c (qemuCheckSharedDevice)
(qemuAddSharedDevice, qemuRemoveSharedDevice, qemuSetUnprivSGIO):
Use accessors.
* src/qemu/qemu_domain.c (qemuDomainDeviceDefPostParse)
(qemuDomainObjCheckDiskTaint, qemuDomainSnapshotForEachQcow2Raw)
(qemuDomainCheckRemoveOptionalDisk, qemuDomainCheckDiskPresence)
(qemuDiskChainCheckBroken, qemuDomainDetermineDiskChain):
Likewise.
* src/qemu/qemu_hotplug.c (qemuDomainChangeEjectableMedia)
(qemuDomainCheckEjectableMedia)
(qemuDomainAttachVirtioDiskDevice, qemuDomainAttachSCSIDisk)
(qemuDomainAttachUSBMassstorageDevice)
(qemuDomainAttachDeviceDiskLive, qemuDomainRemoveDiskDevice)
(qemuDomainDetachVirtioDiskDevice, qemuDomainDetachDiskDevice):
Likewise.
* src/qemu/qemu_migration.c (qemuMigrationStartNBDServer)
(qemuMigrationDriveMirror, qemuMigrationCancelDriveMirror)
(qemuMigrationIsSafe): Likewise.
* src/qemu/qemu_process.c (qemuProcessGetVolumeQcowPassphrase)
(qemuProcessHandleIOError, qemuProcessHandleBlockJob)
(qemuProcessInitPasswords): Likewise.
* src/qemu/qemu_driver.c (qemuDomainChangeDiskMediaLive)
(qemuDomainGetBlockInfo, qemuDiskPathToAlias): Likewise.
Signed-off-by: Eric Blake <eblake@redhat.com>
2014-03-18 19:16:47 +00:00
|
|
|
srcPath = virDomainDiskGetSource(disk);
|
2011-02-14 16:09:39 +00:00
|
|
|
devAlias = disk->info.alias;
|
|
|
|
} else {
|
|
|
|
srcPath = "";
|
|
|
|
devAlias = "";
|
|
|
|
}
|
|
|
|
|
|
|
|
ioErrorEvent = virDomainEventIOErrorNewFromObj(vm, srcPath, devAlias, action);
|
|
|
|
ioErrorEvent2 = virDomainEventIOErrorReasonNewFromObj(vm, srcPath, devAlias, action, reason);
|
|
|
|
|
|
|
|
if (action == VIR_DOMAIN_EVENT_IO_ERROR_PAUSE &&
|
2011-05-04 09:07:01 +00:00
|
|
|
virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
|
2010-10-26 14:04:46 +00:00
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
2011-02-14 16:09:39 +00:00
|
|
|
VIR_DEBUG("Transitioned guest %s to paused state due to IO error", vm->def->name);
|
|
|
|
|
2015-05-29 06:38:44 +00:00
|
|
|
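/* a thread may be waiting on the domain condition for this IO error; wake it up */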
if (priv->signalIOError)
|
|
|
|
virDomainObjBroadcast(vm);
|
|
|
|
|
2011-05-04 09:07:01 +00:00
|
|
|
virDomainObjSetState(vm, VIR_DOMAIN_PAUSED, VIR_DOMAIN_PAUSED_IOERROR);
|
2013-11-21 17:03:26 +00:00
|
|
|
lifecycleEvent = virDomainEventLifecycleNewFromObj(vm,
|
2011-02-14 16:09:39 +00:00
|
|
|
VIR_DOMAIN_EVENT_SUSPENDED,
|
|
|
|
VIR_DOMAIN_EVENT_SUSPENDED_IOERROR);
|
|
|
|
|
2010-10-26 14:04:46 +00:00
|
|
|
VIR_FREE(priv->lockState);
|
|
|
|
if (virDomainLockProcessPause(driver->lockManager, vm, &priv->lockState) < 0)
|
|
|
|
VIR_WARN("Unable to release lease on %s", vm->def->name);
|
|
|
|
VIR_DEBUG("Preserving lock state '%s'", NULLSTR(priv->lockState));
|
|
|
|
|
2016-02-04 12:32:45 +00:00
|
|
|
if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm, driver->caps) < 0)
|
2011-02-14 16:09:39 +00:00
|
|
|
VIR_WARN("Unable to save status on vm %s after IO error", vm->def->name);
|
|
|
|
}
|
2013-01-09 21:00:32 +00:00
|
|
|
virObjectUnlock(vm);
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2015-07-07 13:33:53 +00:00
|
|
|
qemuDomainEventQueue(driver, ioErrorEvent);
|
|
|
|
qemuDomainEventQueue(driver, ioErrorEvent2);
|
|
|
|
qemuDomainEventQueue(driver, lifecycleEvent);
|
2013-01-10 21:03:14 +00:00
|
|
|
virObjectUnref(cfg);
|
2011-02-14 16:09:39 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2011-07-22 05:57:42 +00:00
|
|
|
static int
|
|
|
|
qemuProcessHandleBlockJob(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
|
|
|
|
virDomainObjPtr vm,
|
|
|
|
const char *diskAlias,
|
|
|
|
int type,
|
2013-07-25 17:26:15 +00:00
|
|
|
int status,
|
|
|
|
void *opaque)
|
2011-07-22 05:57:42 +00:00
|
|
|
{
|
2013-07-25 17:26:15 +00:00
|
|
|
virQEMUDriverPtr driver = opaque;
|
2015-03-13 16:00:03 +00:00
|
|
|
struct qemuProcessEvent *processEvent = NULL;
|
2015-03-30 09:26:20 +00:00
|
|
|
virDomainDiskDefPtr disk;
|
2015-05-13 09:20:36 +00:00
|
|
|
qemuDomainDiskPrivatePtr diskPriv;
|
2015-03-30 09:26:20 +00:00
|
|
|
char *data = NULL;
|
2011-07-22 05:57:42 +00:00
|
|
|
|
2013-01-09 21:00:32 +00:00
|
|
|
virObjectLock(vm);
|
blockjob: properly track blockcopy xml changes on disk
We were not directly saving the domain XML to file after starting
or finishing a blockcopy. Without the startup write, a libvirtd
restart in the middle of a copy job would forget that the job was
underway. Then at pivot, we were indirectly writing new XML in
reaction to events that occur as we stop and restart the guest CPUs.
But there was a race: since pivot is an async action, it is possible
that libvirtd is restarted before the pivot completes, so if XML
changes during the event, that change was not written. The original
blockcopy code cleared out the <mirror> element prior to restarting
the CPUs, but this is also a race, observed if a user does an async
pivot and a dumpxml before the event occurs. Furthermore, this race
will interfere with active commit in a future patch, because that
code will rely on the <mirror> element at the time of the qemu event
to determine whether to inform the user of a normal commit or an
active commit.
Fix things by saving state any time we modify live XML, while
delaying XML disk modifications until after the event completes. We
still need to teach libvirtd restarts to examine all existing
<mirror> elements to see if the job completed in the meantime (that
is, if libvirtd misses the event, the updated state still needs to be
updated in live XML), but that will be a later patch, in part because
we also need to start taking advantage of newer qemu's ability to
keep the job around after completion rather than the current usage
where the job disappears both on error and on success.
* src/qemu/qemu_driver.c (qemuDomainBlockCopy): Track XML change
on disk.
(qemuDomainBlockJobImpl, qemuDomainBlockPivot): Move job-end XML
rewrites...
* src/qemu/qemu_process.c (qemuProcessHandleBlockJob): ...here.
Signed-off-by: Eric Blake <eblake@redhat.com>
2014-07-29 20:42:45 +00:00
|
|
|
|
2015-03-13 16:00:03 +00:00
|
|
|
VIR_DEBUG("Block job for device %s (domain: %p,%s) type %d status %d",
|
|
|
|
diskAlias, vm, vm->def->name, type, status);
|
2015-02-10 14:32:59 +00:00
|
|
|
|
2015-03-30 09:26:20 +00:00
|
|
|
if (!(disk = qemuProcessFindDomainDiskByAlias(vm, diskAlias)))
|
2015-03-13 16:00:03 +00:00
|
|
|
goto error;
|
2015-05-13 09:20:36 +00:00
|
|
|
diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk);
|
2015-02-10 14:32:59 +00:00
|
|
|
|
2015-05-13 09:20:36 +00:00
|
|
|
if (diskPriv->blockJobSync) {
|
2015-05-14 12:28:12 +00:00
|
|
|
/* We have a SYNC API waiting for this event, dispatch it back */
|
2015-05-13 09:20:36 +00:00
|
|
|
diskPriv->blockJobType = type;
|
|
|
|
diskPriv->blockJobStatus = status;
|
2015-06-29 14:28:35 +00:00
|
|
|
virDomainObjBroadcast(vm);
|
2015-03-30 09:26:20 +00:00
|
|
|
} else {
|
|
|
|
/* there is no waiting SYNC API, dispatch the update to a thread */
|
|
|
|
if (VIR_ALLOC(processEvent) < 0)
|
|
|
|
goto error;
|
|
|
|
|
|
|
|
processEvent->eventType = QEMU_PROCESS_EVENT_BLOCK_JOB;
|
|
|
|
if (VIR_STRDUP(data, diskAlias) < 0)
|
|
|
|
goto error;
|
|
|
|
processEvent->data = data;
|
|
|
|
processEvent->vm = vm;
|
|
|
|
processEvent->action = type;
|
|
|
|
processEvent->status = status;
|
2015-02-10 14:32:59 +00:00
|
|
|
|
2015-03-30 09:26:20 +00:00
|
|
|
virObjectRef(vm);
|
|
|
|
if (virThreadPoolSendJob(driver->workerPool, 0, processEvent) < 0) {
|
|
|
|
ignore_value(virObjectUnref(vm));
|
|
|
|
goto error;
|
|
|
|
}
|
2011-07-22 05:57:42 +00:00
|
|
|
}
|
|
|
|
|
2015-03-13 16:00:03 +00:00
|
|
|
cleanup:
|
2013-01-09 21:00:32 +00:00
|
|
|
virObjectUnlock(vm);
|
2011-07-22 05:57:42 +00:00
|
|
|
return 0;
|
2015-03-13 16:00:03 +00:00
|
|
|
error:
|
|
|
|
if (processEvent)
|
|
|
|
VIR_FREE(processEvent->data);
|
|
|
|
VIR_FREE(processEvent);
|
|
|
|
goto cleanup;
|
2011-07-22 05:57:42 +00:00
|
|
|
}
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2015-03-13 16:00:03 +00:00
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
static int
|
|
|
|
qemuProcessHandleGraphics(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
|
|
|
|
virDomainObjPtr vm,
|
|
|
|
int phase,
|
|
|
|
int localFamily,
|
|
|
|
const char *localNode,
|
|
|
|
const char *localService,
|
|
|
|
int remoteFamily,
|
|
|
|
const char *remoteNode,
|
|
|
|
const char *remoteService,
|
|
|
|
const char *authScheme,
|
|
|
|
const char *x509dname,
|
2013-07-25 17:26:15 +00:00
|
|
|
const char *saslUsername,
|
|
|
|
void *opaque)
|
2011-02-14 16:09:39 +00:00
|
|
|
{
|
2013-07-25 17:26:15 +00:00
|
|
|
virQEMUDriverPtr driver = opaque;
|
2013-11-22 14:38:05 +00:00
|
|
|
virObjectEventPtr event;
|
2011-02-14 16:09:39 +00:00
|
|
|
virDomainEventGraphicsAddressPtr localAddr = NULL;
|
|
|
|
virDomainEventGraphicsAddressPtr remoteAddr = NULL;
|
|
|
|
virDomainEventGraphicsSubjectPtr subject = NULL;
|
Convert 'int i' to 'size_t i' in src/qemu files
Convert the type of loop iterators named 'i', 'j', 'k',
'ii', 'jj', 'kk', to be 'size_t' instead of 'int' or
'unsigned int', also sanitizing 'ii', 'jj', 'kk' to use
the normal 'i', 'j', 'k' naming
Signed-off-by: Daniel P. Berrange <berrange@redhat.com>
2013-07-08 14:09:33 +00:00
|
|
|
size_t i;
|
2011-02-14 16:09:39 +00:00
|
|
|
|
|
|
|
if (VIR_ALLOC(localAddr) < 0)
|
2013-07-04 10:14:12 +00:00
|
|
|
goto error;
|
2011-02-14 16:09:39 +00:00
|
|
|
localAddr->family = localFamily;
|
2013-05-20 09:23:13 +00:00
|
|
|
if (VIR_STRDUP(localAddr->service, localService) < 0 ||
|
|
|
|
VIR_STRDUP(localAddr->node, localNode) < 0)
|
|
|
|
goto error;
|
2011-02-14 16:09:39 +00:00
|
|
|
|
|
|
|
if (VIR_ALLOC(remoteAddr) < 0)
|
2013-07-04 10:14:12 +00:00
|
|
|
goto error;
|
2011-02-14 16:09:39 +00:00
|
|
|
remoteAddr->family = remoteFamily;
|
2013-05-20 09:23:13 +00:00
|
|
|
if (VIR_STRDUP(remoteAddr->service, remoteService) < 0 ||
|
|
|
|
VIR_STRDUP(remoteAddr->node, remoteNode) < 0)
|
|
|
|
goto error;
|
2011-02-14 16:09:39 +00:00
|
|
|
|
|
|
|
if (VIR_ALLOC(subject) < 0)
|
2013-07-04 10:14:12 +00:00
|
|
|
goto error;
|
2011-02-14 16:09:39 +00:00
|
|
|
if (x509dname) {
|
|
|
|
if (VIR_REALLOC_N(subject->identities, subject->nidentity+1) < 0)
|
2013-07-04 10:14:12 +00:00
|
|
|
goto error;
|
2011-02-14 16:09:39 +00:00
|
|
|
subject->nidentity++;
|
2013-05-20 09:23:13 +00:00
|
|
|
if (VIR_STRDUP(subject->identities[subject->nidentity-1].type, "x509dname") < 0 ||
|
|
|
|
VIR_STRDUP(subject->identities[subject->nidentity-1].name, x509dname) < 0)
|
|
|
|
goto error;
|
2011-02-14 16:09:39 +00:00
|
|
|
}
|
|
|
|
if (saslUsername) {
|
|
|
|
if (VIR_REALLOC_N(subject->identities, subject->nidentity+1) < 0)
|
2013-07-04 10:14:12 +00:00
|
|
|
goto error;
|
2011-02-14 16:09:39 +00:00
|
|
|
subject->nidentity++;
|
2013-05-20 09:23:13 +00:00
|
|
|
if (VIR_STRDUP(subject->identities[subject->nidentity-1].type, "saslUsername") < 0 ||
|
|
|
|
VIR_STRDUP(subject->identities[subject->nidentity-1].name, saslUsername) < 0)
|
|
|
|
goto error;
|
2011-02-14 16:09:39 +00:00
|
|
|
}
|
|
|
|
|
2013-01-09 21:00:32 +00:00
|
|
|
virObjectLock(vm);
|
2011-02-14 16:09:39 +00:00
|
|
|
event = virDomainEventGraphicsNewFromObj(vm, phase, localAddr, remoteAddr, authScheme, subject);
|
2013-01-09 21:00:32 +00:00
|
|
|
virObjectUnlock(vm);
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2015-07-07 13:33:53 +00:00
|
|
|
qemuDomainEventQueue(driver, event);
|
2011-02-14 16:09:39 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
2014-03-25 06:49:44 +00:00
|
|
|
error:
|
2011-02-14 16:09:39 +00:00
|
|
|
if (localAddr) {
|
|
|
|
VIR_FREE(localAddr->service);
|
|
|
|
VIR_FREE(localAddr->node);
|
|
|
|
VIR_FREE(localAddr);
|
|
|
|
}
|
|
|
|
if (remoteAddr) {
|
|
|
|
VIR_FREE(remoteAddr->service);
|
|
|
|
VIR_FREE(remoteAddr->node);
|
|
|
|
VIR_FREE(remoteAddr);
|
|
|
|
}
|
|
|
|
if (subject) {
|
2013-05-21 07:21:20 +00:00
|
|
|
for (i = 0; i < subject->nidentity; i++) {
|
2011-02-14 16:09:39 +00:00
|
|
|
VIR_FREE(subject->identities[i].type);
|
|
|
|
VIR_FREE(subject->identities[i].name);
|
|
|
|
}
|
|
|
|
VIR_FREE(subject->identities);
|
|
|
|
VIR_FREE(subject);
|
|
|
|
}
|
|
|
|
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2012-03-23 13:44:50 +00:00
|
|
|
static int
|
|
|
|
qemuProcessHandleTrayChange(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
|
|
|
|
virDomainObjPtr vm,
|
|
|
|
const char *devAlias,
|
2013-07-25 17:26:15 +00:00
|
|
|
int reason,
|
|
|
|
void *opaque)
|
2012-03-23 13:44:50 +00:00
|
|
|
{
|
2013-07-25 17:26:15 +00:00
|
|
|
virQEMUDriverPtr driver = opaque;
|
2013-11-22 14:38:05 +00:00
|
|
|
virObjectEventPtr event = NULL;
|
2012-03-23 13:44:50 +00:00
|
|
|
virDomainDiskDefPtr disk;
|
2013-01-10 21:03:14 +00:00
|
|
|
virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
|
2012-03-23 13:44:50 +00:00
|
|
|
|
2013-01-09 21:00:32 +00:00
|
|
|
virObjectLock(vm);
|
2012-03-23 13:44:50 +00:00
|
|
|
disk = qemuProcessFindDomainDiskByAlias(vm, devAlias);
|
|
|
|
|
|
|
|
if (disk) {
|
|
|
|
event = virDomainEventTrayChangeNewFromObj(vm,
|
|
|
|
devAlias,
|
|
|
|
reason);
|
2012-03-14 15:26:50 +00:00
|
|
|
/* Update disk tray status */
|
|
|
|
if (reason == VIR_DOMAIN_EVENT_TRAY_CHANGE_OPEN)
|
|
|
|
disk->tray_status = VIR_DOMAIN_DISK_TRAY_OPEN;
|
|
|
|
else if (reason == VIR_DOMAIN_EVENT_TRAY_CHANGE_CLOSE)
|
|
|
|
disk->tray_status = VIR_DOMAIN_DISK_TRAY_CLOSED;
|
|
|
|
|
2016-02-04 12:32:45 +00:00
|
|
|
if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm, driver->caps) < 0) {
|
2012-03-14 15:26:50 +00:00
|
|
|
VIR_WARN("Unable to save status on vm %s after tray moved event",
|
|
|
|
vm->def->name);
|
|
|
|
}
|
2015-06-29 14:19:44 +00:00
|
|
|
|
|
|
|
virDomainObjBroadcast(vm);
|
2012-03-23 13:44:50 +00:00
|
|
|
}
|
|
|
|
|
2013-01-09 21:00:32 +00:00
|
|
|
virObjectUnlock(vm);
|
2015-07-07 13:33:53 +00:00
|
|
|
qemuDomainEventQueue(driver, event);
|
2013-01-10 21:03:14 +00:00
|
|
|
virObjectUnref(cfg);
|
2012-03-23 13:44:50 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2012-03-23 14:43:14 +00:00
|
|
|
static int
|
|
|
|
qemuProcessHandlePMWakeup(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
|
2013-07-25 17:26:15 +00:00
|
|
|
virDomainObjPtr vm,
|
|
|
|
void *opaque)
|
2012-03-23 14:43:14 +00:00
|
|
|
{
|
2013-07-25 17:26:15 +00:00
|
|
|
virQEMUDriverPtr driver = opaque;
|
2013-11-22 14:38:05 +00:00
|
|
|
virObjectEventPtr event = NULL;
|
|
|
|
virObjectEventPtr lifecycleEvent = NULL;
|
2013-01-10 21:03:14 +00:00
|
|
|
virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
|
2012-03-23 14:43:14 +00:00
|
|
|
|
2013-01-09 21:00:32 +00:00
|
|
|
virObjectLock(vm);
|
2012-03-23 14:43:14 +00:00
|
|
|
event = virDomainEventPMWakeupNewFromObj(vm);
|
|
|
|
|
2012-03-14 15:26:55 +00:00
|
|
|
/* Don't set domain status back to running if it wasn't paused
|
|
|
|
* from guest side, otherwise it can just cause confusion.
|
|
|
|
*/
|
|
|
|
if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PMSUSPENDED) {
|
|
|
|
VIR_DEBUG("Transitioned guest %s from pmsuspended to running "
|
|
|
|
"state due to QMP wakeup event", vm->def->name);
|
|
|
|
|
|
|
|
virDomainObjSetState(vm, VIR_DOMAIN_RUNNING,
|
|
|
|
VIR_DOMAIN_RUNNING_WAKEUP);
|
2013-11-21 17:03:26 +00:00
|
|
|
lifecycleEvent = virDomainEventLifecycleNewFromObj(vm,
|
2012-03-14 15:26:55 +00:00
|
|
|
VIR_DOMAIN_EVENT_STARTED,
|
|
|
|
VIR_DOMAIN_EVENT_STARTED_WAKEUP);
|
|
|
|
|
2016-02-04 12:32:45 +00:00
|
|
|
if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm, driver->caps) < 0) {
|
2012-03-14 15:26:55 +00:00
|
|
|
VIR_WARN("Unable to save status on vm %s after wakeup event",
|
|
|
|
vm->def->name);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-01-09 21:00:32 +00:00
|
|
|
virObjectUnlock(vm);
|
2015-07-07 13:33:53 +00:00
|
|
|
qemuDomainEventQueue(driver, event);
|
|
|
|
qemuDomainEventQueue(driver, lifecycleEvent);
|
2013-01-10 21:03:14 +00:00
|
|
|
virObjectUnref(cfg);
|
2012-03-23 14:43:14 +00:00
|
|
|
return 0;
|
|
|
|
}
|
2012-03-23 13:44:50 +00:00
|
|
|
|
2012-03-23 14:50:36 +00:00
|
|
|
static int
|
|
|
|
qemuProcessHandlePMSuspend(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
|
2013-07-25 17:26:15 +00:00
|
|
|
virDomainObjPtr vm,
|
|
|
|
void *opaque)
|
2012-03-23 14:50:36 +00:00
|
|
|
{
|
2013-07-25 17:26:15 +00:00
|
|
|
virQEMUDriverPtr driver = opaque;
|
2013-11-22 14:38:05 +00:00
|
|
|
virObjectEventPtr event = NULL;
|
|
|
|
virObjectEventPtr lifecycleEvent = NULL;
|
2013-01-10 21:03:14 +00:00
|
|
|
virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
|
2012-03-23 14:50:36 +00:00
|
|
|
|
2013-01-09 21:00:32 +00:00
|
|
|
virObjectLock(vm);
|
2012-03-23 14:50:36 +00:00
|
|
|
event = virDomainEventPMSuspendNewFromObj(vm);
|
|
|
|
|
2012-03-14 15:26:54 +00:00
|
|
|
if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
|
2012-06-15 16:00:13 +00:00
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
2012-03-14 15:26:54 +00:00
|
|
|
VIR_DEBUG("Transitioned guest %s to pmsuspended state due to "
|
|
|
|
"QMP suspend event", vm->def->name);
|
|
|
|
|
|
|
|
virDomainObjSetState(vm, VIR_DOMAIN_PMSUSPENDED,
|
|
|
|
VIR_DOMAIN_PMSUSPENDED_UNKNOWN);
|
2012-09-06 15:00:43 +00:00
|
|
|
lifecycleEvent =
|
2013-11-21 17:03:26 +00:00
|
|
|
virDomainEventLifecycleNewFromObj(vm,
|
2012-09-06 15:00:43 +00:00
|
|
|
VIR_DOMAIN_EVENT_PMSUSPENDED,
|
|
|
|
VIR_DOMAIN_EVENT_PMSUSPENDED_MEMORY);
|
2012-03-14 15:26:54 +00:00
|
|
|
|
2016-02-04 12:32:45 +00:00
|
|
|
if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm, driver->caps) < 0) {
|
2012-03-14 15:26:54 +00:00
|
|
|
VIR_WARN("Unable to save status on vm %s after suspend event",
|
|
|
|
vm->def->name);
|
|
|
|
}
|
2012-06-15 16:00:13 +00:00
|
|
|
|
|
|
|
if (priv->agent)
|
|
|
|
qemuAgentNotifyEvent(priv->agent, QEMU_AGENT_EVENT_SUSPEND);
|
2012-03-14 15:26:54 +00:00
|
|
|
}
|
|
|
|
|
2013-01-09 21:00:32 +00:00
|
|
|
virObjectUnlock(vm);
|
2012-03-23 14:50:36 +00:00
|
|
|
|
2015-07-07 13:33:53 +00:00
|
|
|
qemuDomainEventQueue(driver, event);
|
|
|
|
qemuDomainEventQueue(driver, lifecycleEvent);
|
2013-01-10 21:03:14 +00:00
|
|
|
virObjectUnref(cfg);
|
2012-03-23 14:50:36 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2012-07-12 15:45:57 +00:00
|
|
|
static int
|
|
|
|
qemuProcessHandleBalloonChange(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
|
|
|
|
virDomainObjPtr vm,
|
2013-07-25 17:26:15 +00:00
|
|
|
unsigned long long actual,
|
|
|
|
void *opaque)
|
2012-07-12 15:45:57 +00:00
|
|
|
{
|
2013-07-25 17:26:15 +00:00
|
|
|
virQEMUDriverPtr driver = opaque;
|
2013-11-22 14:38:05 +00:00
|
|
|
virObjectEventPtr event = NULL;
|
2013-01-10 21:03:14 +00:00
|
|
|
virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
|
2012-07-12 15:45:57 +00:00
|
|
|
|
2013-01-09 21:00:32 +00:00
|
|
|
virObjectLock(vm);
|
2012-07-12 15:45:57 +00:00
|
|
|
event = virDomainEventBalloonChangeNewFromObj(vm, actual);
|
|
|
|
|
|
|
|
VIR_DEBUG("Updating balloon from %lld to %lld kb",
|
|
|
|
vm->def->mem.cur_balloon, actual);
|
|
|
|
vm->def->mem.cur_balloon = actual;
|
|
|
|
|
2016-02-04 12:32:45 +00:00
|
|
|
if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm, driver->caps) < 0)
|
2012-07-12 15:45:57 +00:00
|
|
|
VIR_WARN("unable to save domain status with balloon change");
|
|
|
|
|
2013-01-09 21:00:32 +00:00
|
|
|
virObjectUnlock(vm);
|
2012-07-12 15:45:57 +00:00
|
|
|
|
2015-07-07 13:33:53 +00:00
|
|
|
qemuDomainEventQueue(driver, event);
|
2013-01-10 21:03:14 +00:00
|
|
|
virObjectUnref(cfg);
|
2012-07-12 15:45:57 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2012-10-12 19:13:39 +00:00
|
|
|
static int
|
|
|
|
qemuProcessHandlePMSuspendDisk(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
|
2013-07-25 17:26:15 +00:00
|
|
|
virDomainObjPtr vm,
|
|
|
|
void *opaque)
|
2012-10-12 19:13:39 +00:00
|
|
|
{
|
2013-07-25 17:26:15 +00:00
|
|
|
virQEMUDriverPtr driver = opaque;
|
2013-11-22 14:38:05 +00:00
|
|
|
virObjectEventPtr event = NULL;
|
|
|
|
virObjectEventPtr lifecycleEvent = NULL;
|
2013-01-10 21:03:14 +00:00
|
|
|
virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
|
2012-10-12 19:13:39 +00:00
|
|
|
|
2013-01-09 21:00:32 +00:00
|
|
|
virObjectLock(vm);
|
2012-10-12 19:13:39 +00:00
|
|
|
event = virDomainEventPMSuspendDiskNewFromObj(vm);
|
|
|
|
|
|
|
|
if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
|
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
|
|
|
VIR_DEBUG("Transitioned guest %s to pmsuspended state due to "
|
|
|
|
"QMP suspend_disk event", vm->def->name);
|
|
|
|
|
|
|
|
virDomainObjSetState(vm, VIR_DOMAIN_PMSUSPENDED,
|
|
|
|
VIR_DOMAIN_PMSUSPENDED_UNKNOWN);
|
|
|
|
lifecycleEvent =
|
2013-11-21 17:03:26 +00:00
|
|
|
virDomainEventLifecycleNewFromObj(vm,
|
2012-10-12 19:13:39 +00:00
|
|
|
VIR_DOMAIN_EVENT_PMSUSPENDED,
|
|
|
|
VIR_DOMAIN_EVENT_PMSUSPENDED_DISK);
|
|
|
|
|
2016-02-04 12:32:45 +00:00
|
|
|
if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm, driver->caps) < 0) {
|
2012-10-12 19:13:39 +00:00
|
|
|
VIR_WARN("Unable to save status on vm %s after suspend event",
|
|
|
|
vm->def->name);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (priv->agent)
|
|
|
|
qemuAgentNotifyEvent(priv->agent, QEMU_AGENT_EVENT_SUSPEND);
|
|
|
|
}
|
|
|
|
|
2013-01-09 21:00:32 +00:00
|
|
|
virObjectUnlock(vm);
|
2012-10-12 19:13:39 +00:00
|
|
|
|
2015-07-07 13:33:53 +00:00
|
|
|
qemuDomainEventQueue(driver, event);
|
|
|
|
qemuDomainEventQueue(driver, lifecycleEvent);
|
2013-01-10 21:03:14 +00:00
|
|
|
virObjectUnref(cfg);
|
|
|
|
|
2012-10-12 19:13:39 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2012-07-12 15:45:57 +00:00
|
|
|
|
2013-06-07 10:23:34 +00:00
|
|
|
static int
|
|
|
|
qemuProcessHandleGuestPanic(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
|
2013-07-25 17:26:15 +00:00
|
|
|
virDomainObjPtr vm,
|
2017-03-20 13:35:33 +00:00
|
|
|
qemuMonitorEventPanicInfoPtr info,
|
2013-07-25 17:26:15 +00:00
|
|
|
void *opaque)
|
2013-06-07 10:23:34 +00:00
|
|
|
{
|
2013-07-25 17:26:15 +00:00
|
|
|
virQEMUDriverPtr driver = opaque;
|
2013-06-07 10:23:34 +00:00
|
|
|
struct qemuProcessEvent *processEvent;
|
|
|
|
|
|
|
|
virObjectLock(vm);
|
2013-07-04 10:14:12 +00:00
|
|
|
if (VIR_ALLOC(processEvent) < 0)
|
2013-06-07 10:23:34 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
processEvent->eventType = QEMU_PROCESS_EVENT_GUESTPANIC;
|
|
|
|
processEvent->action = vm->def->onCrash;
|
|
|
|
processEvent->vm = vm;
|
2017-03-20 13:35:33 +00:00
|
|
|
processEvent->data = info;
|
2013-06-07 10:23:34 +00:00
|
|
|
/* Hold an extra reference because we can't allow 'vm' to be
|
|
|
|
* deleted before handling guest panic event is finished.
|
|
|
|
*/
|
|
|
|
virObjectRef(vm);
|
|
|
|
if (virThreadPoolSendJob(driver->workerPool, 0, processEvent) < 0) {
|
|
|
|
if (!virObjectUnref(vm))
|
|
|
|
vm = NULL;
|
|
|
|
VIR_FREE(processEvent);
|
|
|
|
}
|
|
|
|
|
2014-03-25 06:49:44 +00:00
|
|
|
cleanup:
|
2013-06-07 10:23:34 +00:00
|
|
|
if (vm)
|
qemu: completely rework reference counting
There is one problem that causes various errors in the daemon. When
domain is waiting for a job, it is unlocked while waiting on the
condition. However, if that domain is for example transient and being
removed in another API (e.g. cancelling incoming migration), it get's
unref'd. If the first call, that was waiting, fails to get the job, it
unref's the domain object, and because it was the last reference, it
causes clearing of the whole domain object. However, when finishing the
call, the domain must be unlocked, but there is no way for the API to
know whether it was cleaned or not (unless there is some ugly temporary
variable, but let's scratch that).
The root cause is that our APIs don't ref the objects they are using and
all use the implicit reference that the object has when it is in the
domain list. That reference can be removed when the API is waiting for
a job. And because each domain doesn't do its ref'ing, it results in
the ugly checking of the return value of virObjectUnref() that we have
everywhere.
This patch changes qemuDomObjFromDomain() to ref the domain (using
virDomainObjListFindByUUIDRef()) and adds qemuDomObjEndAPI() which
should be the only function in which the return value of
virObjectUnref() is checked. This makes all reference counting
deterministic and makes the code a bit clearer.
Signed-off-by: Martin Kletzander <mkletzan@redhat.com>
2014-12-04 13:41:36 +00:00
|
|
|
virObjectUnlock(vm);
|
2013-06-07 10:23:34 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2013-07-26 12:24:55 +00:00
|
|
|
int
|
2013-07-11 15:11:02 +00:00
|
|
|
qemuProcessHandleDeviceDeleted(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
|
|
|
|
virDomainObjPtr vm,
|
2013-07-25 17:26:15 +00:00
|
|
|
const char *devAlias,
|
|
|
|
void *opaque)
|
2013-07-11 15:11:02 +00:00
|
|
|
{
|
2013-07-25 17:26:15 +00:00
|
|
|
virQEMUDriverPtr driver = opaque;
|
2014-05-26 15:02:05 +00:00
|
|
|
struct qemuProcessEvent *processEvent = NULL;
|
|
|
|
char *data;
|
2013-07-11 15:11:02 +00:00
|
|
|
|
|
|
|
virObjectLock(vm);
|
|
|
|
|
|
|
|
VIR_DEBUG("Device %s removed from domain %p %s",
|
|
|
|
devAlias, vm, vm->def->name);
|
|
|
|
|
2016-04-04 15:17:43 +00:00
|
|
|
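/* if a thread is synchronously waiting for this unplug, it consumes the
 * event and no worker job needs to be scheduled */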
if (qemuDomainSignalDeviceRemoval(vm, devAlias,
|
|
|
|
QEMU_DOMAIN_UNPLUGGING_DEVICE_STATUS_OK))
|
2014-05-26 15:01:52 +00:00
|
|
|
goto cleanup;
|
2013-07-11 15:11:02 +00:00
|
|
|
|
2014-05-26 15:02:05 +00:00
|
|
|
if (VIR_ALLOC(processEvent) < 0)
|
|
|
|
goto error;
|
2013-07-11 15:11:02 +00:00
|
|
|
|
2014-05-26 15:02:05 +00:00
|
|
|
processEvent->eventType = QEMU_PROCESS_EVENT_DEVICE_DELETED;
|
|
|
|
if (VIR_STRDUP(data, devAlias) < 0)
|
|
|
|
goto error;
|
|
|
|
processEvent->data = data;
|
|
|
|
processEvent->vm = vm;
|
2013-07-11 15:11:02 +00:00
|
|
|
|
2014-05-26 15:02:05 +00:00
|
|
|
virObjectRef(vm);
|
|
|
|
if (virThreadPoolSendJob(driver->workerPool, 0, processEvent) < 0) {
|
|
|
|
ignore_value(virObjectUnref(vm));
|
|
|
|
goto error;
|
|
|
|
}
|
2013-07-11 15:11:02 +00:00
|
|
|
|
2014-03-25 06:49:44 +00:00
|
|
|
cleanup:
|
2013-07-11 15:11:02 +00:00
|
|
|
virObjectUnlock(vm);
|
|
|
|
return 0;
|
2014-05-26 15:02:05 +00:00
|
|
|
error:
|
|
|
|
if (processEvent)
|
|
|
|
VIR_FREE(processEvent->data);
|
|
|
|
VIR_FREE(processEvent);
|
|
|
|
goto cleanup;
|
2013-07-11 15:11:02 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2016-04-01 15:48:20 +00:00
|
|
|
/**
|
|
|
|
*
|
|
|
|
* Meaning of fields reported by the event according to the ACPI standard:
|
|
|
|
* @source:
|
|
|
|
* 0x00 - 0xff: Notification values, as passed at the request time
|
|
|
|
* 0x100: Operating System Shutdown Processing
|
|
|
|
* 0x103: Ejection processing
|
|
|
|
* 0x200: Insertion processing
|
|
|
|
* other values are reserved
|
|
|
|
*
|
|
|
|
* @status:
|
|
|
|
* general values
|
|
|
|
* 0x00: success
|
|
|
|
* 0x01: non-specific failure
|
|
|
|
* 0x02: unrecognized notify code
|
|
|
|
* 0x03 - 0x7f: reserved
|
|
|
|
* other values are specific to the notification type
|
|
|
|
*
|
|
|
|
* for the 0x100 source the following additional codes are standardized
|
|
|
|
* 0x80: OS Shutdown request denied
|
|
|
|
* 0x81: OS Shutdown in progress
|
|
|
|
* 0x82: OS Shutdown completed
|
|
|
|
* 0x83: OS Graceful shutdown not supported
|
|
|
|
* other values are reserved
|
|
|
|
*
|
|
|
|
* Other fields and semantics are specific to the qemu handling of the event.
|
|
|
|
* - @alias may be NULL for successful unplug operations
|
|
|
|
* - @slotType describes the device type a bit more closely, currently the
|
|
|
|
* only known value is 'DIMM'
|
|
|
|
* - @slot describes the specific device
|
|
|
|
*
|
|
|
|
* Note that qemu does not emit the event for all the documented sources or
|
|
|
|
* devices.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
qemuProcessHandleAcpiOstInfo(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
|
|
|
|
virDomainObjPtr vm,
|
|
|
|
const char *alias,
|
|
|
|
const char *slotType,
|
|
|
|
const char *slot,
|
|
|
|
unsigned int source,
|
|
|
|
unsigned int status,
|
|
|
|
void *opaque)
|
|
|
|
{
|
|
|
|
virQEMUDriverPtr driver = opaque;
|
|
|
|
virObjectEventPtr event = NULL;
|
|
|
|
|
|
|
|
virObjectLock(vm);
|
|
|
|
|
|
|
|
VIR_DEBUG("ACPI OST info for device %s domain %p %s. "
|
|
|
|
"slotType='%s' slot='%s' source=%u status=%u",
|
|
|
|
NULLSTR(alias), vm, vm->def->name, slotType, slot, source, status);
|
|
|
|
|
|
|
|
/* handle memory unplug failure */
|
|
|
|
if (STREQ(slotType, "DIMM") && alias && status == 1) {
|
|
|
|
qemuDomainSignalDeviceRemoval(vm, alias,
|
|
|
|
QEMU_DOMAIN_UNPLUGGING_DEVICE_STATUS_GUEST_REJECTED);
|
|
|
|
|
|
|
|
event = virDomainEventDeviceRemovalFailedNewFromObj(vm, alias);
|
|
|
|
}
|
|
|
|
|
|
|
|
virObjectUnlock(vm);
|
|
|
|
qemuDomainEventQueue(driver, event);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2017-02-22 16:51:26 +00:00
|
|
|
static int
|
|
|
|
qemuProcessHandleBlockThreshold(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
|
|
|
|
virDomainObjPtr vm,
|
|
|
|
const char *nodename,
|
|
|
|
unsigned long long threshold,
|
|
|
|
unsigned long long excess,
|
|
|
|
void *opaque)
|
|
|
|
{
|
|
|
|
virQEMUDriverPtr driver = opaque;
|
|
|
|
virObjectEventPtr event = NULL;
|
|
|
|
virDomainDiskDefPtr disk;
|
|
|
|
virStorageSourcePtr src;
|
|
|
|
unsigned int idx;
|
|
|
|
char *dev = NULL;
|
|
|
|
const char *path = NULL;
|
|
|
|
|
|
|
|
virObjectLock(vm);
|
|
|
|
|
|
|
|
VIR_DEBUG("BLOCK_WRITE_THRESHOLD event for block node '%s' in domain %p %s:"
|
|
|
|
"threshold '%llu' exceeded by '%llu'",
|
|
|
|
nodename, vm, vm->def->name, threshold, excess);
|
|
|
|
|
|
|
|
if ((disk = qemuDomainDiskLookupByNodename(vm->def, nodename, &src, &idx))) {
|
|
|
|
if (virStorageSourceIsLocalStorage(src))
|
|
|
|
path = src->path;
|
|
|
|
|
|
|
|
if ((dev = qemuDomainDiskBackingStoreGetName(disk, src, idx))) {
|
|
|
|
event = virDomainEventBlockThresholdNewFromObj(vm, dev, path,
|
|
|
|
threshold, excess);
|
|
|
|
VIR_FREE(dev);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
virObjectUnlock(vm);
|
|
|
|
qemuDomainEventQueue(driver, event);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2014-09-17 17:07:50 +00:00
|
|
|
static int
|
|
|
|
qemuProcessHandleNicRxFilterChanged(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
|
|
|
|
virDomainObjPtr vm,
|
|
|
|
const char *devAlias,
|
|
|
|
void *opaque)
|
|
|
|
{
|
|
|
|
virQEMUDriverPtr driver = opaque;
|
|
|
|
struct qemuProcessEvent *processEvent = NULL;
|
|
|
|
char *data;
|
|
|
|
|
|
|
|
virObjectLock(vm);
|
|
|
|
|
|
|
|
VIR_DEBUG("Device %s RX Filter changed in domain %p %s",
|
|
|
|
devAlias, vm, vm->def->name);
|
|
|
|
|
|
|
|
if (VIR_ALLOC(processEvent) < 0)
|
|
|
|
goto error;
|
|
|
|
|
|
|
|
processEvent->eventType = QEMU_PROCESS_EVENT_NIC_RX_FILTER_CHANGED;
|
|
|
|
if (VIR_STRDUP(data, devAlias) < 0)
|
|
|
|
goto error;
|
|
|
|
processEvent->data = data;
|
|
|
|
processEvent->vm = vm;
|
|
|
|
|
|
|
|
virObjectRef(vm);
|
|
|
|
if (virThreadPoolSendJob(driver->workerPool, 0, processEvent) < 0) {
|
|
|
|
ignore_value(virObjectUnref(vm));
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
|
|
|
cleanup:
|
|
|
|
virObjectUnlock(vm);
|
|
|
|
return 0;
|
|
|
|
error:
|
|
|
|
if (processEvent)
|
|
|
|
VIR_FREE(processEvent->data);
|
|
|
|
VIR_FREE(processEvent);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2014-11-13 13:09:39 +00:00
|
|
|
static int
|
|
|
|
qemuProcessHandleSerialChanged(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
|
|
|
|
virDomainObjPtr vm,
|
|
|
|
const char *devAlias,
|
|
|
|
bool connected,
|
|
|
|
void *opaque)
|
|
|
|
{
|
|
|
|
virQEMUDriverPtr driver = opaque;
|
|
|
|
struct qemuProcessEvent *processEvent = NULL;
|
|
|
|
char *data;
|
|
|
|
|
|
|
|
virObjectLock(vm);
|
|
|
|
|
|
|
|
VIR_DEBUG("Serial port %s state changed to '%d' in domain %p %s",
|
|
|
|
devAlias, connected, vm, vm->def->name);
|
|
|
|
|
|
|
|
if (VIR_ALLOC(processEvent) < 0)
|
|
|
|
goto error;
|
|
|
|
|
|
|
|
processEvent->eventType = QEMU_PROCESS_EVENT_SERIAL_CHANGED;
|
|
|
|
if (VIR_STRDUP(data, devAlias) < 0)
|
|
|
|
goto error;
|
|
|
|
processEvent->data = data;
|
|
|
|
processEvent->action = connected;
|
|
|
|
processEvent->vm = vm;
|
|
|
|
|
|
|
|
virObjectRef(vm);
|
|
|
|
if (virThreadPoolSendJob(driver->workerPool, 0, processEvent) < 0) {
|
|
|
|
ignore_value(virObjectUnref(vm));
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
|
|
|
cleanup:
|
|
|
|
virObjectUnlock(vm);
|
|
|
|
return 0;
|
|
|
|
error:
|
|
|
|
if (processEvent)
|
|
|
|
VIR_FREE(processEvent->data);
|
|
|
|
VIR_FREE(processEvent);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2015-05-25 14:57:49 +00:00
|
|
|
static int
|
|
|
|
qemuProcessHandleSpiceMigrated(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
|
|
|
|
virDomainObjPtr vm,
|
|
|
|
void *opaque ATTRIBUTE_UNUSED)
|
|
|
|
{
|
|
|
|
qemuDomainObjPrivatePtr priv;
|
|
|
|
|
|
|
|
virObjectLock(vm);
|
|
|
|
|
|
|
|
VIR_DEBUG("Spice migration completed for domain %p %s",
|
|
|
|
vm, vm->def->name);
|
|
|
|
|
|
|
|
priv = vm->privateData;
|
|
|
|
if (priv->job.asyncJob != QEMU_ASYNC_JOB_MIGRATION_OUT) {
|
|
|
|
VIR_DEBUG("got SPICE_MIGRATE_COMPLETED event without a migration job");
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
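/* record that seamless SPICE migration finished and wake up the migration job waiting for it */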
priv->job.spiceMigrated = true;
|
2015-06-29 14:28:35 +00:00
|
|
|
virDomainObjBroadcast(vm);
|
2015-05-25 14:57:49 +00:00
|
|
|
|
|
|
|
cleanup:
|
|
|
|
virObjectUnlock(vm);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2015-05-29 06:37:59 +00:00
|
|
|
static int
|
|
|
|
qemuProcessHandleMigrationStatus(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
|
|
|
|
virDomainObjPtr vm,
|
|
|
|
int status,
|
|
|
|
void *opaque ATTRIBUTE_UNUSED)
|
|
|
|
{
|
|
|
|
qemuDomainObjPrivatePtr priv;
|
|
|
|
|
|
|
|
virObjectLock(vm);
|
|
|
|
|
|
|
|
VIR_DEBUG("Migration of domain %p %s changed state to %s",
|
|
|
|
vm, vm->def->name,
|
|
|
|
qemuMonitorMigrationStatusTypeToString(status));
|
|
|
|
|
|
|
|
priv = vm->privateData;
|
2015-07-13 12:15:03 +00:00
|
|
|
if (priv->job.asyncJob == QEMU_ASYNC_JOB_NONE) {
|
2015-05-29 06:37:59 +00:00
|
|
|
VIR_DEBUG("got MIGRATION event without a migration job");
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2015-11-26 12:23:08 +00:00
|
|
|
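/* update the cached migration status and wake up any thread waiting for the change */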
priv->job.current->stats.status = status;
|
2015-05-29 06:37:59 +00:00
|
|
|
virDomainObjBroadcast(vm);
|
|
|
|
|
|
|
|
cleanup:
|
|
|
|
virObjectUnlock(vm);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2015-12-08 14:23:35 +00:00
|
|
|
static int
|
|
|
|
qemuProcessHandleMigrationPass(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
|
|
|
|
virDomainObjPtr vm,
|
|
|
|
int pass,
|
|
|
|
void *opaque)
|
|
|
|
{
|
|
|
|
virQEMUDriverPtr driver = opaque;
|
|
|
|
qemuDomainObjPrivatePtr priv;
|
|
|
|
|
|
|
|
virObjectLock(vm);
|
|
|
|
|
|
|
|
VIR_DEBUG("Migrating domain %p %s, iteration %d",
|
|
|
|
vm, vm->def->name, pass);
|
|
|
|
|
|
|
|
priv = vm->privateData;
|
|
|
|
if (priv->job.asyncJob == QEMU_ASYNC_JOB_NONE) {
|
|
|
|
VIR_DEBUG("got MIGRATION_PASS event without a migration job");
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
qemuDomainEventQueue(driver,
|
|
|
|
virDomainEventMigrationIterationNewFromObj(vm, pass));
|
|
|
|
|
|
|
|
cleanup:
|
|
|
|
virObjectUnlock(vm);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
static qemuMonitorCallbacks monitorCallbacks = {
|
|
|
|
.eofNotify = qemuProcessHandleMonitorEOF,
|
2011-05-29 12:37:29 +00:00
|
|
|
.errorNotify = qemuProcessHandleMonitorError,
|
2011-02-14 16:09:39 +00:00
|
|
|
.diskSecretLookup = qemuProcessFindVolumeQcowPassphrase,
|
2014-01-30 00:14:44 +00:00
|
|
|
.domainEvent = qemuProcessHandleEvent,
|
2011-02-14 16:09:39 +00:00
|
|
|
.domainShutdown = qemuProcessHandleShutdown,
|
|
|
|
.domainStop = qemuProcessHandleStop,
|
2013-01-07 21:25:01 +00:00
|
|
|
.domainResume = qemuProcessHandleResume,
|
2011-02-14 16:09:39 +00:00
|
|
|
.domainReset = qemuProcessHandleReset,
|
|
|
|
.domainRTCChange = qemuProcessHandleRTCChange,
|
|
|
|
.domainWatchdog = qemuProcessHandleWatchdog,
|
|
|
|
.domainIOError = qemuProcessHandleIOError,
|
|
|
|
.domainGraphics = qemuProcessHandleGraphics,
|
2011-07-22 05:57:42 +00:00
|
|
|
.domainBlockJob = qemuProcessHandleBlockJob,
|
2012-03-23 13:44:50 +00:00
|
|
|
.domainTrayChange = qemuProcessHandleTrayChange,
|
2012-03-23 14:43:14 +00:00
|
|
|
.domainPMWakeup = qemuProcessHandlePMWakeup,
|
2012-03-23 14:50:36 +00:00
|
|
|
.domainPMSuspend = qemuProcessHandlePMSuspend,
|
2012-07-12 15:45:57 +00:00
|
|
|
.domainBalloonChange = qemuProcessHandleBalloonChange,
|
2012-10-12 19:13:39 +00:00
|
|
|
.domainPMSuspendDisk = qemuProcessHandlePMSuspendDisk,
|
2013-06-07 10:23:34 +00:00
|
|
|
.domainGuestPanic = qemuProcessHandleGuestPanic,
|
2013-07-11 15:11:02 +00:00
|
|
|
.domainDeviceDeleted = qemuProcessHandleDeviceDeleted,
|
2014-09-17 17:07:50 +00:00
|
|
|
.domainNicRxFilterChanged = qemuProcessHandleNicRxFilterChanged,
|
2014-11-13 13:09:39 +00:00
|
|
|
.domainSerialChange = qemuProcessHandleSerialChanged,
|
2015-05-25 14:57:49 +00:00
|
|
|
.domainSpiceMigrated = qemuProcessHandleSpiceMigrated,
|
2015-05-29 06:37:59 +00:00
|
|
|
.domainMigrationStatus = qemuProcessHandleMigrationStatus,
|
2015-12-08 14:23:35 +00:00
|
|
|
.domainMigrationPass = qemuProcessHandleMigrationPass,
|
2016-04-01 15:48:20 +00:00
|
|
|
.domainAcpiOstInfo = qemuProcessHandleAcpiOstInfo,
|
2017-02-22 16:51:26 +00:00
|
|
|
.domainBlockThreshold = qemuProcessHandleBlockThreshold,
|
2011-02-14 16:09:39 +00:00
|
|
|
};
|
|
|
|
|
2015-11-12 13:54:04 +00:00
|
|
|
static void
|
|
|
|
qemuProcessMonitorReportLogError(qemuMonitorPtr mon,
|
|
|
|
const char *msg,
|
|
|
|
void *opaque);
|
|
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
qemuProcessMonitorLogFree(void *opaque)
|
|
|
|
{
|
|
|
|
qemuDomainLogContextPtr logCtxt = opaque;
|
2017-04-03 08:24:36 +00:00
|
|
|
virObjectUnref(logCtxt);
|
2015-11-12 13:54:04 +00:00
|
|
|
}
|
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
static int
|
2014-08-12 02:54:42 +00:00
|
|
|
qemuConnectMonitor(virQEMUDriverPtr driver, virDomainObjPtr vm, int asyncJob,
|
2015-11-12 13:54:04 +00:00
|
|
|
qemuDomainLogContextPtr logCtxt)
|
2011-02-14 16:09:39 +00:00
|
|
|
{
|
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
|
|
|
int ret = -1;
|
2011-08-16 10:51:36 +00:00
|
|
|
qemuMonitorPtr mon = NULL;
|
2017-03-11 06:23:42 +00:00
|
|
|
unsigned long long timeout = 0;
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2017-02-13 13:36:53 +00:00
|
|
|
if (qemuSecuritySetDaemonSocketLabel(driver->securityManager, vm->def) < 0) {
|
2011-02-14 16:09:39 +00:00
|
|
|
VIR_ERROR(_("Failed to set security context for monitor for %s"),
|
|
|
|
vm->def->name);
|
2015-07-07 16:17:25 +00:00
|
|
|
return -1;
|
2011-02-14 16:09:39 +00:00
|
|
|
}
|
|
|
|
|
2017-03-11 06:23:42 +00:00
|
|
|
/* When using hugepages, the kernel zeroes them out before
|
|
|
|
* handing them over to qemu. This can be very time
|
|
|
|
* consuming. Therefore, add a second to timeout for each
|
|
|
|
* 1GiB of guest RAM. */
|
|
|
|
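/* total_memory is in KiB, so dividing by 1024 * 1024 adds one second per GiB of guest RAM */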
timeout = vm->def->mem.total_memory / (1024 * 1024);
|
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
/* Hold an extra reference because we can't allow 'vm' to be
|
2016-09-20 07:37:21 +00:00
|
|
|
* deleted until the monitor gets its own reference. */
|
2012-07-11 13:35:46 +00:00
|
|
|
virObjectRef(vm);
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2011-11-29 12:33:23 +00:00
|
|
|
ignore_value(virTimeMillisNow(&priv->monStart));
|
2013-01-09 21:00:32 +00:00
|
|
|
virObjectUnlock(vm);
|
2011-08-16 10:51:36 +00:00
|
|
|
|
|
|
|
mon = qemuMonitorOpen(vm,
|
|
|
|
priv->monConfig,
|
|
|
|
priv->monJSON,
|
2017-03-11 06:23:42 +00:00
|
|
|
timeout,
|
2013-07-25 17:26:15 +00:00
|
|
|
&monitorCallbacks,
|
|
|
|
driver);
|
2011-08-16 10:51:36 +00:00
|
|
|
|
2015-11-12 13:54:04 +00:00
|
|
|
if (mon && logCtxt) {
|
2017-04-03 08:24:36 +00:00
|
|
|
virObjectRef(logCtxt);
|
2015-11-12 13:54:04 +00:00
|
|
|
qemuMonitorSetDomainLog(mon,
|
|
|
|
qemuProcessMonitorReportLogError,
|
|
|
|
logCtxt,
|
|
|
|
qemuProcessMonitorLogFree);
|
|
|
|
}
|
qemu: Wire up better early error reporting
The previous patches added infrastructure to report better errors from
monitor in some cases. This patch finalizes this "feature" by enabling
this enhanced error reporting on early phases of VM startup. In these
phases the possibility of qemu producing a useful error message is
really high compared to running it during the whole life cycle. After
the start up is complete, the feature is disabled to provide the usual
error messages so that users are not confused by possibly irrelevant
messages that may be in the domain log.
The original motivation to do this enhancement is to capture errors when
using VFIO device passthrough, where qemu reports errors after the
monitor is initialized and the existing error catching code couldn't
catch this, producing an unhelpful message:
# virsh start test
error: Failed to start domain test
error: Unable to read from monitor: Connection reset by peer
With this change, the message is changed to:
# virsh start test
error: Failed to start domain test
error: internal error: early end of file from monitor: possible problem:
qemu-system-x86_64: -device vfio-pci,host=00:1a.0,id=hostdev0,bus=pci.0,addr=0x5: vfio: error, group 8 is not viable, please ensure all devices within the iommu_group are bound to their vfio bus driver.
qemu-system-x86_64: -device vfio-pci,host=00:1a.0,id=hostdev0,bus=pci.0,addr=0x5: vfio: failed to get group 8
qemu-system-x86_64: -device vfio-pci,host=00:1a.0,id=hostdev0,bus=pci.0,addr=0x5: Device 'vfio-pci' could not be initialized
2013-09-18 14:23:14 +00:00
|
|
|
|
2013-01-09 21:00:32 +00:00
|
|
|
virObjectLock(vm);
|
2013-11-06 10:38:58 +00:00
|
|
|
virObjectUnref(vm);
|
2011-08-16 10:51:36 +00:00
|
|
|
priv->monStart = 0;
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2013-11-06 10:38:58 +00:00
|
|
|
if (!virDomainObjIsActive(vm)) {
|
2011-08-16 10:51:36 +00:00
|
|
|
qemuMonitorClose(mon);
|
2012-09-26 14:46:47 +00:00
|
|
|
mon = NULL;
|
2011-08-16 10:51:36 +00:00
|
|
|
}
|
|
|
|
priv->mon = mon;
|
|
|
|
|
2017-02-13 13:36:53 +00:00
|
|
|
if (qemuSecurityClearSocketLabel(driver->securityManager, vm->def) < 0) {
|
2011-02-14 16:09:39 +00:00
|
|
|
VIR_ERROR(_("Failed to clear security context for monitor for %s"),
|
|
|
|
vm->def->name);
|
2015-07-07 16:17:25 +00:00
|
|
|
return -1;
|
2011-02-14 16:09:39 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (priv->mon == NULL) {
|
|
|
|
VIR_INFO("Failed to connect monitor for %s", vm->def->name);
|
2015-07-07 16:17:25 +00:00
|
|
|
return -1;
|
2011-02-14 16:09:39 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2014-08-12 02:54:42 +00:00
|
|
|
if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
|
2014-12-16 09:40:58 +00:00
|
|
|
return -1;
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2015-07-07 16:17:25 +00:00
|
|
|
if (qemuMonitorSetCapabilities(priv->mon) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_EVENT) &&
|
|
|
|
qemuMonitorSetMigrationCapability(priv->mon,
|
|
|
|
QEMU_MONITOR_MIGRATION_CAPS_EVENTS,
|
|
|
|
true) < 0) {
|
|
|
|
VIR_DEBUG("Cannot enable migration events; clearing capability");
|
|
|
|
virQEMUCapsClear(priv->qemuCaps, QEMU_CAPS_MIGRATION_EVENT);
|
|
|
|
}
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2015-07-07 16:17:25 +00:00
|
|
|
ret = 0;
|
|
|
|
|
|
|
|
cleanup:
|
|
|
|
if (qemuDomainObjExitMonitor(driver, vm) < 0)
|
|
|
|
ret = -1;
|
2011-02-14 16:09:39 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2013-09-18 12:43:52 +00:00
|
|
|
|
|
|
|
/**
|
|
|
|
* qemuProcessReadLog: Read log file of a qemu VM
|
2015-11-12 13:54:04 +00:00
|
|
|
* @logCtxt: the domain log context
|
2015-11-12 11:01:07 +00:00
|
|
|
* @msg: pointer to buffer to store the read messages in
|
2013-09-18 12:43:52 +00:00
|
|
|
*
|
|
|
|
* Reads log of a qemu VM. Skips messages not produced by qemu or irrelevant
|
2015-11-12 11:01:07 +00:00
|
|
|
* messages. Returns 0 on success or -1 on error
|
2013-09-18 12:43:52 +00:00
|
|
|
*/
|
2015-11-12 11:01:07 +00:00
|
|
|
static int
|
2015-11-12 13:54:04 +00:00
|
|
|
qemuProcessReadLog(qemuDomainLogContextPtr logCtxt, char **msg)
|
2013-04-22 16:16:57 +00:00
|
|
|
{
|
2015-11-12 11:01:07 +00:00
|
|
|
char *buf;
|
|
|
|
ssize_t got;
|
2013-04-22 16:16:57 +00:00
|
|
|
char *eol;
|
2015-11-12 11:01:07 +00:00
|
|
|
char *filter_next;
|
2013-04-22 16:16:57 +00:00
|
|
|
|
2015-11-12 13:54:04 +00:00
|
|
|
if ((got = qemuDomainLogContextRead(logCtxt, &buf)) < 0)
|
2015-11-12 11:01:07 +00:00
|
|
|
return -1;
|
2013-12-03 16:38:14 +00:00
|
|
|
|
2015-11-12 11:01:07 +00:00
|
|
|
/* Filter out debug messages from intermediate libvirt process */
|
|
|
|
filter_next = buf;
|
|
|
|
while ((eol = strchr(filter_next, '\n'))) {
|
|
|
|
*eol = '\0';
|
|
|
|
if (virLogProbablyLogMessage(filter_next) ||
|
|
|
|
STRPREFIX(filter_next, "char device redirected to")) {
|
|
|
|
size_t skip = (eol + 1) - filter_next;
|
2016-01-18 09:50:14 +00:00
|
|
|
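/* shift the rest of the buffer over the filtered-out line */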
memmove(filter_next, eol + 1, buf + got - eol);
|
2015-11-12 11:01:07 +00:00
|
|
|
got -= skip;
|
|
|
|
} else {
|
|
|
|
filter_next = eol + 1;
|
|
|
|
*eol = '\n';
|
2013-12-03 16:38:14 +00:00
|
|
|
}
|
|
|
|
}
|
2015-11-12 11:01:07 +00:00
|
|
|
filter_next = NULL; /* silence false coverity warning */
|
2013-12-03 16:38:14 +00:00
|
|
|
|
2015-11-12 13:54:04 +00:00
|
|
|
if (got > 0 &&
|
|
|
|
buf[got - 1] == '\n') {
|
2015-11-12 11:01:07 +00:00
|
|
|
buf[got - 1] = '\0';
|
|
|
|
got--;
|
2013-12-03 16:38:14 +00:00
|
|
|
}
|
2015-11-12 13:54:04 +00:00
|
|
|
ignore_value(VIR_REALLOC_N_QUIET(buf, got + 1));
|
2015-11-12 11:01:07 +00:00
|
|
|
*msg = buf;
|
|
|
|
return 0;
|
|
|
|
}
|
2013-12-03 16:38:14 +00:00
|
|
|
|
|
|
|
|
2015-11-12 13:54:04 +00:00
|
|
|
static int
|
|
|
|
qemuProcessReportLogError(qemuDomainLogContextPtr logCtxt,
|
2015-11-12 11:01:07 +00:00
|
|
|
const char *msgprefix)
|
|
|
|
{
|
|
|
|
char *logmsg = NULL;
|
|
|
|
|
2015-11-12 13:54:04 +00:00
|
|
|
if (qemuProcessReadLog(logCtxt, &logmsg) < 0)
|
2015-11-12 11:01:07 +00:00
|
|
|
return -1;
|
|
|
|
|
|
|
|
virResetLastError();
|
2016-06-08 10:03:38 +00:00
|
|
|
if (virStringIsEmpty(logmsg))
|
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR, "%s", msgprefix);
|
|
|
|
else
|
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR, _("%s: %s"), msgprefix, logmsg);
|
|
|
|
|
2015-11-12 11:01:07 +00:00
|
|
|
VIR_FREE(logmsg);
|
|
|
|
return 0;
|
2013-12-03 16:38:14 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2015-11-12 13:54:04 +00:00
|
|
|
static void
|
|
|
|
qemuProcessMonitorReportLogError(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
|
|
|
|
const char *msg,
|
|
|
|
void *opaque)
|
|
|
|
{
|
|
|
|
qemuDomainLogContextPtr logCtxt = opaque;
|
|
|
|
qemuProcessReportLogError(logCtxt, msg);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-03-30 09:07:59 +00:00
|
|
|
static int
|
2017-07-10 21:30:03 +00:00
|
|
|
qemuProcessLookupPTYs(virDomainChrDefPtr *devices,
|
2011-03-30 09:07:59 +00:00
|
|
|
int count,
|
2014-11-13 15:17:21 +00:00
|
|
|
virHashTablePtr info)
|
2011-03-30 09:07:59 +00:00
|
|
|
{
|
Convert 'int i' to 'size_t i' in src/qemu files
Convert the type of loop iterators named 'i', 'j', 'k',
'ii', 'jj', 'kk', to be 'size_t' instead of 'int' or
'unsigned int', also sanitizing 'ii', 'jj', 'kk' to use
the normal 'i', 'j', 'k' naming
Signed-off-by: Daniel P. Berrange <berrange@redhat.com>
2013-07-08 14:09:33 +00:00
|
|
|
size_t i;
|
2011-03-30 09:07:59 +00:00
|
|
|
|
2013-05-21 07:21:20 +00:00
|
|
|
for (i = 0; i < count; i++) {
|
2011-03-30 09:07:59 +00:00
|
|
|
virDomainChrDefPtr chr = devices[i];
|
2016-10-21 11:45:54 +00:00
|
|
|
if (chr->source->type == VIR_DOMAIN_CHR_TYPE_PTY) {
|
2011-06-08 16:25:11 +00:00
|
|
|
char id[32];
|
2014-11-13 18:29:14 +00:00
|
|
|
qemuMonitorChardevInfoPtr entry;
|
2011-03-30 09:07:59 +00:00
|
|
|
|
2017-06-26 18:01:52 +00:00
|
|
|
if (snprintf(id, sizeof(id), "char%s",
|
2014-11-13 19:10:51 +00:00
|
|
|
chr->info.alias) >= sizeof(id)) {
|
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
|
|
|
|
_("failed to format device alias "
|
|
|
|
"for PTY retrieval"));
|
2011-03-30 09:07:59 +00:00
|
|
|
return -1;
|
2014-11-13 19:10:51 +00:00
|
|
|
}
|
2011-03-30 09:07:59 +00:00
|
|
|
|
2014-11-13 18:29:14 +00:00
|
|
|
entry = virHashLookup(info, id);
|
|
|
|
if (!entry || !entry->ptyPath) {
|
2016-10-21 11:45:54 +00:00
|
|
|
if (chr->source->data.file.path == NULL) {
|
2011-03-30 09:07:59 +00:00
|
|
|
/* neither the log output nor 'info chardev' had a
|
|
|
|
* pty path for this chardev, report an error
|
|
|
|
*/
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
_("no assigned pty for device %s"), id);
|
2011-03-30 09:07:59 +00:00
|
|
|
return -1;
|
|
|
|
} else {
|
|
|
|
/* 'info chardev' had no pty path for this chardev,
|
|
|
|
* but the log output had, so we're fine
|
|
|
|
*/
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-10-21 11:45:54 +00:00
|
|
|
VIR_FREE(chr->source->data.file.path);
|
|
|
|
if (VIR_STRDUP(chr->source->data.file.path, entry->ptyPath) < 0)
|
2011-03-30 09:07:59 +00:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
static int
|
|
|
|
qemuProcessFindCharDevicePTYsMonitor(virDomainObjPtr vm,
|
2014-11-13 15:17:21 +00:00
|
|
|
virHashTablePtr info)
|
2011-02-14 16:09:39 +00:00
|
|
|
{
|
Convert 'int i' to 'size_t i' in src/qemu files
Convert the type of loop iterators named 'i', 'j', 'k',
'ii', 'jj', 'kk', to be 'size_t' instead of 'int' or
'unsigned int', also sanitizing 'ii', 'jj', 'kk' to use
the normal 'i', 'j', 'k' naming
Signed-off-by: Daniel P. Berrange <berrange@redhat.com>
2013-07-08 14:09:33 +00:00
|
|
|
size_t i = 0;
|
2011-06-08 16:25:11 +00:00
|
|
|
|
2017-07-10 21:30:03 +00:00
|
|
|
if (qemuProcessLookupPTYs(vm->def->serials, vm->def->nserials, info) < 0)
|
2011-03-30 09:07:59 +00:00
|
|
|
return -1;
|
|
|
|
|
2017-07-10 21:30:03 +00:00
|
|
|
if (qemuProcessLookupPTYs(vm->def->parallels, vm->def->nparallels,
|
2014-11-13 15:17:21 +00:00
|
|
|
info) < 0)
|
2011-03-30 09:07:59 +00:00
|
|
|
return -1;
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2017-07-10 21:30:03 +00:00
|
|
|
if (qemuProcessLookupPTYs(vm->def->channels, vm->def->nchannels, info) < 0)
|
2011-03-30 09:07:59 +00:00
|
|
|
return -1;
|
2013-01-02 14:36:33 +00:00
|
|
|
/* For historical reasons, console[0] can be just an alias
|
|
|
|
* for serial[0]. That's why we need to update it as well. */
|
|
|
|
if (vm->def->nconsoles) {
|
|
|
|
virDomainChrDefPtr chr = vm->def->consoles[0];
|
2011-03-30 09:07:59 +00:00
|
|
|
|
2013-01-02 14:36:33 +00:00
|
|
|
if (vm->def->nserials &&
|
|
|
|
chr->deviceType == VIR_DOMAIN_CHR_DEVICE_TYPE_CONSOLE &&
|
|
|
|
chr->targetType == VIR_DOMAIN_CHR_CONSOLE_TARGET_TYPE_SERIAL) {
|
|
|
|
/* yes, the first console is just an alias for serials[0] */
|
|
|
|
i = 1;
|
2016-10-21 11:45:54 +00:00
|
|
|
if (virDomainChrSourceDefCopy(chr->source,
|
|
|
|
((vm->def->serials[0])->source)) < 0)
|
2013-01-02 14:36:33 +00:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-07-10 21:30:03 +00:00
|
|
|
if (qemuProcessLookupPTYs(vm->def->consoles + i, vm->def->nconsoles - i,
|
2014-11-13 15:17:21 +00:00
|
|
|
info) < 0)
|
2011-03-30 09:07:59 +00:00
|
|
|
return -1;
|
2011-02-14 16:09:39 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2014-11-13 19:16:46 +00:00
|
|
|
static int
|
2014-11-19 09:31:21 +00:00
|
|
|
qemuProcessRefreshChannelVirtioState(virQEMUDriverPtr driver,
|
|
|
|
virDomainObjPtr vm,
|
|
|
|
virHashTablePtr info,
|
|
|
|
int booted)
|
2014-11-13 19:16:46 +00:00
|
|
|
{
|
|
|
|
size_t i;
|
2014-11-19 09:31:21 +00:00
|
|
|
int agentReason = VIR_CONNECT_DOMAIN_EVENT_AGENT_LIFECYCLE_REASON_CHANNEL;
|
2014-11-13 19:16:46 +00:00
|
|
|
qemuMonitorChardevInfoPtr entry;
|
2014-11-19 09:31:21 +00:00
|
|
|
virObjectEventPtr event = NULL;
|
2014-11-13 19:16:46 +00:00
|
|
|
char id[32];
|
|
|
|
|
2014-11-19 09:31:21 +00:00
|
|
|
if (booted)
|
|
|
|
agentReason = VIR_CONNECT_DOMAIN_EVENT_AGENT_LIFECYCLE_REASON_DOMAIN_STARTED;
|
|
|
|
|
2014-11-13 19:16:46 +00:00
|
|
|
for (i = 0; i < vm->def->nchannels; i++) {
|
|
|
|
virDomainChrDefPtr chr = vm->def->channels[i];
|
|
|
|
if (chr->targetType == VIR_DOMAIN_CHR_CHANNEL_TARGET_TYPE_VIRTIO) {
|
|
|
|
if (snprintf(id, sizeof(id), "char%s",
|
|
|
|
chr->info.alias) >= sizeof(id)) {
|
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
|
|
|
|
_("failed to format device alias "
|
|
|
|
"for PTY retrieval"));
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* port state not reported */
|
|
|
|
if (!(entry = virHashLookup(info, id)) ||
|
|
|
|
!entry->state)
|
|
|
|
continue;
|
|
|
|
|
2014-11-19 09:31:21 +00:00
|
|
|
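/* emit an agent lifecycle event when the guest agent channel changes state */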
if (entry->state != VIR_DOMAIN_CHR_DEVICE_STATE_DEFAULT &&
|
|
|
|
STREQ_NULLABLE(chr->target.name, "org.qemu.guest_agent.0") &&
|
|
|
|
(event = virDomainEventAgentLifecycleNewFromObj(vm, entry->state,
|
|
|
|
agentReason)))
|
|
|
|
qemuDomainEventQueue(driver, event);
|
|
|
|
|
2014-11-13 19:16:46 +00:00
|
|
|
chr->state = entry->state;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2016-01-08 16:03:48 +00:00
|
|
|
int
|
|
|
|
qemuRefreshVirtioChannelState(virQEMUDriverPtr driver,
|
2016-06-29 13:52:49 +00:00
|
|
|
virDomainObjPtr vm,
|
|
|
|
qemuDomainAsyncJob asyncJob)
|
2014-11-13 19:16:46 +00:00
|
|
|
{
|
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
|
|
|
virHashTablePtr info = NULL;
|
|
|
|
int ret = -1;
|
|
|
|
|
2016-06-29 13:52:49 +00:00
|
|
|
if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
2014-11-13 19:16:46 +00:00
|
|
|
ret = qemuMonitorGetChardevInfo(priv->mon, &info);
|
2014-12-16 09:40:58 +00:00
|
|
|
if (qemuDomainObjExitMonitor(driver, vm) < 0)
|
|
|
|
ret = -1;
|
2014-11-13 19:16:46 +00:00
|
|
|
|
|
|
|
if (ret < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
2014-11-19 09:31:21 +00:00
|
|
|
ret = qemuProcessRefreshChannelVirtioState(driver, vm, info, false);
|
2014-11-13 19:16:46 +00:00
|
|
|
|
|
|
|
cleanup:
|
|
|
|
virHashFree(info);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2016-04-29 16:01:39 +00:00
|
|
|
static void
|
|
|
|
qemuRefreshRTC(virQEMUDriverPtr driver,
|
|
|
|
virDomainObjPtr vm)
|
|
|
|
{
|
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
|
|
|
time_t now, then;
|
|
|
|
struct tm thenbits;
|
|
|
|
long localOffset;
|
|
|
|
int rv;
|
|
|
|
|
|
|
|
if (vm->def->clock.offset != VIR_DOMAIN_CLOCK_OFFSET_VARIABLE)
|
|
|
|
return;
|
|
|
|
|
|
|
|
memset(&thenbits, 0, sizeof(thenbits));
|
|
|
|
qemuDomainObjEnterMonitor(driver, vm);
|
|
|
|
now = time(NULL);
|
|
|
|
rv = qemuMonitorGetRTCTime(priv->mon, &thenbits);
|
|
|
|
if (qemuDomainObjExitMonitor(driver, vm) < 0)
|
|
|
|
rv = -1;
|
|
|
|
|
|
|
|
if (rv < 0)
|
|
|
|
return;
|
|
|
|
|
|
|
|
thenbits.tm_isdst = -1;
|
|
|
|
if ((then = mktime(&thenbits)) == (time_t) -1) {
|
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
|
|
|
|
_("Unable to convert time"));
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Thing is, @now is in local TZ but @then in UTC. */
|
|
|
|
if (virTimeLocalOffsetFromUTC(&localOffset) < 0)
|
|
|
|
return;
|
|
|
|
|
|
|
|
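/* store the guest RTC's offset from host time as the variable clock adjustment */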
vm->def->clock.data.variable.adjustment = then - now + localOffset;
|
|
|
|
}
|
2014-11-13 19:16:46 +00:00
|
|
|
|
2016-04-06 13:57:57 +00:00
|
|
|
int
|
2015-06-30 14:31:24 +00:00
|
|
|
qemuProcessRefreshBalloonState(virQEMUDriverPtr driver,
|
|
|
|
virDomainObjPtr vm,
|
|
|
|
int asyncJob)
|
|
|
|
{
|
|
|
|
unsigned long long balloon;
|
|
|
|
int rc;
|
|
|
|
|
|
|
|
/* if no ballooning is available, the current size equals the current
|
|
|
|
* full memory size */
|
2016-04-06 13:02:31 +00:00
|
|
|
if (!virDomainDefHasMemballoon(vm->def)) {
|
2016-06-15 13:34:04 +00:00
|
|
|
vm->def->mem.cur_balloon = virDomainDefGetMemoryTotal(vm->def);
|
2015-06-30 14:31:24 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
rc = qemuMonitorGetBalloonInfo(qemuDomainGetMonitor(vm), &balloon);
|
2016-04-06 13:43:40 +00:00
|
|
|
if (qemuDomainObjExitMonitor(driver, vm) < 0 || rc < 0)
|
2015-06-30 14:31:24 +00:00
|
|
|
return -1;
|
|
|
|
|
|
|
|
vm->def->mem.cur_balloon = balloon;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
static int
|
2012-11-28 16:43:10 +00:00
|
|
|
qemuProcessWaitForMonitor(virQEMUDriverPtr driver,
|
2011-06-08 16:25:11 +00:00
|
|
|
virDomainObjPtr vm,
|
2014-08-12 02:54:42 +00:00
|
|
|
int asyncJob,
|
2015-11-12 14:28:36 +00:00
|
|
|
qemuDomainLogContextPtr logCtxt)
|
2011-02-14 16:09:39 +00:00
|
|
|
{
|
|
|
|
int ret = -1;
|
2014-11-13 15:17:21 +00:00
|
|
|
virHashTablePtr info = NULL;
|
build: detect potential uninitialized variables
Even with -Wuninitialized (which is part of autobuild.sh
--enable-compile-warnings=error), gcc does NOT catch this
use of an uninitialized variable:
{
if (cond)
goto error;
int a = 1;
error:
printf("%d", a);
}
which prints 0 (supposing the stack started life wiped) if
cond was true. Clang will catch it, but we don't use clang
as often. Using gcc -Wjump-misses-init catches it, but also
gives false positives:
{
if (cond)
goto error;
int a = 1;
return a;
error:
return 0;
}
Here, a was never used in the scope of the error block, so
declaring it after goto is technically fine (and clang agrees).
However, given that our HACKING already documents a preference
to C89 decl-before-statement, the false positive warning is
enough of a prod to comply with HACKING.
[Personally, I'd _really_ rather use C99 decl-after-statement
to minimize scope, but until gcc can efficiently and reliably
catch scoping and uninitialized usage bugs, I'll settle with
the compromise of enforcing a coding standard that happens to
reject false positives if it can also detect real bugs.]
* acinclude.m4 (LIBVIRT_COMPILE_WARNINGS): Add -Wjump-misses-init.
* src/util/util.c (__virExec): Adjust offenders.
* src/conf/domain_conf.c (virDomainTimerDefParseXML): Likewise.
* src/remote/remote_driver.c (doRemoteOpen): Likewise.
* src/phyp/phyp_driver.c (phypGetLparNAME, phypGetLparProfile)
(phypGetVIOSFreeSCSIAdapter, phypVolumeGetKey)
(phypGetStoragePoolDevice)
(phypVolumeGetPhysicalVolumeByStoragePool)
(phypVolumeGetPath): Likewise.
* src/vbox/vbox_tmpl.c (vboxNetworkUndefineDestroy)
(vboxNetworkCreate, vboxNetworkDumpXML)
(vboxNetworkDefineCreateXML): Likewise.
* src/xenapi/xenapi_driver.c (getCapsObject)
(xenapiDomainDumpXML): Likewise.
* src/xenapi/xenapi_utils.c (createVMRecordFromXml): Likewise.
* src/security/security_selinux.c (SELinuxGenNewContext):
Likewise.
* src/qemu/qemu_command.c (qemuBuildCommandLine): Likewise.
* src/qemu/qemu_hotplug.c (qemuDomainChangeEjectableMedia):
Likewise.
* src/qemu/qemu_process.c (qemuProcessWaitForMonitor): Likewise.
* src/qemu/qemu_monitor_text.c (qemuMonitorTextGetPtyPaths):
Likewise.
* src/qemu/qemu_driver.c (qemudDomainShutdown)
(qemudDomainBlockStats, qemudDomainMemoryPeek): Likewise.
* src/storage/storage_backend_iscsi.c
(virStorageBackendCreateIfaceIQN): Likewise.
* src/node_device/node_device_udev.c (udevProcessPCI): Likewise.
2011-04-01 15:41:45 +00:00
|
|
|
qemuDomainObjPrivatePtr priv;
|
2011-02-14 16:09:39 +00:00
|
|
|
|
|
|
|
VIR_DEBUG("Connect monitor to %p '%s'", vm, vm->def->name);
|
2015-11-12 13:54:04 +00:00
|
|
|
if (qemuConnectMonitor(driver, vm, asyncJob, logCtxt) < 0)
|
2011-02-14 16:09:39 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
/* Try to get the pty path mappings again via the monitor. This is much more
|
|
|
|
* reliable if it's available.
|
|
|
|
* Note that the monitor itself can be on a pty, so we still need to try the
|
|
|
|
* log output method. */
|
2011-04-01 15:41:45 +00:00
|
|
|
priv = vm->privateData;
|
2014-08-12 02:54:42 +00:00
|
|
|
if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
|
|
|
|
goto cleanup;
|
2014-11-13 15:17:21 +00:00
|
|
|
ret = qemuMonitorGetChardevInfo(priv->mon, &info);
|
|
|
|
VIR_DEBUG("qemuMonitorGetChardevInfo returned %i", ret);
|
2014-12-16 09:40:58 +00:00
|
|
|
if (qemuDomainObjExitMonitor(driver, vm) < 0)
|
|
|
|
ret = -1;
|
|
|
|
|
2014-11-13 19:16:46 +00:00
|
|
|
if (ret == 0) {
|
2017-07-10 21:30:03 +00:00
|
|
|
if ((ret = qemuProcessFindCharDevicePTYsMonitor(vm, info)) < 0)
|
2014-11-13 19:16:46 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
2014-11-19 09:31:21 +00:00
|
|
|
if ((ret = qemuProcessRefreshChannelVirtioState(driver, vm, info,
|
|
|
|
true)) < 0)
|
2014-11-13 19:16:46 +00:00
|
|
|
goto cleanup;
|
|
|
|
}
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2014-03-25 06:49:44 +00:00
|
|
|
cleanup:
|
2014-11-13 15:17:21 +00:00
|
|
|
virHashFree(info);
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2015-11-12 13:54:04 +00:00
|
|
|
if (logCtxt && kill(vm->pid, 0) == -1 && errno == ESRCH) {
|
|
|
|
qemuProcessReportLogError(logCtxt,
|
2015-11-12 11:01:07 +00:00
|
|
|
_("process exited while connecting to monitor"));
|
2011-02-14 16:09:39 +00:00
|
|
|
ret = -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2011-06-20 07:16:16 +00:00
|
|
|
|
2014-09-03 13:07:38 +00:00
|
|
|
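/**
 * qemuProcessDetectIOThreadPIDs:
 *
 * Query QEMU for its IOThreads and record the thread ID of each one in the
 * matching iothreadids entry of the domain definition. It is an error if
 * QEMU reports a different number of IOThreads than the definition contains.
 */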
static int
|
|
|
|
qemuProcessDetectIOThreadPIDs(virQEMUDriverPtr driver,
|
|
|
|
virDomainObjPtr vm,
|
|
|
|
int asyncJob)
|
|
|
|
{
|
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
2015-03-25 15:59:37 +00:00
|
|
|
qemuMonitorIOThreadInfoPtr *iothreads = NULL;
|
2014-09-03 13:07:38 +00:00
|
|
|
int niothreads = 0;
|
|
|
|
int ret = -1;
|
|
|
|
size_t i;
|
|
|
|
|
2017-03-08 09:45:10 +00:00
|
|
|
if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_OBJECT_IOTHREAD)) {
|
|
|
|
ret = 0;
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2014-09-03 13:07:38 +00:00
|
|
|
/* Get the list of IOThreads from qemu */
|
|
|
|
if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
niothreads = qemuMonitorGetIOThreads(priv->mon, &iothreads);
|
2014-12-16 09:40:58 +00:00
|
|
|
if (qemuDomainObjExitMonitor(driver, vm) < 0)
|
|
|
|
goto cleanup;
|
2014-09-15 23:57:22 +00:00
|
|
|
if (niothreads < 0)
|
2014-09-03 13:07:38 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
2015-10-15 14:26:26 +00:00
|
|
|
if (niothreads != vm->def->niothreadids) {
|
2014-09-03 13:07:38 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
_("got wrong number of IOThread pids from QEMU monitor. "
|
2015-10-15 14:26:26 +00:00
|
|
|
"got %d, wanted %zu"),
|
|
|
|
niothreads, vm->def->niothreadids);
|
2014-09-03 13:07:38 +00:00
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2015-04-27 18:24:34 +00:00
|
|
|
/* Nothing to do */
|
|
|
|
if (niothreads == 0) {
|
|
|
|
ret = 0;
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2015-04-10 13:21:23 +00:00
|
|
|
for (i = 0; i < niothreads; i++) {
|
|
|
|
virDomainIOThreadIDDefPtr iothrid;
|
|
|
|
|
2015-04-27 18:16:54 +00:00
|
|
|
if (!(iothrid = virDomainIOThreadIDFind(vm->def,
|
|
|
|
iothreads[i]->iothread_id))) {
|
2015-04-10 13:21:23 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
2015-04-27 18:16:54 +00:00
|
|
|
_("iothread %d not found"),
|
|
|
|
iothreads[i]->iothread_id);
|
2015-04-10 13:21:23 +00:00
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
iothrid->thread_id = iothreads[i]->thread_id;
|
|
|
|
}
|
2014-09-03 13:07:38 +00:00
|
|
|
|
|
|
|
ret = 0;
|
|
|
|
|
|
|
|
cleanup:
|
|
|
|
if (iothreads) {
|
|
|
|
for (i = 0; i < niothreads; i++)
|
2015-04-28 10:32:52 +00:00
|
|
|
VIR_FREE(iothreads[i]);
|
2014-09-03 13:07:38 +00:00
|
|
|
VIR_FREE(iothreads);
|
|
|
|
}
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2012-10-24 09:22:52 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* To be run between fork/exec of QEMU only
|
|
|
|
*/
|
|
|
|
static int
|
2015-03-27 12:11:37 +00:00
|
|
|
qemuProcessInitCpuAffinity(virDomainObjPtr vm)
|
2012-10-24 09:22:52 +00:00
|
|
|
{
|
|
|
|
int ret = -1;
|
|
|
|
virBitmapPtr cpumap = NULL;
|
|
|
|
virBitmapPtr cpumapToSet = NULL;
|
2016-11-25 13:57:05 +00:00
|
|
|
virBitmapPtr hostcpumap = NULL;
|
2014-12-12 14:23:12 +00:00
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
2012-10-24 09:22:52 +00:00
|
|
|
|
2013-07-22 14:21:15 +00:00
|
|
|
if (!vm->pid) {
|
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
|
|
|
|
_("Cannot setup CPU affinity until process is started"));
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2012-10-24 09:22:52 +00:00
|
|
|
if (vm->def->placement_mode == VIR_DOMAIN_CPU_PLACEMENT_MODE_AUTO) {
|
|
|
|
VIR_DEBUG("Set CPU affinity with advisory nodeset from numad");
|
2015-03-27 12:11:37 +00:00
|
|
|
cpumapToSet = priv->autoCpuset;
|
2011-02-14 16:09:39 +00:00
|
|
|
} else {
|
numad: Set memory policy from numad advisory nodeset
Though numad will manage the memory allocation of task dynamically,
it wants management application (libvirt) to pre-set the memory
policy according to the advisory nodeset returned from querying numad,
(just like pre-bind CPU nodeset for domain process), and thus the
performance could benefit much more from it.
This patch introduces new XML tag 'placement', value 'auto' indicates
whether to set the memory policy with the advisory nodeset from numad,
and its value defaults to the value of <vcpu> placement, or 'static'
if 'nodeset' is specified. Example of the new XML tag's usage:
<numatune>
<memory placement='auto' mode='interleave'/>
</numatune>
Just like what current "numatune" does, the 'auto' numa memory policy
setting uses libnuma's API too.
If <vcpu> "placement" is "auto", and <numatune> is not specified
explicitly, a default <numatume> will be added with "placement"
set as "auto", and "mode" set as "strict".
The following XML can now fully drive numad:
1) <vcpu> placement is 'auto', no <numatune> is specified.
<vcpu placement='auto'>10</vcpu>
2) <vcpu> placement is 'auto', no 'placement' is specified for
<numatune>.
<vcpu placement='auto'>10</vcpu>
<numatune>
<memory mode='interleave'/>
</numatune>
And it's also able to control the CPU placement and memory policy
independently. e.g.
1) <vcpu> placement is 'auto', and <numatune> placement is 'static'
<vcpu placement='auto'>10</vcpu>
<numatune>
<memory mode='strict' nodeset='0-10,^7'/>
</numatune>
2) <vcpu> placement is 'static', and <numatune> placement is 'auto'
<vcpu placement='static' cpuset='0-24,^12'>10</vcpu>
<numatune>
<memory mode='interleave' placement='auto'/>
</numatune>
A follow up patch will change the XML formatting codes to always output
'placement' for <vcpu>, even it's 'static'.
2012-05-08 16:04:34 +00:00
|
|
|
VIR_DEBUG("Set CPU affinity with specified cpuset");
|
2012-03-08 13:36:26 +00:00
|
|
|
if (vm->def->cpumask) {
|
2012-09-14 07:47:01 +00:00
|
|
|
cpumapToSet = vm->def->cpumask;
|
2012-03-08 13:36:26 +00:00
|
|
|
} else {
|
|
|
|
/* You may think this is redundant, but we can't assume libvirtd
|
|
|
|
* itself is running on all pCPUs, so we need to explicitly set
|
|
|
|
* the spawned QEMU instance to all pCPUs if no map is given in
|
|
|
|
* its config file */
|
2015-03-27 12:11:37 +00:00
|
|
|
int hostcpus;
|
|
|
|
|
2016-11-25 13:57:05 +00:00
|
|
|
if (virHostCPUHasBitmap()) {
|
|
|
|
hostcpumap = virHostCPUGetOnlineBitmap();
|
|
|
|
cpumap = virProcessGetAffinity(vm->pid);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (hostcpumap && cpumap && virBitmapEqual(hostcpumap, cpumap)) {
|
|
|
|
/* we're using all available CPUs, no reason to set
|
|
|
|
* mask. If libvirtd is running without explicit
|
|
|
|
* affinity, we can use hotplugged CPUs for this VM */
|
|
|
|
ret = 0;
|
2015-03-27 12:11:37 +00:00
|
|
|
goto cleanup;
|
2016-11-25 13:57:05 +00:00
|
|
|
} else {
|
|
|
|
/* setaffinity fails if you set bits for CPUs which
|
|
|
|
* aren't present, so we have to limit ourselves */
|
|
|
|
if ((hostcpus = virHostCPUGetCount()) < 0)
|
|
|
|
goto cleanup;
|
2015-03-27 12:11:37 +00:00
|
|
|
|
2016-11-25 13:57:05 +00:00
|
|
|
if (hostcpus > QEMUD_CPUMASK_LEN)
|
|
|
|
hostcpus = QEMUD_CPUMASK_LEN;
|
2015-03-27 12:11:37 +00:00
|
|
|
|
2016-11-25 13:57:05 +00:00
|
|
|
virBitmapFree(cpumap);
|
|
|
|
if (!(cpumap = virBitmapNew(hostcpus)))
|
|
|
|
goto cleanup;
|
2015-03-27 12:11:37 +00:00
|
|
|
|
2016-11-25 13:57:05 +00:00
|
|
|
virBitmapSetAll(cpumap);
|
2015-03-27 12:11:37 +00:00
|
|
|
|
2016-11-25 13:57:05 +00:00
|
|
|
cpumapToSet = cpumap;
|
|
|
|
}
|
2012-03-08 13:36:26 +00:00
|
|
|
}
|
2011-02-14 16:09:39 +00:00
|
|
|
}
|
|
|
|
|
2013-07-22 14:21:15 +00:00
|
|
|
if (virProcessSetAffinity(vm->pid, cpumapToSet) < 0)
|
2012-04-13 08:53:17 +00:00
|
|
|
goto cleanup;
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2012-04-13 08:53:17 +00:00
|
|
|
ret = 0;
|
|
|
|
|
2014-03-25 06:49:44 +00:00
|
|
|
cleanup:
|
2012-09-14 07:46:59 +00:00
|
|
|
virBitmapFree(cpumap);
|
2016-11-25 13:57:05 +00:00
|
|
|
virBitmapFree(hostcpumap);
|
2012-04-13 08:53:17 +00:00
|
|
|
return ret;
|
2011-02-14 16:09:39 +00:00
|
|
|
}
|
|
|
|
|
2011-09-06 08:23:47 +00:00
|
|
|
/* set link states to down on interfaces at qemu start */
|
|
|
|
static int
|
2015-11-02 09:50:21 +00:00
|
|
|
qemuProcessSetLinkStates(virQEMUDriverPtr driver,
|
|
|
|
virDomainObjPtr vm,
|
|
|
|
qemuDomainAsyncJob asyncJob)
|
2011-09-06 08:23:47 +00:00
|
|
|
{
|
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
|
|
|
virDomainDefPtr def = vm->def;
|
Convert 'int i' to 'size_t i' in src/qemu files
Convert the type of loop iterators named 'i', 'j', k',
'ii', 'jj', 'kk', to be 'size_t' instead of 'int' or
'unsigned int', also sanitizing 'ii', 'jj', 'kk' to use
the normal 'i', 'j', 'k' naming
Signed-off-by: Daniel P. Berrange <berrange@redhat.com>
2013-07-08 14:09:33 +00:00
|
|
|
size_t i;
|
2015-11-02 09:50:21 +00:00
|
|
|
int ret = -1;
|
|
|
|
int rv;
|
|
|
|
|
|
|
|
if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
|
|
|
|
return -1;
|
2011-09-06 08:23:47 +00:00
|
|
|
|
|
|
|
for (i = 0; i < def->nnets; i++) {
|
|
|
|
if (def->nets[i]->linkstate == VIR_DOMAIN_NET_INTERFACE_LINK_STATE_DOWN) {
|
2015-04-14 13:26:36 +00:00
|
|
|
if (!def->nets[i]->info.alias) {
|
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
|
|
|
|
_("missing alias for network device"));
|
2015-11-02 09:50:21 +00:00
|
|
|
goto cleanup;
|
2015-04-14 13:26:36 +00:00
|
|
|
}
|
|
|
|
|
2011-09-06 08:23:47 +00:00
|
|
|
VIR_DEBUG("Setting link state: %s", def->nets[i]->info.alias);
|
|
|
|
|
2013-02-01 13:48:58 +00:00
|
|
|
if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_NETDEV)) {
|
2013-03-20 15:57:08 +00:00
|
|
|
virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
|
2012-07-18 15:22:03 +00:00
|
|
|
_("Setting of link state is not supported by this qemu"));
|
2015-11-02 09:50:21 +00:00
|
|
|
goto cleanup;
|
2011-09-06 08:23:47 +00:00
|
|
|
}
|
|
|
|
|
2015-11-02 09:50:21 +00:00
|
|
|
rv = qemuMonitorSetLink(priv->mon,
|
|
|
|
def->nets[i]->info.alias,
|
|
|
|
VIR_DOMAIN_NET_INTERFACE_LINK_STATE_DOWN);
|
|
|
|
if (rv < 0) {
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_OPERATION_FAILED,
|
2015-11-02 09:50:21 +00:00
|
|
|
_("Couldn't set link state on interface: %s"),
|
|
|
|
def->nets[i]->info.alias);
|
|
|
|
goto cleanup;
|
2011-09-06 08:23:47 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-11-02 09:50:21 +00:00
|
|
|
ret = 0;
|
|
|
|
|
|
|
|
cleanup:
|
|
|
|
if (qemuDomainObjExitMonitor(driver, vm) < 0)
|
|
|
|
ret = -1;
|
2011-09-06 08:23:47 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2016-06-21 15:33:06 +00:00
|
|
|
/**
|
|
|
|
* qemuProcessSetupPid:
|
|
|
|
*
|
|
|
|
 * This function sets resource properties (affinity, cgroups,
|
|
|
|
* scheduler) for any PID associated with a domain. It should be used
|
|
|
|
 * to set up emulator PIDs as well as vCPU and I/O thread PIDs to
|
|
|
|
* ensure they are all handled the same way.
|
|
|
|
*
|
|
|
|
* Returns 0 on success, -1 on error.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
qemuProcessSetupPid(virDomainObjPtr vm,
|
|
|
|
pid_t pid,
|
|
|
|
virCgroupThreadName nameval,
|
|
|
|
int id,
|
|
|
|
virBitmapPtr cpumask,
|
|
|
|
unsigned long long period,
|
|
|
|
long long quota,
|
|
|
|
virDomainThreadSchedParamPtr sched)
|
|
|
|
{
|
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
|
|
|
virDomainNumatuneMemMode mem_mode;
|
|
|
|
virCgroupPtr cgroup = NULL;
|
|
|
|
virBitmapPtr use_cpumask;
|
|
|
|
char *mem_mask = NULL;
|
|
|
|
int ret = -1;
|
|
|
|
|
|
|
|
if ((period || quota) &&
|
|
|
|
!virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPU)) {
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
|
|
|
|
_("cgroup cpu is required for scheduler tuning"));
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Infer which cpumask shall be used. */
|
|
|
|
if (cpumask)
|
|
|
|
use_cpumask = cpumask;
|
|
|
|
else if (vm->def->placement_mode == VIR_DOMAIN_CPU_PLACEMENT_MODE_AUTO)
|
|
|
|
use_cpumask = priv->autoCpuset;
|
|
|
|
else
|
|
|
|
use_cpumask = vm->def->cpumask;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If CPU cgroup controller is not initialized here, then we need
|
|
|
|
* neither period nor quota settings. And if CPUSET controller is
|
|
|
|
* not initialized either, then there's nothing to do anyway.
|
|
|
|
*/
|
|
|
|
if (virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPU) ||
|
|
|
|
virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPUSET)) {
|
|
|
|
|
|
|
|
if (virDomainNumatuneGetMode(vm->def->numa, -1, &mem_mode) == 0 &&
|
|
|
|
mem_mode == VIR_DOMAIN_NUMATUNE_MEM_STRICT &&
|
|
|
|
virDomainNumatuneMaybeFormatNodeset(vm->def->numa,
|
|
|
|
priv->autoNodeset,
|
|
|
|
&mem_mask, -1) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
if (virCgroupNewThread(priv->cgroup, nameval, id, true, &cgroup) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
if (virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPUSET)) {
|
|
|
|
if (use_cpumask &&
|
|
|
|
qemuSetupCgroupCpusetCpus(cgroup, use_cpumask) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
/*
|
|
|
|
 * Don't set up cpuset.mems for the emulator; they need to
|
|
|
|
* be set up after initialization in order for kvm
|
|
|
|
* allocations to succeed.
|
|
|
|
*/
|
|
|
|
if (nameval != VIR_CGROUP_THREAD_EMULATOR &&
|
|
|
|
mem_mask && virCgroupSetCpusetMems(cgroup, mem_mask) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
if ((period || quota) &&
|
|
|
|
qemuSetupCgroupVcpuBW(cgroup, period, quota) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
/* Move the thread to the sub dir */
|
|
|
|
if (virCgroupAddTask(cgroup, pid) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
    /* Set up legacy affinity. */
|
|
|
|
if (use_cpumask && virProcessSetAffinity(pid, use_cpumask) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
/* Set scheduler type and priority. */
|
|
|
|
if (sched &&
|
|
|
|
virProcessSetScheduler(pid, sched->policy, sched->priority) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
ret = 0;
|
|
|
|
cleanup:
|
|
|
|
VIR_FREE(mem_mask);
|
|
|
|
if (cgroup) {
|
|
|
|
if (ret < 0)
|
|
|
|
virCgroupRemove(cgroup);
|
|
|
|
virCgroupFree(&cgroup);
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2012-08-21 09:18:34 +00:00
|
|
|
static int
|
2016-02-24 13:45:44 +00:00
|
|
|
qemuProcessSetupEmulator(virDomainObjPtr vm)
|
2012-08-21 09:18:34 +00:00
|
|
|
{
|
2016-07-04 15:14:43 +00:00
|
|
|
return qemuProcessSetupPid(vm, vm->pid, VIR_CGROUP_THREAD_EMULATOR,
|
|
|
|
0, vm->def->cputune.emulatorpin,
|
|
|
|
vm->def->cputune.emulator_period,
|
|
|
|
vm->def->cputune.emulator_quota,
|
|
|
|
NULL);
|
2012-08-21 09:18:34 +00:00
|
|
|
}
|
|
|
|
|
2015-01-08 14:37:50 +00:00
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
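/**
 * qemuProcessInitPasswords:
 *
 * Push the configured VNC/SPICE passwords and the passphrases of
 * qcow-encrypted disks into QEMU via the monitor.
 */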
static int
|
|
|
|
qemuProcessInitPasswords(virConnectPtr conn,
|
2012-11-28 16:43:10 +00:00
|
|
|
virQEMUDriverPtr driver,
|
2014-08-12 02:54:42 +00:00
|
|
|
virDomainObjPtr vm,
|
|
|
|
int asyncJob)
|
2011-02-14 16:09:39 +00:00
|
|
|
{
|
|
|
|
int ret = 0;
|
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
2013-01-10 21:03:14 +00:00
|
|
|
virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
|
2013-07-08 14:09:33 +00:00
|
|
|
size_t i;
|
2015-01-07 13:50:34 +00:00
|
|
|
char *alias = NULL;
|
|
|
|
char *secret = NULL;
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2013-05-21 07:21:20 +00:00
|
|
|
for (i = 0; i < vm->def->ngraphics; ++i) {
|
2012-11-10 01:40:23 +00:00
|
|
|
virDomainGraphicsDefPtr graphics = vm->def->graphics[i];
|
|
|
|
if (graphics->type == VIR_DOMAIN_GRAPHICS_TYPE_VNC) {
|
2011-02-14 16:09:39 +00:00
|
|
|
ret = qemuDomainChangeGraphicsPasswords(driver, vm,
|
|
|
|
VIR_DOMAIN_GRAPHICS_TYPE_VNC,
|
2012-11-10 01:40:23 +00:00
|
|
|
&graphics->data.vnc.auth,
|
2014-08-12 02:54:42 +00:00
|
|
|
cfg->vncPassword,
|
|
|
|
asyncJob);
|
2012-11-10 01:40:23 +00:00
|
|
|
} else if (graphics->type == VIR_DOMAIN_GRAPHICS_TYPE_SPICE) {
|
2011-02-14 16:09:39 +00:00
|
|
|
ret = qemuDomainChangeGraphicsPasswords(driver, vm,
|
|
|
|
VIR_DOMAIN_GRAPHICS_TYPE_SPICE,
|
2012-11-10 01:40:23 +00:00
|
|
|
&graphics->data.spice.auth,
|
2014-08-12 02:54:42 +00:00
|
|
|
cfg->spicePassword,
|
|
|
|
asyncJob);
|
2011-02-14 16:09:39 +00:00
|
|
|
}
|
|
|
|
|
2013-07-01 07:23:04 +00:00
|
|
|
if (ret < 0)
|
|
|
|
goto cleanup;
|
|
|
|
}
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2016-01-22 19:09:22 +00:00
|
|
|
for (i = 0; i < vm->def->ndisks; i++) {
|
|
|
|
size_t secretLen;
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2016-01-22 19:09:22 +00:00
|
|
|
if (!vm->def->disks[i]->src->encryption ||
|
|
|
|
!virDomainDiskGetSource(vm->def->disks[i]))
|
|
|
|
continue;
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2016-06-01 19:01:31 +00:00
|
|
|
if (vm->def->disks[i]->src->encryption->format !=
|
|
|
|
VIR_STORAGE_ENCRYPTION_FORMAT_DEFAULT &&
|
|
|
|
vm->def->disks[i]->src->encryption->format !=
|
|
|
|
VIR_STORAGE_ENCRYPTION_FORMAT_QCOW)
|
|
|
|
continue;
|
|
|
|
|
2016-01-22 19:09:22 +00:00
|
|
|
VIR_FREE(secret);
|
|
|
|
if (qemuProcessGetVolumeQcowPassphrase(conn,
|
|
|
|
vm->def->disks[i],
|
|
|
|
&secret, &secretLen) < 0)
|
|
|
|
goto cleanup;
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2016-01-22 19:09:22 +00:00
|
|
|
VIR_FREE(alias);
|
2016-08-01 12:11:44 +00:00
|
|
|
if (!(alias = qemuAliasFromDisk(vm->def->disks[i])))
|
2016-01-22 19:09:22 +00:00
|
|
|
goto cleanup;
|
|
|
|
if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
ret = qemuMonitorSetDrivePassphrase(priv->mon, alias, secret);
|
|
|
|
if (qemuDomainObjExitMonitor(driver, vm) < 0)
|
|
|
|
ret = -1;
|
|
|
|
if (ret < 0)
|
|
|
|
goto cleanup;
|
2011-02-14 16:09:39 +00:00
|
|
|
}
|
|
|
|
|
2014-03-25 06:49:44 +00:00
|
|
|
cleanup:
|
2015-01-07 13:50:34 +00:00
|
|
|
VIR_FREE(alias);
|
|
|
|
VIR_FREE(secret);
|
2013-01-10 21:03:14 +00:00
|
|
|
virObjectUnref(cfg);
|
2011-02-14 16:09:39 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
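/* Pre-create the output file of a file-backed chardev so that it already
 * exists with sane permissions before QEMU is started. */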
static int
|
|
|
|
qemuProcessPrepareChardevDevice(virDomainDefPtr def ATTRIBUTE_UNUSED,
|
|
|
|
virDomainChrDefPtr dev,
|
|
|
|
void *opaque ATTRIBUTE_UNUSED)
|
|
|
|
{
|
|
|
|
int fd;
|
2016-10-21 11:45:54 +00:00
|
|
|
if (dev->source->type != VIR_DOMAIN_CHR_TYPE_FILE)
|
2011-02-14 16:09:39 +00:00
|
|
|
return 0;
|
|
|
|
|
2016-10-21 11:45:54 +00:00
|
|
|
if ((fd = open(dev->source->data.file.path,
|
2011-02-14 16:09:39 +00:00
|
|
|
O_CREAT | O_APPEND, S_IRUSR|S_IWUSR)) < 0) {
|
|
|
|
virReportSystemError(errno,
|
|
|
|
_("Unable to pre-create chardev file '%s'"),
|
2016-10-21 11:45:54 +00:00
|
|
|
dev->source->data.file.path);
|
2011-02-14 16:09:39 +00:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
VIR_FORCE_CLOSE(fd);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2014-05-13 06:54:20 +00:00
|
|
|
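/* Remove the filesystem socket of a listening UNIX chardev so that no stale
 * socket is left behind once the domain is gone. */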
static int
|
|
|
|
qemuProcessCleanupChardevDevice(virDomainDefPtr def ATTRIBUTE_UNUSED,
|
|
|
|
virDomainChrDefPtr dev,
|
|
|
|
void *opaque ATTRIBUTE_UNUSED)
|
|
|
|
{
|
2016-10-21 11:45:54 +00:00
|
|
|
if (dev->source->type == VIR_DOMAIN_CHR_TYPE_UNIX &&
|
|
|
|
dev->source->data.nix.listen &&
|
|
|
|
dev->source->data.nix.path)
|
|
|
|
unlink(dev->source->data.nix.path);
|
2014-05-13 06:54:20 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2014-12-10 14:31:23 +00:00
|
|
|
/**
|
|
|
|
 * Load and update the video memory size for video devices from the running QEMU
|
|
|
|
 * process, since QEMU may silently adjust the values that we pass to it
|
|
|
|
 * through the command line. We need to load these updated values and store them
|
|
|
|
* into the status XML.
|
|
|
|
*
|
|
|
|
 * We will fail if for some reason the values cannot be loaded from QEMU, because
|
|
|
|
 * it is mandatory to get the correct video memory size into the status XML so as not to break
|
|
|
|
* migration.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
qemuProcessUpdateVideoRamSize(virQEMUDriverPtr driver,
|
|
|
|
virDomainObjPtr vm,
|
|
|
|
int asyncJob)
|
|
|
|
{
|
|
|
|
int ret = -1;
|
|
|
|
ssize_t i;
|
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
|
|
|
virDomainVideoDefPtr video = NULL;
|
|
|
|
virQEMUDriverConfigPtr cfg = NULL;
|
|
|
|
|
|
|
|
if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
for (i = 0; i < vm->def->nvideos; i++) {
|
|
|
|
video = vm->def->videos[i];
|
|
|
|
|
|
|
|
switch (video->type) {
|
|
|
|
case VIR_DOMAIN_VIDEO_TYPE_VGA:
|
|
|
|
if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_VGA_VGAMEM)) {
|
|
|
|
if (qemuMonitorUpdateVideoMemorySize(priv->mon, video, "VGA") < 0)
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case VIR_DOMAIN_VIDEO_TYPE_QXL:
|
|
|
|
if (i == 0) {
|
2016-10-11 15:42:37 +00:00
|
|
|
if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_QXL_VGAMEM) &&
|
2016-02-23 16:04:19 +00:00
|
|
|
qemuMonitorUpdateVideoMemorySize(priv->mon, video,
|
|
|
|
"qxl-vga") < 0)
|
2014-12-10 14:31:23 +00:00
|
|
|
goto error;
|
2016-02-23 16:04:19 +00:00
|
|
|
|
2016-10-11 15:42:37 +00:00
|
|
|
if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_QXL_VRAM64) &&
|
2016-02-23 16:04:19 +00:00
|
|
|
qemuMonitorUpdateVideoVram64Size(priv->mon, video,
|
|
|
|
"qxl-vga") < 0)
|
|
|
|
goto error;
|
2014-12-10 14:31:23 +00:00
|
|
|
} else {
|
2016-02-23 16:04:19 +00:00
|
|
|
if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_QXL_VGAMEM) &&
|
|
|
|
qemuMonitorUpdateVideoMemorySize(priv->mon, video,
|
|
|
|
"qxl") < 0)
|
|
|
|
goto error;
|
|
|
|
|
|
|
|
if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_QXL_VRAM64) &&
|
|
|
|
qemuMonitorUpdateVideoVram64Size(priv->mon, video,
|
|
|
|
"qxl") < 0)
|
2014-12-10 14:31:23 +00:00
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case VIR_DOMAIN_VIDEO_TYPE_VMVGA:
|
|
|
|
if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_VMWARE_SVGA_VGAMEM)) {
|
|
|
|
if (qemuMonitorUpdateVideoMemorySize(priv->mon, video,
|
|
|
|
"vmware-svga") < 0)
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case VIR_DOMAIN_VIDEO_TYPE_CIRRUS:
|
|
|
|
case VIR_DOMAIN_VIDEO_TYPE_XEN:
|
|
|
|
case VIR_DOMAIN_VIDEO_TYPE_VBOX:
|
|
|
|
case VIR_DOMAIN_VIDEO_TYPE_LAST:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
}
|
|
|
|
|
2014-12-16 09:40:58 +00:00
|
|
|
if (qemuDomainObjExitMonitor(driver, vm) < 0)
|
|
|
|
return -1;
|
2014-12-10 14:31:23 +00:00
|
|
|
|
|
|
|
cfg = virQEMUDriverGetConfig(driver);
|
2016-02-04 12:32:45 +00:00
|
|
|
ret = virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm, driver->caps);
|
2014-12-10 14:31:23 +00:00
|
|
|
virObjectUnref(cfg);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
error:
|
2014-12-16 09:40:58 +00:00
|
|
|
ignore_value(qemuDomainObjExitMonitor(driver, vm));
|
2014-12-10 14:31:23 +00:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
struct qemuProcessHookData {
|
|
|
|
virConnectPtr conn;
|
|
|
|
virDomainObjPtr vm;
|
2012-11-28 16:43:10 +00:00
|
|
|
virQEMUDriverPtr driver;
|
2013-02-11 16:08:42 +00:00
|
|
|
virQEMUDriverConfigPtr cfg;
|
2011-02-14 16:09:39 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
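/*
 * qemuProcessHook:
 *
 * Runs in the child process after fork() but before exec()-ing the QEMU
 * binary: it re-initializes the security driver, acquires the domain lock
 * (deliberately leaking the lock FD to QEMU), sets up the domain namespace
 * and applies the NUMA memory policy unless cgroups will take care of that.
 */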
static int qemuProcessHook(void *data)
|
|
|
|
{
|
|
|
|
struct qemuProcessHookData *h = data;
|
2014-12-12 14:23:12 +00:00
|
|
|
qemuDomainObjPrivatePtr priv = h->vm->privateData;
|
2010-10-26 14:04:46 +00:00
|
|
|
int ret = -1;
|
2011-06-24 14:14:41 +00:00
|
|
|
int fd;
|
2014-11-06 11:16:54 +00:00
|
|
|
virBitmapPtr nodeset = NULL;
|
|
|
|
virDomainNumatuneMemMode mode;
|
|
|
|
|
2013-02-11 16:08:42 +00:00
|
|
|
    /* This method cannot use any mutexes, since they are not
|
|
|
|
* protected across fork()
|
|
|
|
*/
|
|
|
|
|
2017-02-13 13:36:53 +00:00
|
|
|
qemuSecurityPostFork(h->driver->securityManager);
|
2010-10-26 14:04:46 +00:00
|
|
|
|
|
|
|
/* Some later calls want pid present */
|
|
|
|
h->vm->pid = getpid();
|
|
|
|
|
|
|
|
VIR_DEBUG("Obtaining domain lock");
|
2011-08-26 15:06:31 +00:00
|
|
|
/*
|
|
|
|
* Since we're going to leak the returned FD to QEMU,
|
|
|
|
* we need to make sure it gets a sensible label.
|
|
|
|
* This mildly sucks, because there could be other
|
|
|
|
* sockets the lock driver opens that we don't want
|
|
|
|
* labelled. So far we're ok though.
|
|
|
|
*/
|
2017-02-13 13:36:53 +00:00
|
|
|
if (qemuSecuritySetSocketLabel(h->driver->securityManager, h->vm->def) < 0)
|
2011-08-26 15:06:31 +00:00
|
|
|
goto cleanup;
|
2010-10-26 14:04:46 +00:00
|
|
|
if (virDomainLockProcessStart(h->driver->lockManager,
|
2013-02-11 16:08:42 +00:00
|
|
|
h->cfg->uri,
|
2010-10-26 14:04:46 +00:00
|
|
|
h->vm,
|
2012-10-11 16:31:20 +00:00
|
|
|
/* QEMU is always paused initially */
|
2011-06-24 14:14:41 +00:00
|
|
|
true,
|
|
|
|
&fd) < 0)
|
2010-10-26 14:04:46 +00:00
|
|
|
goto cleanup;
|
2017-02-13 13:36:53 +00:00
|
|
|
if (qemuSecurityClearSocketLabel(h->driver->securityManager, h->vm->def) < 0)
|
2011-08-26 15:06:31 +00:00
|
|
|
goto cleanup;
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2017-02-21 12:11:00 +00:00
|
|
|
if (qemuDomainBuildNamespace(h->cfg, h->driver->securityManager, h->vm) < 0)
|
2016-11-15 10:30:18 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
2015-05-19 09:55:26 +00:00
|
|
|
if (virDomainNumatuneGetMode(h->vm->def->numa, -1, &mode) == 0) {
|
|
|
|
if (mode == VIR_DOMAIN_NUMATUNE_MEM_STRICT &&
|
|
|
|
h->cfg->cgroupControllers & (1 << VIR_CGROUP_CONTROLLER_CPUSET) &&
|
|
|
|
virCgroupControllerAvailable(VIR_CGROUP_CONTROLLER_CPUSET)) {
|
|
|
|
/* Use virNuma* API iff necessary. Once set and child is exec()-ed,
|
|
|
|
* there's no way for us to change it. Rely on cgroups (if available
|
|
|
|
* and enabled in the config) rather than virNuma*. */
|
|
|
|
VIR_DEBUG("Relying on CGroups for memory binding");
|
|
|
|
} else {
|
|
|
|
nodeset = virDomainNumatuneGetNodeset(h->vm->def->numa,
|
|
|
|
priv->autoNodeset, -1);
|
qemuProcessHook: Call virNuma*() only when needed
https://bugzilla.redhat.com/show_bug.cgi?id=1198645
Once upon a time, there was a little domain. And the domain was pinned
onto a NUMA node and hasn't fully allocated its memory:
<memory unit='KiB'>2355200</memory>
<currentMemory unit='KiB'>1048576</currentMemory>
<numatune>
<memory mode='strict' nodeset='0'/>
</numatune>
Oh little me, said the domain, what will I do with so little memory.
If I only had a few megabytes more. But the old admin noticed the
whimpering, barely audible to untrained human ear. And good admin he
was, he gave the domain yet more memory. But the old NUMA topology
witch forbade to allocate more memory on the node zero. So he
decided to allocate it on a different node:
virsh # numatune little_domain --nodeset 0-1
virsh # setmem little_domain 2355200
The little domain was happy. For a while. Until bad, sharp teeth
shaped creature came. Every process in the system was afraid of him.
The OOM Killer they called him. Oh no, he's after the little domain.
There's no escape.
Do you kids know why? Because when the little domain was born, her
father, Libvirt, called numa_set_membind(). So even if the admin
allowed her to allocate memory from other nodes in the cgroups, the
membind() forbid it.
So what's the lesson? Libvirt should rely on cgroups, whenever
possible and use numa_set_membind() as the last ditch effort.
Signed-off-by: Michal Privoznik <mprivozn@redhat.com>
2015-03-27 10:39:45 +00:00
|
|
|
|
2015-05-19 09:55:26 +00:00
|
|
|
if (virNumaSetupMemoryPolicy(mode, nodeset) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
}
|
2015-03-27 10:39:45 +00:00
|
|
|
}
|
2011-06-20 07:16:16 +00:00
|
|
|
|
2010-10-26 14:04:46 +00:00
|
|
|
ret = 0;
|
|
|
|
|
2014-03-25 06:49:44 +00:00
|
|
|
cleanup:
|
2013-02-11 16:08:42 +00:00
|
|
|
virObjectUnref(h->cfg);
|
2010-10-26 14:04:46 +00:00
|
|
|
VIR_DEBUG("Hook complete ret=%d", ret);
|
|
|
|
return ret;
|
2011-02-14 16:09:39 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
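/* Configure the monitor chardev as a listening UNIX socket placed at
 * "<domainDir>/monitor.sock". */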
int
|
2016-02-26 08:15:55 +00:00
|
|
|
qemuProcessPrepareMonitorChr(virDomainChrSourceDefPtr monConfig,
|
|
|
|
const char *domainDir)
|
2011-02-14 16:09:39 +00:00
|
|
|
{
|
|
|
|
monConfig->type = VIR_DOMAIN_CHR_TYPE_UNIX;
|
|
|
|
monConfig->data.nix.listen = true;
|
|
|
|
|
2016-02-26 08:15:55 +00:00
|
|
|
if (virAsprintf(&monConfig->data.nix.path, "%s/monitor.sock",
|
|
|
|
domainDir) < 0)
|
2013-07-18 10:13:46 +00:00
|
|
|
return -1;
|
|
|
|
return 0;
|
2011-02-14 16:09:39 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-06-15 16:49:58 +00:00
|
|
|
/*
|
2013-02-06 18:17:20 +00:00
|
|
|
* Precondition: vm must be locked, and a job must be active.
|
|
|
|
* This method will call {Enter,Exit}Monitor
|
2011-06-15 16:49:58 +00:00
|
|
|
*/
|
2011-03-15 02:20:53 +00:00
|
|
|
int
|
2012-11-28 16:43:10 +00:00
|
|
|
qemuProcessStartCPUs(virQEMUDriverPtr driver, virDomainObjPtr vm,
|
qemu: fix crash when mixing sync and async monitor jobs
Currently, we attempt to run sync job and async job at the same time. It
means that the monitor commands for two jobs can be run in any order.
In the function qemuDomainObjEnterMonitorInternal():
if (priv->job.active == QEMU_JOB_NONE && priv->job.asyncJob) {
if (qemuDomainObjBeginNestedJob(driver, obj) < 0)
We check whether the caller is an async job by priv->job.active and
priv->job.asynJob. But when an async job is running, and a sync job is
also running at the time of the check, then priv->job.active is not
QEMU_JOB_NONE. So we cannot check whether the caller is an async job
in the function qemuDomainObjEnterMonitorInternal(), and must instead
put the burden on the caller to tell us when an async command wants
to do a nested job.
Once the burden is on the caller, then only async monitor enters need
to worry about whether the VM is still running; for sync monitor enter,
the internal return is always 0, so lots of ignore_value can be dropped.
* src/qemu/THREADS.txt: Reflect new rules.
* src/qemu/qemu_domain.h (qemuDomainObjEnterMonitorAsync): New
prototype.
* src/qemu/qemu_process.h (qemuProcessStartCPUs)
(qemuProcessStopCPUs): Add parameter.
* src/qemu/qemu_migration.h (qemuMigrationToFile): Likewise.
(qemuMigrationWaitForCompletion): Make static.
* src/qemu/qemu_domain.c (qemuDomainObjEnterMonitorInternal): Add
parameter.
(qemuDomainObjEnterMonitorAsync): New function.
(qemuDomainObjEnterMonitor, qemuDomainObjEnterMonitorWithDriver):
Update callers.
* src/qemu/qemu_driver.c (qemuDomainSaveInternal)
(qemudDomainCoreDump, doCoreDump, processWatchdogEvent)
(qemudDomainSuspend, qemudDomainResume, qemuDomainSaveImageStartVM)
(qemuDomainSnapshotCreateActive, qemuDomainRevertToSnapshot):
Likewise.
* src/qemu/qemu_process.c (qemuProcessStopCPUs)
(qemuProcessFakeReboot, qemuProcessRecoverMigration)
(qemuProcessRecoverJob, qemuProcessStart): Likewise.
* src/qemu/qemu_migration.c (qemuMigrationToFile)
(qemuMigrationWaitForCompletion, qemuMigrationUpdateJobStatus)
(qemuMigrationJobStart, qemuDomainMigrateGraphicsRelocate)
(doNativeMigrate, doTunnelMigrate, qemuMigrationPerformJob)
(qemuMigrationPerformPhase, qemuMigrationFinish)
(qemuMigrationConfirm): Likewise.
* src/qemu/qemu_hotplug.c: Drop unneeded ignore_value.
2011-07-28 23:18:24 +00:00
|
|
|
virConnectPtr conn, virDomainRunningReason reason,
|
2014-06-15 16:32:56 +00:00
|
|
|
qemuDomainAsyncJob asyncJob)
|
2011-02-14 16:09:39 +00:00
|
|
|
{
|
2013-01-10 21:03:14 +00:00
|
|
|
int ret = -1;
|
2011-02-14 16:09:39 +00:00
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
2013-01-10 21:03:14 +00:00
|
|
|
virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2014-09-16 20:50:53 +00:00
|
|
|
/* Bring up netdevs before starting CPUs */
|
2014-12-11 19:49:13 +00:00
|
|
|
if (qemuInterfaceStartDevices(vm->def) < 0)
|
2014-09-16 20:50:53 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
2010-10-26 14:04:46 +00:00
|
|
|
VIR_DEBUG("Using lock state '%s'", NULLSTR(priv->lockState));
|
2013-01-10 21:03:14 +00:00
|
|
|
if (virDomainLockProcessResume(driver->lockManager, cfg->uri,
|
2012-09-17 13:36:47 +00:00
|
|
|
vm, priv->lockState) < 0) {
|
2010-10-26 14:04:46 +00:00
|
|
|
/* Don't free priv->lockState on error, because we need
|
|
|
|
* to make sure we have state still present if the user
|
|
|
|
* tries to resume again
|
|
|
|
*/
|
2013-01-10 21:03:14 +00:00
|
|
|
goto cleanup;
|
2010-10-26 14:04:46 +00:00
|
|
|
}
|
|
|
|
VIR_FREE(priv->lockState);
|
|
|
|
|
2014-05-14 11:22:34 +00:00
|
|
|
if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
|
|
|
|
goto release;
|
2011-05-04 09:07:01 +00:00
|
|
|
|
2014-05-14 11:22:34 +00:00
|
|
|
ret = qemuMonitorStartCPUs(priv->mon, conn);
|
2014-12-16 09:40:58 +00:00
|
|
|
if (qemuDomainObjExitMonitor(driver, vm) < 0)
|
|
|
|
ret = -1;
|
2014-05-14 11:22:34 +00:00
|
|
|
|
|
|
|
if (ret < 0)
|
|
|
|
goto release;
|
|
|
|
|
|
|
|
virDomainObjSetState(vm, VIR_DOMAIN_RUNNING, reason);
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2014-03-25 06:49:44 +00:00
|
|
|
cleanup:
|
2013-01-10 21:03:14 +00:00
|
|
|
virObjectUnref(cfg);
|
2011-02-14 16:09:39 +00:00
|
|
|
return ret;
|
2014-05-14 11:22:34 +00:00
|
|
|
|
|
|
|
release:
|
|
|
|
if (virDomainLockProcessPause(driver->lockManager, vm, &priv->lockState) < 0)
|
|
|
|
VIR_WARN("Unable to release lease on %s", vm->def->name);
|
|
|
|
VIR_DEBUG("Preserving lock state '%s'", NULLSTR(priv->lockState));
|
|
|
|
goto cleanup;
|
2011-02-14 16:09:39 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2014-08-28 14:37:38 +00:00
|
|
|
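/*
 * qemuProcessStopCPUs:
 *
 * Pause the guest CPUs via the monitor, de-activate the netdevs, record when
 * the current job stopped, mark the domain as paused and hand the lock
 * manager lease back while preserving the lock state for a later resume.
 */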
int qemuProcessStopCPUs(virQEMUDriverPtr driver,
|
|
|
|
virDomainObjPtr vm,
|
2011-07-28 23:18:24 +00:00
|
|
|
virDomainPausedReason reason,
|
2014-06-15 16:32:56 +00:00
|
|
|
qemuDomainAsyncJob asyncJob)
|
2011-02-14 16:09:39 +00:00
|
|
|
{
|
2014-05-14 11:22:34 +00:00
|
|
|
int ret = -1;
|
2011-02-14 16:09:39 +00:00
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
|
|
|
|
2010-10-26 14:04:46 +00:00
|
|
|
VIR_FREE(priv->lockState);
|
2011-05-04 09:07:01 +00:00
|
|
|
|
2014-05-14 11:22:34 +00:00
|
|
|
if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
|
|
|
|
goto cleanup;
|
2011-05-04 09:07:01 +00:00
|
|
|
|
2014-05-14 11:22:34 +00:00
|
|
|
ret = qemuMonitorStopCPUs(priv->mon);
|
2014-12-16 09:40:58 +00:00
|
|
|
if (qemuDomainObjExitMonitor(driver, vm) < 0)
|
|
|
|
ret = -1;
|
2014-05-14 11:22:34 +00:00
|
|
|
|
|
|
|
if (ret < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
2014-12-11 20:11:10 +00:00
|
|
|
/* de-activate netdevs after stopping CPUs */
|
|
|
|
ignore_value(qemuInterfaceStopDevices(vm->def));
|
|
|
|
|
2014-08-28 14:37:38 +00:00
|
|
|
if (priv->job.current)
|
|
|
|
ignore_value(virTimeMillisNow(&priv->job.current->stopped));
|
|
|
|
|
2014-05-14 11:22:34 +00:00
|
|
|
virDomainObjSetState(vm, VIR_DOMAIN_PAUSED, reason);
|
|
|
|
if (virDomainLockProcessPause(driver->lockManager, vm, &priv->lockState) < 0)
|
|
|
|
VIR_WARN("Unable to release lease on %s", vm->def->name);
|
|
|
|
VIR_DEBUG("Preserving lock state '%s'", NULLSTR(priv->lockState));
|
2011-05-04 09:07:01 +00:00
|
|
|
|
2014-05-14 11:22:34 +00:00
|
|
|
cleanup:
|
2011-02-14 16:09:39 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
2017-04-25 16:20:30 +00:00
|
|
|
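/* Tell the network driver about every interface an already running domain is
 * using, and re-reserve macvtap device names so nobody else grabs them. */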
static void
|
2011-07-04 06:27:12 +00:00
|
|
|
qemuProcessNotifyNets(virDomainDefPtr def)
|
|
|
|
{
|
2013-07-08 14:09:33 +00:00
|
|
|
size_t i;
|
2011-07-04 06:27:12 +00:00
|
|
|
|
2013-07-08 14:09:33 +00:00
|
|
|
for (i = 0; i < def->nnets; i++) {
|
|
|
|
virDomainNetDefPtr net = def->nets[i];
|
2016-01-19 19:20:54 +00:00
|
|
|
/* keep others from trying to use the macvtap device name, but
|
|
|
|
* don't return error if this happens, since that causes the
|
|
|
|
* domain to be unceremoniously killed, which would be *very*
|
|
|
|
* impolite.
|
|
|
|
*/
|
|
|
|
if (virDomainNetGetActualType(net) == VIR_DOMAIN_NET_TYPE_DIRECT)
|
|
|
|
ignore_value(virNetDevMacVLanReserveName(net->ifname, false));
|
|
|
|
|
2017-04-25 16:20:30 +00:00
|
|
|
networkNotifyActualDevice(def, net);
|
2011-07-04 06:27:12 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
static int
|
2014-11-07 10:37:37 +00:00
|
|
|
qemuProcessFiltersInstantiate(virDomainDefPtr def)
|
2011-02-14 16:09:39 +00:00
|
|
|
{
|
2013-07-08 14:09:33 +00:00
|
|
|
size_t i;
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2013-05-21 07:21:20 +00:00
|
|
|
for (i = 0; i < def->nnets; i++) {
|
2011-02-14 16:09:39 +00:00
|
|
|
virDomainNetDefPtr net = def->nets[i];
|
|
|
|
if ((net->filter) && (net->ifname)) {
|
2014-11-07 10:37:37 +00:00
|
|
|
if (virDomainConfNWFilterInstantiate(def->uuid, net) < 0)
|
2013-05-24 10:29:28 +00:00
|
|
|
return 1;
|
2011-02-14 16:09:39 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-05-24 10:29:28 +00:00
|
|
|
return 0;
|
2011-02-14 16:09:39 +00:00
|
|
|
}
|
|
|
|
|
2011-05-05 11:50:25 +00:00
|
|
|
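/*
 * qemuProcessUpdateState:
 *
 * Query the current run state from the QEMU monitor and bring libvirt's
 * recorded domain state (running, paused, shutdown, crashed) back in sync
 * with it, e.g. after reconnecting to a domain whose monitor was
 * disconnected for a while.
 */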
static int
|
2012-11-28 16:43:10 +00:00
|
|
|
qemuProcessUpdateState(virQEMUDriverPtr driver, virDomainObjPtr vm)
|
2011-05-05 11:50:25 +00:00
|
|
|
{
|
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
|
|
|
virDomainState state;
|
2011-09-27 09:42:04 +00:00
|
|
|
virDomainPausedReason reason;
|
2011-11-30 14:31:45 +00:00
|
|
|
virDomainState newState = VIR_DOMAIN_NOSTATE;
|
2015-02-16 14:17:00 +00:00
|
|
|
int oldReason;
|
2011-11-30 14:31:45 +00:00
|
|
|
int newReason;
|
2011-05-05 11:50:25 +00:00
|
|
|
bool running;
|
2011-11-30 14:31:45 +00:00
|
|
|
char *msg = NULL;
|
2011-05-05 11:50:25 +00:00
|
|
|
int ret;
|
|
|
|
|
2013-02-06 18:17:20 +00:00
|
|
|
qemuDomainObjEnterMonitor(driver, vm);
|
2011-09-27 09:42:04 +00:00
|
|
|
ret = qemuMonitorGetStatus(priv->mon, &running, &reason);
|
2014-12-16 09:40:58 +00:00
|
|
|
if (qemuDomainObjExitMonitor(driver, vm) < 0)
|
|
|
|
return -1;
|
2011-05-05 11:50:25 +00:00
|
|
|
|
2014-12-16 09:40:58 +00:00
|
|
|
if (ret < 0)
|
2011-05-05 11:50:25 +00:00
|
|
|
return -1;
|
|
|
|
|
2015-02-16 14:17:00 +00:00
|
|
|
state = virDomainObjGetState(vm, &oldReason);
|
2011-05-05 11:50:25 +00:00
|
|
|
|
2015-02-16 14:17:00 +00:00
|
|
|
if (running &&
|
|
|
|
(state == VIR_DOMAIN_SHUTOFF ||
|
|
|
|
(state == VIR_DOMAIN_PAUSED &&
|
|
|
|
oldReason == VIR_DOMAIN_PAUSED_STARTING_UP))) {
|
|
|
|
newState = VIR_DOMAIN_RUNNING;
|
|
|
|
newReason = VIR_DOMAIN_RUNNING_BOOTED;
|
|
|
|
ignore_value(VIR_STRDUP_QUIET(msg, "finished booting"));
|
|
|
|
} else if (state == VIR_DOMAIN_PAUSED && running) {
|
2011-11-30 14:31:45 +00:00
|
|
|
newState = VIR_DOMAIN_RUNNING;
|
|
|
|
newReason = VIR_DOMAIN_RUNNING_UNPAUSED;
|
2013-05-20 09:23:13 +00:00
|
|
|
ignore_value(VIR_STRDUP_QUIET(msg, "was unpaused"));
|
2011-05-05 11:50:25 +00:00
|
|
|
} else if (state == VIR_DOMAIN_RUNNING && !running) {
|
2011-11-30 14:31:45 +00:00
|
|
|
if (reason == VIR_DOMAIN_PAUSED_SHUTTING_DOWN) {
|
|
|
|
newState = VIR_DOMAIN_SHUTDOWN;
|
|
|
|
newReason = VIR_DOMAIN_SHUTDOWN_UNKNOWN;
|
2013-05-20 09:23:13 +00:00
|
|
|
ignore_value(VIR_STRDUP_QUIET(msg, "shutdown"));
|
2013-07-29 16:54:57 +00:00
|
|
|
} else if (reason == VIR_DOMAIN_PAUSED_CRASHED) {
|
2013-06-07 10:23:34 +00:00
|
|
|
newState = VIR_DOMAIN_CRASHED;
|
|
|
|
newReason = VIR_DOMAIN_CRASHED_PANICKED;
|
|
|
|
ignore_value(VIR_STRDUP_QUIET(msg, "crashed"));
|
2011-11-30 14:31:45 +00:00
|
|
|
} else {
|
|
|
|
newState = VIR_DOMAIN_PAUSED;
|
|
|
|
newReason = reason;
|
2012-04-27 21:25:35 +00:00
|
|
|
ignore_value(virAsprintf(&msg, "was paused (%s)",
|
|
|
|
virDomainPausedReasonTypeToString(reason)));
|
2011-11-30 14:31:45 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (newState != VIR_DOMAIN_NOSTATE) {
|
|
|
|
VIR_DEBUG("Domain %s %s while its monitor was disconnected;"
|
|
|
|
" changing state to %s (%s)",
|
|
|
|
vm->def->name,
|
2013-05-20 09:23:13 +00:00
|
|
|
NULLSTR(msg),
|
2011-11-30 14:31:45 +00:00
|
|
|
virDomainStateTypeToString(newState),
|
|
|
|
virDomainStateReasonToString(newState, newReason));
|
|
|
|
VIR_FREE(msg);
|
|
|
|
virDomainObjSetState(vm, newState, newReason);
|
2011-05-05 11:50:25 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2011-07-19 00:27:33 +00:00
|
|
|
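/*
 * qemuProcessRecoverMigrationIn:
 *
 * Decide what to do with an incoming migration interrupted by a libvirtd
 * restart: a migration still in the prepare phase gets the domain killed,
 * a finished one gets the domain resumed, and a post-copy migration is
 * marked as failed post-copy.
 */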
static int
|
2016-01-14 14:55:18 +00:00
|
|
|
qemuProcessRecoverMigrationIn(virQEMUDriverPtr driver,
|
|
|
|
virDomainObjPtr vm,
|
|
|
|
virConnectPtr conn,
|
|
|
|
qemuMigrationJobPhase phase,
|
|
|
|
virDomainState state,
|
2016-01-13 15:29:58 +00:00
|
|
|
int reason)
|
2016-01-14 14:55:18 +00:00
|
|
|
{
|
2016-01-13 15:29:58 +00:00
|
|
|
bool postcopy = (state == VIR_DOMAIN_PAUSED &&
|
|
|
|
reason == VIR_DOMAIN_PAUSED_POSTCOPY_FAILED) ||
|
|
|
|
(state == VIR_DOMAIN_RUNNING &&
|
|
|
|
reason == VIR_DOMAIN_RUNNING_POSTCOPY);
|
|
|
|
|
2016-01-14 14:55:18 +00:00
|
|
|
switch (phase) {
|
|
|
|
case QEMU_MIGRATION_PHASE_NONE:
|
|
|
|
case QEMU_MIGRATION_PHASE_PERFORM2:
|
|
|
|
case QEMU_MIGRATION_PHASE_BEGIN3:
|
|
|
|
case QEMU_MIGRATION_PHASE_PERFORM3:
|
|
|
|
case QEMU_MIGRATION_PHASE_PERFORM3_DONE:
|
|
|
|
case QEMU_MIGRATION_PHASE_CONFIRM3_CANCELLED:
|
|
|
|
case QEMU_MIGRATION_PHASE_CONFIRM3:
|
|
|
|
case QEMU_MIGRATION_PHASE_LAST:
|
|
|
|
/* N/A for incoming migration */
|
|
|
|
break;
|
|
|
|
|
|
|
|
case QEMU_MIGRATION_PHASE_PREPARE:
|
|
|
|
VIR_DEBUG("Killing unfinished incoming migration for domain %s",
|
|
|
|
vm->def->name);
|
|
|
|
return -1;
|
2011-07-19 00:27:33 +00:00
|
|
|
|
2016-01-14 14:55:18 +00:00
|
|
|
case QEMU_MIGRATION_PHASE_FINISH2:
|
|
|
|
/* source domain is already killed so let's just resume the domain
|
|
|
|
* and hope we are all set */
|
|
|
|
VIR_DEBUG("Incoming migration finished, resuming domain %s",
|
|
|
|
vm->def->name);
|
|
|
|
if (qemuProcessStartCPUs(driver, vm, conn,
|
|
|
|
VIR_DOMAIN_RUNNING_UNPAUSED,
|
|
|
|
QEMU_ASYNC_JOB_NONE) < 0) {
|
|
|
|
VIR_WARN("Could not resume domain %s", vm->def->name);
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
|
|
|
case QEMU_MIGRATION_PHASE_FINISH3:
|
|
|
|
/* migration finished, we started resuming the domain but didn't
|
|
|
|
* confirm success or failure yet; killing it seems safest unless
|
2016-01-13 15:29:58 +00:00
|
|
|
* we already started guest CPUs or we were in post-copy mode */
|
|
|
|
if (postcopy) {
|
|
|
|
qemuMigrationPostcopyFailed(driver, vm);
|
|
|
|
} else if (state != VIR_DOMAIN_RUNNING) {
|
2016-01-14 14:55:18 +00:00
|
|
|
VIR_DEBUG("Killing migrated domain %s", vm->def->name);
|
2011-07-19 00:27:33 +00:00
|
|
|
return -1;
|
2016-01-14 14:55:18 +00:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
2011-07-19 00:27:33 +00:00
|
|
|
|
2017-04-05 12:48:43 +00:00
|
|
|
qemuMigrationReset(driver, vm, QEMU_ASYNC_JOB_NONE);
|
2016-01-14 14:55:18 +00:00
|
|
|
return 0;
|
|
|
|
}
|
2011-07-19 00:27:33 +00:00
|
|
|
|
2016-01-14 14:55:18 +00:00
|
|
|
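/*
 * qemuProcessRecoverMigrationOut:
 *
 * Decide what to do with an outgoing migration interrupted by a libvirtd
 * restart: cancel it and resume the domain while it is still in progress,
 * kill the domain once the migration was confirmed, and mark the domain as
 * broken if it had already entered post-copy mode.
 */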
static int
|
|
|
|
qemuProcessRecoverMigrationOut(virQEMUDriverPtr driver,
|
|
|
|
virDomainObjPtr vm,
|
|
|
|
virConnectPtr conn,
|
|
|
|
qemuMigrationJobPhase phase,
|
|
|
|
virDomainState state,
|
2016-11-23 13:07:53 +00:00
|
|
|
int reason,
|
|
|
|
unsigned int *stopFlags)
|
2016-01-14 14:55:18 +00:00
|
|
|
{
|
2016-01-13 15:29:58 +00:00
|
|
|
bool postcopy = state == VIR_DOMAIN_PAUSED &&
|
|
|
|
(reason == VIR_DOMAIN_PAUSED_POSTCOPY ||
|
|
|
|
reason == VIR_DOMAIN_PAUSED_POSTCOPY_FAILED);
|
2017-04-04 18:54:57 +00:00
|
|
|
bool resume = false;
|
2016-01-13 15:29:58 +00:00
|
|
|
|
2016-01-14 14:55:18 +00:00
|
|
|
switch (phase) {
|
|
|
|
case QEMU_MIGRATION_PHASE_NONE:
|
|
|
|
case QEMU_MIGRATION_PHASE_PREPARE:
|
|
|
|
case QEMU_MIGRATION_PHASE_FINISH2:
|
|
|
|
case QEMU_MIGRATION_PHASE_FINISH3:
|
|
|
|
case QEMU_MIGRATION_PHASE_LAST:
|
|
|
|
/* N/A for outgoing migration */
|
|
|
|
break;
|
|
|
|
|
|
|
|
case QEMU_MIGRATION_PHASE_BEGIN3:
|
|
|
|
/* nothing happened so far, just forget we were about to migrate the
|
|
|
|
* domain */
|
|
|
|
break;
|
|
|
|
|
|
|
|
case QEMU_MIGRATION_PHASE_PERFORM2:
|
|
|
|
case QEMU_MIGRATION_PHASE_PERFORM3:
|
|
|
|
/* migration is still in progress, let's cancel it and resume the
|
2016-01-13 15:29:58 +00:00
|
|
|
* domain; however we can only do that before migration enters
|
|
|
|
* post-copy mode
|
|
|
|
*/
|
|
|
|
if (postcopy) {
|
|
|
|
qemuMigrationPostcopyFailed(driver, vm);
|
|
|
|
} else {
|
|
|
|
VIR_DEBUG("Cancelling unfinished migration of domain %s",
|
|
|
|
vm->def->name);
|
|
|
|
if (qemuMigrationCancel(driver, vm) < 0) {
|
|
|
|
VIR_WARN("Could not cancel ongoing migration of domain %s",
|
|
|
|
vm->def->name);
|
|
|
|
}
|
2017-04-04 18:54:57 +00:00
|
|
|
resume = true;
|
2011-07-19 00:27:33 +00:00
|
|
|
}
|
2016-01-13 15:29:58 +00:00
|
|
|
break;
|
2011-07-19 00:27:33 +00:00
|
|
|
|
2016-01-14 14:55:18 +00:00
|
|
|
case QEMU_MIGRATION_PHASE_PERFORM3_DONE:
|
|
|
|
/* migration finished but we didn't have a chance to get the result
|
2016-01-13 15:29:58 +00:00
|
|
|
* of Finish3 step; third party needs to check what to do next; in
|
|
|
|
* post-copy mode we can use PAUSED_POSTCOPY_FAILED state for this
|
2016-01-14 14:55:18 +00:00
|
|
|
*/
|
2016-01-13 15:29:58 +00:00
|
|
|
if (postcopy)
|
|
|
|
qemuMigrationPostcopyFailed(driver, vm);
|
2016-01-14 14:55:18 +00:00
|
|
|
break;
|
2011-07-19 00:27:33 +00:00
|
|
|
|
2016-01-14 14:55:18 +00:00
|
|
|
case QEMU_MIGRATION_PHASE_CONFIRM3_CANCELLED:
|
2016-01-13 15:29:58 +00:00
|
|
|
/* Finish3 failed, we need to resume the domain, but once we enter
|
|
|
|
* post-copy mode there's no way back, so let's just mark the domain
|
|
|
|
* as broken in that case
|
|
|
|
*/
|
|
|
|
if (postcopy) {
|
|
|
|
qemuMigrationPostcopyFailed(driver, vm);
|
|
|
|
} else {
|
|
|
|
VIR_DEBUG("Resuming domain %s after failed migration",
|
|
|
|
vm->def->name);
|
2017-04-04 18:54:57 +00:00
|
|
|
resume = true;
|
2016-01-13 15:29:58 +00:00
|
|
|
}
|
|
|
|
break;
|
2011-07-19 00:27:33 +00:00
|
|
|
|
2016-01-14 14:55:18 +00:00
|
|
|
case QEMU_MIGRATION_PHASE_CONFIRM3:
|
|
|
|
/* migration completed, we need to kill the domain here */
|
2016-11-23 13:07:53 +00:00
|
|
|
*stopFlags |= VIR_QEMU_PROCESS_STOP_MIGRATED;
|
2016-01-14 14:55:18 +00:00
|
|
|
return -1;
|
|
|
|
}
|
2011-07-19 00:27:33 +00:00
|
|
|
|
2017-04-04 18:54:57 +00:00
|
|
|
if (resume) {
|
|
|
|
/* resume the domain but only if it was paused as a result of
|
|
|
|
* migration
|
|
|
|
*/
|
|
|
|
if (state == VIR_DOMAIN_PAUSED &&
|
|
|
|
(reason == VIR_DOMAIN_PAUSED_MIGRATION ||
|
|
|
|
reason == VIR_DOMAIN_PAUSED_UNKNOWN)) {
|
|
|
|
if (qemuProcessStartCPUs(driver, vm, conn,
|
|
|
|
VIR_DOMAIN_RUNNING_UNPAUSED,
|
|
|
|
QEMU_ASYNC_JOB_NONE) < 0) {
|
|
|
|
VIR_WARN("Could not resume domain %s", vm->def->name);
|
|
|
|
}
|
2011-07-19 00:27:33 +00:00
|
|
|
}
|
|
|
|
}
|
2017-04-04 18:54:57 +00:00
|
|
|
|
2017-04-05 12:48:43 +00:00
|
|
|
qemuMigrationReset(driver, vm, QEMU_ASYNC_JOB_NONE);
|
2011-07-19 00:27:33 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2011-07-04 21:33:39 +00:00
|
|
|
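/*
 * qemuProcessRecoverJob:
 *
 * Recover from an async job (migration, save, dump or snapshot) that was
 * still active when libvirtd restarted: either finish the necessary cleanup
 * or cancel the job and resume the domain where appropriate.
 */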
static int
|
2012-11-28 16:43:10 +00:00
|
|
|
qemuProcessRecoverJob(virQEMUDriverPtr driver,
|
2011-07-04 21:33:39 +00:00
|
|
|
virDomainObjPtr vm,
|
|
|
|
virConnectPtr conn,
|
2016-11-23 13:07:53 +00:00
|
|
|
const struct qemuDomainJobObj *job,
|
|
|
|
unsigned int *stopFlags)
|
2011-07-04 21:33:39 +00:00
|
|
|
{
|
2011-07-19 00:27:39 +00:00
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
2011-07-04 21:33:39 +00:00
|
|
|
virDomainState state;
|
|
|
|
int reason;
|
|
|
|
|
|
|
|
state = virDomainObjGetState(vm, &reason);
|
|
|
|
|
|
|
|
switch (job->asyncJob) {
|
|
|
|
case QEMU_ASYNC_JOB_MIGRATION_OUT:
|
2016-01-14 14:55:18 +00:00
|
|
|
if (qemuProcessRecoverMigrationOut(driver, vm, conn, job->phase,
|
2016-11-23 13:07:53 +00:00
|
|
|
state, reason, stopFlags) < 0)
|
2016-01-14 14:55:18 +00:00
|
|
|
return -1;
|
|
|
|
break;
|
|
|
|
|
2011-07-04 21:33:39 +00:00
|
|
|
case QEMU_ASYNC_JOB_MIGRATION_IN:
|
2016-01-14 14:55:18 +00:00
|
|
|
if (qemuProcessRecoverMigrationIn(driver, vm, conn, job->phase,
|
|
|
|
state, reason) < 0)
|
2011-07-19 00:27:33 +00:00
|
|
|
return -1;
|
2011-07-04 21:33:39 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case QEMU_ASYNC_JOB_SAVE:
|
|
|
|
case QEMU_ASYNC_JOB_DUMP:
|
2012-10-08 14:34:19 +00:00
|
|
|
case QEMU_ASYNC_JOB_SNAPSHOT:
|
qemu: fix crash when mixing sync and async monitor jobs
Currently, we attempt to run sync job and async job at the same time. It
means that the monitor commands for two jobs can be run in any order.
In the function qemuDomainObjEnterMonitorInternal():
if (priv->job.active == QEMU_JOB_NONE && priv->job.asyncJob) {
if (qemuDomainObjBeginNestedJob(driver, obj) < 0)
We check whether the caller is an async job by priv->job.active and
priv->job.asynJob. But when an async job is running, and a sync job is
also running at the time of the check, then priv->job.active is not
QEMU_JOB_NONE. So we cannot check whether the caller is an async job
in the function qemuDomainObjEnterMonitorInternal(), and must instead
put the burden on the caller to tell us when an async command wants
to do a nested job.
Once the burden is on the caller, then only async monitor enters need
to worry about whether the VM is still running; for sync monitor enter,
the internal return is always 0, so lots of ignore_value can be dropped.
* src/qemu/THREADS.txt: Reflect new rules.
* src/qemu/qemu_domain.h (qemuDomainObjEnterMonitorAsync): New
prototype.
* src/qemu/qemu_process.h (qemuProcessStartCPUs)
(qemuProcessStopCPUs): Add parameter.
* src/qemu/qemu_migration.h (qemuMigrationToFile): Likewise.
(qemuMigrationWaitForCompletion): Make static.
* src/qemu/qemu_domain.c (qemuDomainObjEnterMonitorInternal): Add
parameter.
(qemuDomainObjEnterMonitorAsync): New function.
(qemuDomainObjEnterMonitor, qemuDomainObjEnterMonitorWithDriver):
Update callers.
* src/qemu/qemu_driver.c (qemuDomainSaveInternal)
(qemudDomainCoreDump, doCoreDump, processWatchdogEvent)
(qemudDomainSuspend, qemudDomainResume, qemuDomainSaveImageStartVM)
(qemuDomainSnapshotCreateActive, qemuDomainRevertToSnapshot):
Likewise.
* src/qemu/qemu_process.c (qemuProcessStopCPUs)
(qemuProcessFakeReboot, qemuProcessRecoverMigration)
(qemuProcessRecoverJob, qemuProcessStart): Likewise.
* src/qemu/qemu_migration.c (qemuMigrationToFile)
(qemuMigrationWaitForCompletion, qemuMigrationUpdateJobStatus)
(qemuMigrationJobStart, qemuDomainMigrateGraphicsRelocate)
(doNativeMigrate, doTunnelMigrate, qemuMigrationPerformJob)
(qemuMigrationPerformPhase, qemuMigrationFinish)
(qemuMigrationConfirm): Likewise.
* src/qemu/qemu_hotplug.c: Drop unneeded ignore_value.
2011-07-28 23:18:24 +00:00
|
|
|
qemuDomainObjEnterMonitor(driver, vm);
|
2011-07-19 00:27:39 +00:00
|
|
|
ignore_value(qemuMonitorMigrateCancel(priv->mon));
|
2014-12-16 09:40:58 +00:00
|
|
|
if (qemuDomainObjExitMonitor(driver, vm) < 0)
|
|
|
|
return -1;
|
2011-07-04 21:33:39 +00:00
|
|
|
/* resume the domain but only if it was paused as a result of
|
2012-10-08 14:34:19 +00:00
|
|
|
* running a migration-to-file operation. Although we are
|
|
|
|
* recovering an async job, this function is run at startup
|
|
|
|
* and must resume things using sync monitor connections. */
|
|
|
|
if (state == VIR_DOMAIN_PAUSED &&
|
|
|
|
((job->asyncJob == QEMU_ASYNC_JOB_DUMP &&
|
|
|
|
reason == VIR_DOMAIN_PAUSED_DUMP) ||
|
|
|
|
(job->asyncJob == QEMU_ASYNC_JOB_SAVE &&
|
|
|
|
reason == VIR_DOMAIN_PAUSED_SAVE) ||
|
|
|
|
(job->asyncJob == QEMU_ASYNC_JOB_SNAPSHOT &&
|
2017-01-05 09:34:17 +00:00
|
|
|
(reason == VIR_DOMAIN_PAUSED_SNAPSHOT ||
|
|
|
|
reason == VIR_DOMAIN_PAUSED_MIGRATION)) ||
|
2012-10-08 14:34:19 +00:00
|
|
|
reason == VIR_DOMAIN_PAUSED_UNKNOWN)) {
|
|
|
|
if (qemuProcessStartCPUs(driver, vm, conn,
|
|
|
|
VIR_DOMAIN_RUNNING_UNPAUSED,
|
|
|
|
QEMU_ASYNC_JOB_NONE) < 0) {
|
|
|
|
VIR_WARN("Could not resume domain '%s' after migration to file",
|
|
|
|
vm->def->name);
|
2011-07-04 21:33:39 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
2015-10-21 08:55:43 +00:00
|
|
|
case QEMU_ASYNC_JOB_START:
|
|
|
|
/* Already handled in VIR_DOMAIN_PAUSED_STARTING_UP check. */
|
|
|
|
break;
|
|
|
|
|
2011-07-04 21:33:39 +00:00
|
|
|
case QEMU_ASYNC_JOB_NONE:
|
|
|
|
case QEMU_ASYNC_JOB_LAST:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!virDomainObjIsActive(vm))
|
|
|
|
return -1;
|
|
|
|
|
2012-04-06 17:42:34 +00:00
|
|
|
/* In case any special handling is added for job type that has been ignored
|
|
|
|
* before, QEMU_DOMAIN_TRACK_JOBS (from qemu_domain.h) needs to be updated
|
|
|
|
* for the job to be properly tracked in domain state XML.
|
|
|
|
*/
|
2011-07-04 21:33:39 +00:00
|
|
|
switch (job->active) {
|
|
|
|
case QEMU_JOB_QUERY:
|
|
|
|
/* harmless */
|
|
|
|
break;
|
|
|
|
|
|
|
|
case QEMU_JOB_DESTROY:
|
|
|
|
VIR_DEBUG("Domain %s should have already been destroyed",
|
|
|
|
vm->def->name);
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
case QEMU_JOB_SUSPEND:
|
|
|
|
/* mostly harmless */
|
|
|
|
break;
|
|
|
|
|
|
|
|
case QEMU_JOB_MODIFY:
|
|
|
|
/* XXX depending on the command we may be in an inconsistent state and
|
|
|
|
* we should probably fall back to "monitor error" state and refuse to
|
|
|
|
*/
|
|
|
|
break;
|
|
|
|
|
2011-07-19 00:27:36 +00:00
|
|
|
case QEMU_JOB_MIGRATION_OP:
|
2011-07-19 00:27:39 +00:00
|
|
|
case QEMU_JOB_ABORT:
|
2011-07-04 21:33:39 +00:00
|
|
|
case QEMU_JOB_ASYNC:
|
|
|
|
case QEMU_JOB_ASYNC_NESTED:
|
|
|
|
/* async job was already handled above */
|
|
|
|
case QEMU_JOB_NONE:
|
|
|
|
case QEMU_JOB_LAST:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
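/* Illustrative sketch (not part of the original file): the sync vs. async
 * monitor pattern described in the "mixing sync and async monitor jobs"
 * note above.  A caller that already holds an async job passes it so the
 * monitor enter can open a nested job; names and signatures below are
 * assumed from the calls used elsewhere in this file. */
static int
exampleCancelMigration(virQEMUDriverPtr driver,
                       virDomainObjPtr vm,
                       qemuDomainAsyncJob asyncJob)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;

    /* enter the monitor on behalf of @asyncJob (nested job if needed) */
    if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
        return -1;

    ignore_value(qemuMonitorMigrateCancel(priv->mon));

    /* exiting can fail if the domain died while we held the monitor */
    if (qemuDomainObjExitMonitor(driver, vm) < 0)
        return -1;

    return 0;
}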
|
|
|
|
|
2013-07-19 13:08:29 +00:00
|
|
|
static int
|
|
|
|
qemuProcessUpdateDevices(virQEMUDriverPtr driver,
|
|
|
|
virDomainObjPtr vm)
|
|
|
|
{
|
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
|
|
|
virDomainDeviceDef dev;
|
2016-08-16 10:20:56 +00:00
|
|
|
const char **qemuDevices;
|
2013-07-19 13:08:29 +00:00
|
|
|
char **old;
|
|
|
|
char **tmp;
|
|
|
|
int ret = -1;
|
|
|
|
|
|
|
|
if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_DEVICE_DEL_EVENT))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
old = priv->qemuDevices;
|
|
|
|
priv->qemuDevices = NULL;
|
2014-08-12 02:54:42 +00:00
|
|
|
if (qemuDomainUpdateDeviceList(driver, vm, QEMU_ASYNC_JOB_NONE) < 0)
|
2013-07-19 13:08:29 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
2016-08-16 10:20:56 +00:00
|
|
|
qemuDevices = (const char **) priv->qemuDevices;
|
2013-07-19 13:08:29 +00:00
|
|
|
if ((tmp = old)) {
|
|
|
|
while (*tmp) {
|
2016-11-25 08:18:35 +00:00
|
|
|
if (!virStringListHasString(qemuDevices, *tmp) &&
|
2014-12-16 14:50:20 +00:00
|
|
|
virDomainDefFindDevice(vm->def, *tmp, &dev, false) == 0 &&
|
|
|
|
qemuDomainRemoveDevice(driver, vm, &dev) < 0) {
|
|
|
|
goto cleanup;
|
|
|
|
}
|
2013-07-19 13:08:29 +00:00
|
|
|
tmp++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
ret = 0;
|
|
|
|
|
2014-03-25 06:49:44 +00:00
|
|
|
cleanup:
|
2016-11-25 08:18:35 +00:00
|
|
|
virStringListFree(old);
|
2013-07-19 13:08:29 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2016-03-28 13:30:31 +00:00
|
|
|
static int
|
|
|
|
qemuDomainPerfRestart(virDomainObjPtr vm)
|
|
|
|
{
|
|
|
|
size_t i;
|
|
|
|
virDomainDefPtr def = vm->def;
|
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
|
|
|
|
2016-04-27 13:22:33 +00:00
|
|
|
if (!(priv->perf = virPerfNew()))
|
2016-03-28 13:30:31 +00:00
|
|
|
return -1;
|
|
|
|
|
|
|
|
for (i = 0; i < VIR_PERF_EVENT_LAST; i++) {
|
2016-06-28 12:37:29 +00:00
|
|
|
if (def->perf.events[i] &&
|
|
|
|
def->perf.events[i] == VIR_TRISTATE_BOOL_YES) {
|
2016-04-27 12:58:32 +00:00
|
|
|
|
|
|
|
/* Failure to re-enable the perf event should not be fatal */
|
|
|
|
if (virPerfEventEnable(priv->perf, i, vm->pid) < 0)
|
2016-06-28 12:37:29 +00:00
|
|
|
def->perf.events[i] = VIR_TRISTATE_BOOL_NO;
|
2016-03-28 13:30:31 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-10-31 15:49:49 +00:00
|
|
|
|
|
|
|
static void
|
|
|
|
qemuProcessReconnectCheckMemAliasOrderMismatch(virDomainObjPtr vm)
|
|
|
|
{
|
|
|
|
size_t i;
|
|
|
|
int aliasidx;
|
|
|
|
virDomainDefPtr def = vm->def;
|
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
|
|
|
|
|
|
|
if (!virDomainDefHasMemoryHotplug(def) || def->nmems == 0)
|
|
|
|
return;
|
|
|
|
|
|
|
|
for (i = 0; i < def->nmems; i++) {
|
|
|
|
aliasidx = qemuDomainDeviceAliasIndex(&def->mems[i]->info, "dimm");
|
|
|
|
|
|
|
|
if (def->mems[i]->info.addr.dimm.slot != aliasidx) {
|
|
|
|
priv->memAliasOrderMismatch = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2017-06-07 11:38:14 +00:00
|
|
|
static bool
|
2017-06-07 12:47:37 +00:00
|
|
|
qemuProcessNeedHugepagesPath(virDomainDefPtr def,
|
|
|
|
virDomainMemoryDefPtr mem)
|
2017-06-07 11:38:14 +00:00
|
|
|
{
|
|
|
|
const long system_pagesize = virGetSystemPageSizeKB();
|
|
|
|
size_t i;
|
|
|
|
|
|
|
|
if (def->mem.source == VIR_DOMAIN_MEMORY_SOURCE_FILE)
|
|
|
|
return true;
|
|
|
|
|
|
|
|
for (i = 0; i < def->mem.nhugepages; i++) {
|
|
|
|
if (def->mem.hugepages[i].size != system_pagesize)
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < def->nmems; i++) {
|
|
|
|
if (def->mems[i]->model == VIR_DOMAIN_MEMORY_MODEL_DIMM &&
|
|
|
|
def->mems[i]->pagesize &&
|
|
|
|
def->mems[i]->pagesize != system_pagesize)
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2017-06-07 12:47:37 +00:00
|
|
|
if (mem &&
|
|
|
|
mem->model == VIR_DOMAIN_MEMORY_MODEL_DIMM &&
|
|
|
|
mem->pagesize &&
|
|
|
|
mem->pagesize != system_pagesize)
|
|
|
|
return true;
|
|
|
|
|
2017-06-07 11:38:14 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2017-06-07 12:47:37 +00:00
|
|
|
int
|
2016-11-22 12:21:51 +00:00
|
|
|
qemuProcessBuildDestroyHugepagesPath(virQEMUDriverPtr driver,
|
|
|
|
virDomainObjPtr vm,
|
2017-06-07 12:47:37 +00:00
|
|
|
virDomainMemoryDefPtr mem,
|
2016-11-22 12:21:51 +00:00
|
|
|
bool build)
|
|
|
|
{
|
|
|
|
virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
|
|
|
|
char *hugepagePath = NULL;
|
|
|
|
size_t i;
|
2017-06-07 11:38:14 +00:00
|
|
|
bool shouldBuild = false;
|
2016-11-22 12:21:51 +00:00
|
|
|
int ret = -1;
|
|
|
|
|
2017-06-07 11:38:14 +00:00
|
|
|
if (build)
|
2017-06-07 12:47:37 +00:00
|
|
|
shouldBuild = qemuProcessNeedHugepagesPath(vm->def, mem);
|
2017-06-07 11:38:14 +00:00
|
|
|
|
|
|
|
if (!build || shouldBuild) {
|
2016-11-22 12:21:51 +00:00
|
|
|
for (i = 0; i < cfg->nhugetlbfs; i++) {
|
|
|
|
VIR_FREE(hugepagePath);
|
|
|
|
hugepagePath = qemuGetDomainHugepagePath(vm->def, &cfg->hugetlbfs[i]);
|
|
|
|
|
|
|
|
if (!hugepagePath)
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
if (build) {
|
2017-06-07 12:47:37 +00:00
|
|
|
if (virFileExists(hugepagePath)) {
|
|
|
|
ret = 0;
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2016-11-22 12:21:51 +00:00
|
|
|
if (virFileMakePathWithMode(hugepagePath, 0700) < 0) {
|
|
|
|
virReportSystemError(errno,
|
|
|
|
_("Unable to create %s"),
|
|
|
|
hugepagePath);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2017-02-13 13:36:53 +00:00
|
|
|
if (qemuSecurityDomainSetPathLabel(driver->securityManager,
|
|
|
|
vm->def, hugepagePath) < 0) {
|
2016-11-22 12:21:51 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
"%s", _("Unable to set huge path in security driver"));
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
} else {
|
2017-06-20 15:55:21 +00:00
|
|
|
if (rmdir(hugepagePath) < 0 &&
|
|
|
|
errno != ENOENT)
|
2016-11-22 12:21:51 +00:00
|
|
|
VIR_WARN("Unable to remove hugepage path: %s (errno=%d)",
|
|
|
|
hugepagePath, errno);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = 0;
|
|
|
|
cleanup:
|
|
|
|
VIR_FREE(hugepagePath);
|
|
|
|
virObjectUnref(cfg);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
static int
|
|
|
|
qemuProcessVNCAllocatePorts(virQEMUDriverPtr driver,
|
|
|
|
virDomainGraphicsDefPtr graphics,
|
|
|
|
bool allocate)
|
2011-02-14 16:09:39 +00:00
|
|
|
{
|
2017-07-11 13:53:58 +00:00
|
|
|
unsigned short port;
|
2014-11-05 13:28:57 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
if (!allocate) {
|
|
|
|
if (graphics->data.vnc.autoport)
|
|
|
|
graphics->data.vnc.port = 5900;
|
2014-12-04 15:13:31 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
return 0;
|
|
|
|
}
|
2016-06-15 11:48:19 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
if (graphics->data.vnc.autoport) {
|
|
|
|
if (virPortAllocatorAcquire(driver->remotePorts, &port) < 0)
|
|
|
|
return -1;
|
|
|
|
graphics->data.vnc.port = port;
|
|
|
|
}
|
2016-02-12 08:58:22 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
if (graphics->data.vnc.websocket == -1) {
|
|
|
|
if (virPortAllocatorAcquire(driver->webSocketPorts, &port) < 0)
|
|
|
|
return -1;
|
|
|
|
graphics->data.vnc.websocket = port;
|
|
|
|
graphics->data.vnc.websocketGenerated = true;
|
|
|
|
}
|
2014-12-03 13:22:05 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
return 0;
|
|
|
|
}
|
2016-03-29 22:22:46 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
static int
|
|
|
|
qemuProcessSPICEAllocatePorts(virQEMUDriverPtr driver,
|
|
|
|
virDomainGraphicsDefPtr graphics,
|
|
|
|
bool allocate)
|
|
|
|
{
|
|
|
|
virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
|
|
|
|
unsigned short port = 0;
|
|
|
|
unsigned short tlsPort;
|
|
|
|
size_t i;
|
|
|
|
int defaultMode = graphics->data.spice.defaultMode;
|
|
|
|
int ret = -1;
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
bool needTLSPort = false;
|
|
|
|
bool needPort = false;
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
if (graphics->data.spice.autoport) {
|
|
|
|
/* check if tlsPort or port need allocation */
|
|
|
|
for (i = 0; i < VIR_DOMAIN_GRAPHICS_SPICE_CHANNEL_LAST; i++) {
|
|
|
|
switch (graphics->data.spice.channels[i]) {
|
|
|
|
case VIR_DOMAIN_GRAPHICS_SPICE_CHANNEL_MODE_SECURE:
|
|
|
|
needTLSPort = true;
|
|
|
|
break;
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
case VIR_DOMAIN_GRAPHICS_SPICE_CHANNEL_MODE_INSECURE:
|
|
|
|
needPort = true;
|
|
|
|
break;
|
2013-05-03 18:07:29 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
case VIR_DOMAIN_GRAPHICS_SPICE_CHANNEL_MODE_ANY:
|
|
|
|
/* default mode will be used */
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
switch (defaultMode) {
|
|
|
|
case VIR_DOMAIN_GRAPHICS_SPICE_CHANNEL_MODE_SECURE:
|
|
|
|
needTLSPort = true;
|
|
|
|
break;
|
2013-03-21 14:40:29 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
case VIR_DOMAIN_GRAPHICS_SPICE_CHANNEL_MODE_INSECURE:
|
|
|
|
needPort = true;
|
|
|
|
break;
|
2016-03-28 13:30:31 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
case VIR_DOMAIN_GRAPHICS_SPICE_CHANNEL_MODE_ANY:
|
|
|
|
if (cfg->spiceTLS)
|
|
|
|
needTLSPort = true;
|
|
|
|
needPort = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2013-05-03 18:07:37 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
if (!allocate) {
|
|
|
|
if (needPort || graphics->data.spice.port == -1)
|
|
|
|
graphics->data.spice.port = 5901;
|
2013-05-03 18:07:37 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
if (needTLSPort || graphics->data.spice.tlsPort == -1)
|
|
|
|
graphics->data.spice.tlsPort = 5902;
|
2014-05-16 13:16:18 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
ret = 0;
|
|
|
|
goto cleanup;
|
2013-02-19 12:27:43 +00:00
|
|
|
}
|
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
if (needPort || graphics->data.spice.port == -1) {
|
|
|
|
if (virPortAllocatorAcquire(driver->remotePorts, &port) < 0)
|
|
|
|
goto cleanup;
|
2011-05-05 11:50:25 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
graphics->data.spice.port = port;
|
|
|
|
|
|
|
|
if (!graphics->data.spice.autoport)
|
|
|
|
graphics->data.spice.portReserved = true;
|
2011-07-12 09:45:16 +00:00
|
|
|
}
|
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
if (needTLSPort || graphics->data.spice.tlsPort == -1) {
|
|
|
|
if (!cfg->spiceTLS) {
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
|
|
|
|
_("Auto allocation of spice TLS port requested "
|
|
|
|
"but spice TLS is disabled in qemu.conf"));
|
|
|
|
goto cleanup;
|
|
|
|
}
|
2011-05-04 11:55:38 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
if (virPortAllocatorAcquire(driver->remotePorts, &tlsPort) < 0)
|
|
|
|
goto cleanup;
|
2011-09-27 12:56:17 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
graphics->data.spice.tlsPort = tlsPort;
|
2016-11-22 12:21:51 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
if (!graphics->data.spice.autoport)
|
|
|
|
graphics->data.spice.tlsPortReserved = true;
|
2016-11-03 20:33:32 +00:00
|
|
|
}
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
ret = 0;
|
2015-05-05 11:24:41 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
cleanup:
|
|
|
|
virObjectUnref(cfg);
|
|
|
|
return ret;
|
|
|
|
}
|
2016-08-05 12:48:27 +00:00
|
|
|
|
2017-06-06 05:39:25 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
static int
|
|
|
|
qemuValidateCpuCount(virDomainDefPtr def,
|
|
|
|
virQEMUCapsPtr qemuCaps)
|
|
|
|
{
|
|
|
|
unsigned int maxCpus = virQEMUCapsGetMachineMaxCpus(qemuCaps, def->os.machine);
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
if (virDomainDefGetVcpus(def) == 0) {
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
|
|
|
|
_("Domain requires at least 1 vCPU"));
|
|
|
|
return -1;
|
|
|
|
}
|
2011-07-04 06:27:12 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
if (maxCpus > 0 && virDomainDefGetVcpusMax(def) > maxCpus) {
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
|
|
|
|
_("Maximum CPUs greater than specified machine type limit"));
|
|
|
|
return -1;
|
|
|
|
}
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
return 0;
|
|
|
|
}
|
2011-09-13 13:49:50 +00:00
|
|
|
|
2017-03-15 12:03:21 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
static int
|
|
|
|
qemuProcessVerifyHypervFeatures(virDomainDefPtr def,
|
|
|
|
virCPUDataPtr cpu)
|
|
|
|
{
|
|
|
|
char *cpuFeature;
|
|
|
|
size_t i;
|
|
|
|
int rc;
|
2016-10-31 15:49:49 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
for (i = 0; i < VIR_DOMAIN_HYPERV_LAST; i++) {
|
|
|
|
/* always supported string property */
|
|
|
|
if (i == VIR_DOMAIN_HYPERV_VENDOR_ID)
|
|
|
|
continue;
|
2016-01-08 16:03:48 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
if (def->hyperv_features[i] != VIR_TRISTATE_SWITCH_ON)
|
|
|
|
continue;
|
2011-05-05 11:50:25 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
if (virAsprintf(&cpuFeature, "__kvm_hv_%s",
|
|
|
|
virDomainHypervTypeToString(i)) < 0)
|
|
|
|
return -1;
|
Add some missing hook functions
A core use case of the hook scripts is to be able to do things
to a guest's network configuration. It is possible to hook into
the 'start' operation for a QEMU guest which runs just before
the guest is started. The TAP devices will exist at this point,
but the QEMU process will not. It can be desirable to have a
'started' hook too, which runs once QEMU has started.
If libvirtd is restarted it will re-populate firewall rules,
but there is no QEMU hook to trigger for existing domains.
This is solved with a 'reconnect' hook.
Finally, if attaching to an external QEMU process there needs
to be an 'attach' hook script.
This all also applies to the LXC driver
* docs/hooks.html.in: Document new operations
* src/util/hooks.c, src/util/hooks.c: Add 'started', 'reconnect'
and 'attach' operations for QEMU. Add 'prepare', 'started',
'release' and 'reconnect' operations for LXC
* src/lxc/lxc_driver.c: Add hooks for 'prepare', 'started',
'release' and 'reconnect' operations
* src/qemu/qemu_process.c: Add hooks for 'started', 'reconnect'
and 'reconnect' operations
2012-05-28 14:04:31 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
rc = virCPUDataCheckFeature(cpu, cpuFeature);
|
|
|
|
VIR_FREE(cpuFeature);
|
2012-05-28 14:04:31 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
if (rc < 0)
|
|
|
|
return -1;
|
|
|
|
else if (rc == 1)
|
|
|
|
continue;
|
2012-05-28 14:04:31 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
switch ((virDomainHyperv) i) {
|
|
|
|
case VIR_DOMAIN_HYPERV_RELAXED:
|
|
|
|
case VIR_DOMAIN_HYPERV_VAPIC:
|
|
|
|
case VIR_DOMAIN_HYPERV_SPINLOCKS:
|
|
|
|
VIR_WARN("host doesn't support hyperv '%s' feature",
|
|
|
|
virDomainHypervTypeToString(i));
|
|
|
|
break;
|
2012-10-31 19:03:55 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
case VIR_DOMAIN_HYPERV_VPINDEX:
|
|
|
|
case VIR_DOMAIN_HYPERV_RUNTIME:
|
|
|
|
case VIR_DOMAIN_HYPERV_SYNIC:
|
|
|
|
case VIR_DOMAIN_HYPERV_STIMER:
|
|
|
|
case VIR_DOMAIN_HYPERV_RESET:
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
|
|
|
|
_("host doesn't support hyperv '%s' feature"),
|
|
|
|
virDomainHypervTypeToString(i));
|
|
|
|
return -1;
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
/* coverity[dead_error_begin] */
|
|
|
|
case VIR_DOMAIN_HYPERV_VENDOR_ID:
|
|
|
|
case VIR_DOMAIN_HYPERV_LAST:
|
|
|
|
break;
|
2011-09-21 19:02:44 +00:00
|
|
|
}
|
2011-02-14 16:09:39 +00:00
|
|
|
}
|
2017-07-11 13:53:58 +00:00
|
|
|
|
|
|
|
return 0;
|
2011-08-16 10:51:36 +00:00
|
|
|
}
|
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
|
2013-01-11 13:54:15 +00:00
|
|
|
static int
|
2017-07-11 13:53:58 +00:00
|
|
|
qemuProcessVerifyKVMFeatures(virDomainDefPtr def,
|
|
|
|
virCPUDataPtr cpu)
|
2011-08-16 10:51:36 +00:00
|
|
|
{
|
2017-07-11 13:53:58 +00:00
|
|
|
int rc = 0;
|
2011-08-16 10:51:36 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
if (def->features[VIR_DOMAIN_FEATURE_PVSPINLOCK] != VIR_TRISTATE_SWITCH_ON)
|
2013-10-28 11:49:18 +00:00
|
|
|
return 0;
|
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
rc = virCPUDataCheckFeature(cpu, VIR_CPU_x86_KVM_PV_UNHALT);
|
|
|
|
|
|
|
|
if (rc <= 0) {
|
|
|
|
if (rc == 0)
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
|
|
|
|
_("host doesn't support paravirtual spinlocks"));
|
2013-01-11 13:54:15 +00:00
|
|
|
return -1;
|
2017-07-11 13:53:58 +00:00
|
|
|
}
|
2011-08-16 10:51:36 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
return 0;
|
|
|
|
}
|
2011-08-16 10:51:36 +00:00
|
|
|
|
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
static int
|
|
|
|
qemuProcessVerifyCPUFeatures(virDomainDefPtr def,
|
|
|
|
virCPUDataPtr cpu)
|
|
|
|
{
|
|
|
|
int rc;
|
2011-09-16 13:44:43 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
rc = virCPUCheckFeature(def->os.arch, def->cpu, "invtsc");
|
2011-08-16 10:51:36 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
if (rc < 0) {
|
2014-12-03 13:22:05 +00:00
|
|
|
return -1;
|
2017-07-11 13:53:58 +00:00
|
|
|
} else if (rc == 1) {
|
|
|
|
rc = virCPUDataCheckFeature(cpu, "invtsc");
|
|
|
|
if (rc <= 0) {
|
|
|
|
if (rc == 0) {
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
|
|
|
|
_("host doesn't support invariant TSC"));
|
|
|
|
}
|
|
|
|
return -1;
|
|
|
|
}
|
2014-12-03 13:22:05 +00:00
|
|
|
}
|
2011-08-16 10:51:36 +00:00
|
|
|
|
2013-01-11 13:54:15 +00:00
|
|
|
return 0;
|
2011-02-14 16:09:39 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2016-05-09 12:30:27 +00:00
|
|
|
static int
|
2017-07-11 13:53:58 +00:00
|
|
|
qemuProcessFetchGuestCPU(virQEMUDriverPtr driver,
|
|
|
|
virDomainObjPtr vm,
|
|
|
|
qemuDomainAsyncJob asyncJob,
|
|
|
|
virCPUDataPtr *enabled,
|
|
|
|
virCPUDataPtr *disabled)
|
2013-04-30 14:26:43 +00:00
|
|
|
{
|
2017-07-11 13:53:58 +00:00
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
|
|
|
virCPUDataPtr dataEnabled = NULL;
|
|
|
|
virCPUDataPtr dataDisabled = NULL;
|
|
|
|
int rc;
|
2013-04-30 14:26:43 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
*enabled = NULL;
|
|
|
|
*disabled = NULL;
|
2016-04-25 13:24:48 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
if (!ARCH_IS_X86(vm->def->os.arch))
|
2016-04-25 13:24:48 +00:00
|
|
|
return 0;
|
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
|
|
|
|
goto error;
|
2013-04-30 14:26:43 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
rc = qemuMonitorGetGuestCPU(priv->mon, vm->def->os.arch,
|
|
|
|
&dataEnabled, &dataDisabled);
|
2013-04-30 14:26:43 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
if (qemuDomainObjExitMonitor(driver, vm) < 0)
|
|
|
|
goto error;
|
|
|
|
|
|
|
|
if (rc == -1)
|
|
|
|
goto error;
|
|
|
|
|
|
|
|
*enabled = dataEnabled;
|
|
|
|
*disabled = dataDisabled;
|
2013-04-30 14:26:43 +00:00
|
|
|
return 0;
|
2017-07-11 13:53:58 +00:00
|
|
|
|
|
|
|
error:
|
|
|
|
virCPUDataFree(dataEnabled);
|
|
|
|
virCPUDataFree(dataDisabled);
|
|
|
|
return -1;
|
2013-04-30 14:26:43 +00:00
|
|
|
}
|
2013-04-22 14:26:57 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
|
2016-05-09 12:30:27 +00:00
|
|
|
static int
|
2017-07-11 13:53:58 +00:00
|
|
|
qemuProcessVerifyCPU(virDomainObjPtr vm,
|
|
|
|
virCPUDataPtr cpu)
|
2013-04-22 14:26:57 +00:00
|
|
|
{
|
2017-07-11 13:53:58 +00:00
|
|
|
virDomainDefPtr def = vm->def;
|
2013-04-23 05:01:38 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
if (!cpu)
|
|
|
|
return 0;
|
2017-07-11 11:26:12 +00:00
|
|
|
|
|
|
|
if (qemuProcessVerifyKVMFeatures(def, cpu) < 0 ||
|
|
|
|
qemuProcessVerifyHypervFeatures(def, cpu) < 0)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
if (!def->cpu ||
|
|
|
|
(def->cpu->mode == VIR_CPU_MODE_CUSTOM &&
|
|
|
|
!def->cpu->model))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (qemuProcessVerifyCPUFeatures(def, cpu) < 0)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2017-07-11 11:51:17 +00:00
|
|
|
static int
|
|
|
|
qemuProcessUpdateLiveGuestCPU(virDomainObjPtr vm,
|
|
|
|
virCPUDataPtr enabled,
|
|
|
|
virCPUDataPtr disabled)
|
|
|
|
{
|
|
|
|
virDomainDefPtr def = vm->def;
|
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
|
|
|
virCPUDefPtr orig = NULL;
|
|
|
|
int rc;
|
|
|
|
int ret = -1;
|
|
|
|
|
|
|
|
if (!enabled)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (!def->cpu ||
|
|
|
|
(def->cpu->mode == VIR_CPU_MODE_CUSTOM &&
|
|
|
|
!def->cpu->model))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (!(orig = virCPUDefCopy(def->cpu)))
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
if ((rc = virCPUUpdateLive(def->os.arch, def->cpu, enabled, disabled)) < 0) {
|
|
|
|
goto cleanup;
|
|
|
|
} else if (rc == 0) {
|
|
|
|
/* Store the original CPU in priv if QEMU changed it and we didn't
|
|
|
|
* get the original CPU via migration, restore, or snapshot revert.
|
|
|
|
*/
|
|
|
|
if (!priv->origCPU && !virCPUDefIsEqual(def->cpu, orig, false))
|
|
|
|
VIR_STEAL_PTR(priv->origCPU, orig);
|
|
|
|
|
|
|
|
def->cpu->check = VIR_CPU_CHECK_FULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = 0;
|
|
|
|
|
|
|
|
cleanup:
|
|
|
|
virCPUDefFree(orig);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2017-03-10 23:45:37 +00:00
|
|
|
static int
|
2017-07-11 11:30:09 +00:00
|
|
|
qemuProcessUpdateAndVerifyCPU(virQEMUDriverPtr driver,
|
2017-03-13 11:32:02 +00:00
|
|
|
virDomainObjPtr vm,
|
|
|
|
qemuDomainAsyncJob asyncJob)
|
2013-10-14 09:35:00 +00:00
|
|
|
{
|
2017-03-10 23:45:37 +00:00
|
|
|
virCPUDataPtr cpu = NULL;
|
2017-03-13 11:32:02 +00:00
|
|
|
virCPUDataPtr disabled = NULL;
|
2017-03-10 23:45:37 +00:00
|
|
|
int ret = -1;
|
2013-10-14 09:35:00 +00:00
|
|
|
|
2017-07-11 11:18:45 +00:00
|
|
|
if (qemuProcessFetchGuestCPU(driver, vm, asyncJob, &cpu, &disabled) < 0)
|
|
|
|
goto cleanup;
|
2013-10-14 09:35:00 +00:00
|
|
|
|
2017-07-11 11:26:12 +00:00
|
|
|
if (qemuProcessVerifyCPU(vm, cpu) < 0)
|
|
|
|
goto cleanup;
|
2014-05-06 11:55:44 +00:00
|
|
|
|
2017-07-11 11:51:17 +00:00
|
|
|
if (qemuProcessUpdateLiveGuestCPU(vm, cpu, disabled) < 0)
|
|
|
|
goto cleanup;
|
2013-10-14 09:35:00 +00:00
|
|
|
|
2017-03-10 23:45:37 +00:00
|
|
|
ret = 0;
|
2013-10-14 09:35:00 +00:00
|
|
|
|
2014-03-25 06:49:44 +00:00
|
|
|
cleanup:
|
2017-03-10 23:45:37 +00:00
|
|
|
virCPUDataFree(cpu);
|
2017-03-13 11:32:02 +00:00
|
|
|
virCPUDataFree(disabled);
|
2013-10-14 09:35:00 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2017-07-11 12:16:40 +00:00
|
|
|
static int
|
|
|
|
qemuProcessUpdateCPU(virQEMUDriverPtr driver,
|
|
|
|
virDomainObjPtr vm,
|
|
|
|
qemuDomainAsyncJob asyncJob)
|
|
|
|
{
|
|
|
|
virCPUDataPtr cpu = NULL;
|
|
|
|
virCPUDataPtr disabled = NULL;
|
|
|
|
int ret = -1;
|
|
|
|
|
|
|
|
if (qemuProcessFetchGuestCPU(driver, vm, asyncJob, &cpu, &disabled) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
if (qemuProcessUpdateLiveGuestCPU(vm, cpu, disabled) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
ret = 0;
|
|
|
|
|
|
|
|
cleanup:
|
|
|
|
virCPUDataFree(cpu);
|
|
|
|
virCPUDataFree(disabled);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2014-08-07 14:59:21 +00:00
|
|
|
static int
|
|
|
|
qemuPrepareNVRAM(virQEMUDriverConfigPtr cfg,
|
2016-03-09 15:10:54 +00:00
|
|
|
virDomainObjPtr vm)
|
2014-08-07 14:59:21 +00:00
|
|
|
{
|
|
|
|
int ret = -1;
|
|
|
|
int srcFD = -1;
|
|
|
|
int dstFD = -1;
|
2014-11-12 17:09:46 +00:00
|
|
|
virDomainLoaderDefPtr loader = vm->def->os.loader;
|
2014-08-07 14:59:21 +00:00
|
|
|
bool created = false;
|
2016-03-09 15:10:54 +00:00
|
|
|
const char *master_nvram_path;
|
|
|
|
ssize_t r;
|
2014-08-07 14:59:21 +00:00
|
|
|
|
2016-03-09 15:10:54 +00:00
|
|
|
if (!loader || !loader->nvram || virFileExists(loader->nvram))
|
2014-08-07 14:59:21 +00:00
|
|
|
return 0;
|
|
|
|
|
2016-03-09 15:10:54 +00:00
|
|
|
master_nvram_path = loader->templt;
|
|
|
|
if (!loader->templt) {
|
|
|
|
size_t i;
|
2016-05-17 22:45:27 +00:00
|
|
|
for (i = 0; i < cfg->nfirmwares; i++) {
|
|
|
|
if (STREQ(cfg->firmwares[i]->name, loader->path)) {
|
|
|
|
master_nvram_path = cfg->firmwares[i]->nvram;
|
2016-03-09 15:10:54 +00:00
|
|
|
break;
|
2014-08-07 14:59:21 +00:00
|
|
|
}
|
|
|
|
}
|
2016-03-09 15:10:54 +00:00
|
|
|
}
|
2014-08-07 14:59:21 +00:00
|
|
|
|
2016-03-09 15:10:54 +00:00
|
|
|
if (!master_nvram_path) {
|
|
|
|
virReportError(VIR_ERR_OPERATION_FAILED,
|
|
|
|
_("unable to find any master var store for "
|
|
|
|
"loader: %s"), loader->path);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
2014-08-07 14:59:21 +00:00
|
|
|
|
2016-03-09 15:10:54 +00:00
|
|
|
if ((srcFD = virFileOpenAs(master_nvram_path, O_RDONLY,
|
|
|
|
0, -1, -1, 0)) < 0) {
|
|
|
|
virReportSystemError(-srcFD,
|
|
|
|
_("Failed to open file '%s'"),
|
|
|
|
master_nvram_path);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
if ((dstFD = virFileOpenAs(loader->nvram,
|
|
|
|
O_WRONLY | O_CREAT | O_EXCL,
|
|
|
|
S_IRUSR | S_IWUSR,
|
|
|
|
cfg->user, cfg->group, 0)) < 0) {
|
|
|
|
virReportSystemError(-dstFD,
|
|
|
|
_("Failed to create file '%s'"),
|
|
|
|
loader->nvram);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
created = true;
|
2014-08-07 14:59:21 +00:00
|
|
|
|
2016-03-09 15:10:54 +00:00
|
|
|
do {
|
|
|
|
char buf[1024];
|
2014-08-07 14:59:21 +00:00
|
|
|
|
2016-03-09 15:10:54 +00:00
|
|
|
if ((r = saferead(srcFD, buf, sizeof(buf))) < 0) {
|
2014-08-07 14:59:21 +00:00
|
|
|
virReportSystemError(errno,
|
2016-03-09 15:10:54 +00:00
|
|
|
_("Unable to read from file '%s'"),
|
2014-08-07 14:59:21 +00:00
|
|
|
master_nvram_path);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
2016-03-09 15:10:54 +00:00
|
|
|
|
|
|
|
if (safewrite(dstFD, buf, r) < 0) {
|
2014-08-07 14:59:21 +00:00
|
|
|
virReportSystemError(errno,
|
2016-03-09 15:10:54 +00:00
|
|
|
_("Unable to write to file '%s'"),
|
2014-08-07 14:59:21 +00:00
|
|
|
loader->nvram);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
2016-03-09 15:10:54 +00:00
|
|
|
} while (r);
|
|
|
|
|
|
|
|
if (VIR_CLOSE(srcFD) < 0) {
|
|
|
|
virReportSystemError(errno,
|
|
|
|
_("Unable to close file '%s'"),
|
|
|
|
master_nvram_path);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
if (VIR_CLOSE(dstFD) < 0) {
|
|
|
|
virReportSystemError(errno,
|
|
|
|
_("Unable to close file '%s'"),
|
|
|
|
loader->nvram);
|
|
|
|
goto cleanup;
|
2014-08-07 14:59:21 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
ret = 0;
|
|
|
|
cleanup:
|
|
|
|
/* We successfully generated the nvram path, but failed to
|
|
|
|
* copy the file content. Roll back. */
|
|
|
|
if (ret < 0) {
|
|
|
|
if (created)
|
|
|
|
unlink(loader->nvram);
|
|
|
|
}
|
|
|
|
|
|
|
|
VIR_FORCE_CLOSE(srcFD);
|
|
|
|
VIR_FORCE_CLOSE(dstFD);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2015-02-02 10:28:30 +00:00
|
|
|
static void
|
|
|
|
qemuLogOperation(virDomainObjPtr vm,
|
|
|
|
const char *msg,
|
2015-11-12 13:02:46 +00:00
|
|
|
virCommandPtr cmd,
|
|
|
|
qemuDomainLogContextPtr logCtxt)
|
2015-02-02 10:28:30 +00:00
|
|
|
{
|
|
|
|
char *timestamp;
|
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
|
|
|
int qemuVersion = virQEMUCapsGetVersion(priv->qemuCaps);
|
|
|
|
const char *package = virQEMUCapsGetPackage(priv->qemuCaps);
|
2015-12-04 17:25:22 +00:00
|
|
|
char *hostname = virGetHostname();
|
2015-02-02 10:28:30 +00:00
|
|
|
|
|
|
|
if ((timestamp = virTimeStringNow()) == NULL)
|
2015-11-12 13:02:46 +00:00
|
|
|
goto cleanup;
|
2015-02-02 10:28:30 +00:00
|
|
|
|
2015-12-04 17:25:22 +00:00
|
|
|
if (qemuDomainLogContextWrite(logCtxt,
|
|
|
|
"%s: %s %s, qemu version: %d.%d.%d%s, hostname: %s\n",
|
2015-11-12 13:02:46 +00:00
|
|
|
timestamp, msg, VIR_LOG_VERSION_STRING,
|
|
|
|
(qemuVersion / 1000000) % 1000,
|
|
|
|
(qemuVersion / 1000) % 1000,
|
|
|
|
qemuVersion % 1000,
|
2015-12-04 17:25:22 +00:00
|
|
|
package ? package : "",
|
|
|
|
hostname ? hostname : "") < 0)
|
2015-11-12 13:02:46 +00:00
|
|
|
goto cleanup;
|
2015-02-02 10:28:30 +00:00
|
|
|
|
2015-11-12 13:02:46 +00:00
|
|
|
if (cmd) {
|
|
|
|
char *args = virCommandToString(cmd);
|
|
|
|
qemuDomainLogContextWrite(logCtxt, "%s\n", args);
|
|
|
|
VIR_FREE(args);
|
|
|
|
}
|
2015-02-02 10:28:30 +00:00
|
|
|
|
|
|
|
cleanup:
|
2015-12-04 17:25:22 +00:00
|
|
|
VIR_FREE(hostname);
|
2015-02-02 10:28:30 +00:00
|
|
|
VIR_FREE(timestamp);
|
|
|
|
}
|
|
|
|
|
2015-11-06 17:41:37 +00:00
|
|
|
|
|
|
|
void
|
|
|
|
qemuProcessIncomingDefFree(qemuProcessIncomingDefPtr inc)
|
|
|
|
{
|
|
|
|
if (!inc)
|
|
|
|
return;
|
|
|
|
|
2016-01-07 23:07:37 +00:00
|
|
|
VIR_FREE(inc->address);
|
2015-11-06 17:41:37 +00:00
|
|
|
VIR_FREE(inc->launchURI);
|
2015-11-11 17:02:23 +00:00
|
|
|
VIR_FREE(inc->deferredURI);
|
2015-11-06 17:41:37 +00:00
|
|
|
VIR_FREE(inc);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This function does not copy @path; the caller is responsible for keeping
|
|
|
|
* the @path pointer valid during the lifetime of the allocated
|
|
|
|
* qemuProcessIncomingDef structure.
|
2017-06-19 15:00:28 +00:00
|
|
|
*
|
|
|
|
* The caller is responsible for closing @fd, calling
|
|
|
|
* qemuProcessIncomingDefFree will NOT close it.
|
2015-11-06 17:41:37 +00:00
|
|
|
*/
|
|
|
|
qemuProcessIncomingDefPtr
|
|
|
|
qemuProcessIncomingDefNew(virQEMUCapsPtr qemuCaps,
|
2016-01-07 23:07:37 +00:00
|
|
|
const char *listenAddress,
|
2015-11-06 17:41:37 +00:00
|
|
|
const char *migrateFrom,
|
|
|
|
int fd,
|
|
|
|
const char *path)
|
|
|
|
{
|
|
|
|
qemuProcessIncomingDefPtr inc = NULL;
|
|
|
|
|
|
|
|
if (qemuMigrationCheckIncoming(qemuCaps, migrateFrom) < 0)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
if (VIR_ALLOC(inc) < 0)
|
|
|
|
return NULL;
|
|
|
|
|
2016-01-07 23:07:37 +00:00
|
|
|
if (VIR_STRDUP(inc->address, listenAddress) < 0)
|
|
|
|
goto error;
|
|
|
|
|
2015-11-06 17:41:37 +00:00
|
|
|
inc->launchURI = qemuMigrationIncomingURI(migrateFrom, fd);
|
|
|
|
if (!inc->launchURI)
|
|
|
|
goto error;
|
|
|
|
|
2015-11-11 17:02:23 +00:00
|
|
|
if (virQEMUCapsGet(qemuCaps, QEMU_CAPS_INCOMING_DEFER)) {
|
|
|
|
inc->deferredURI = inc->launchURI;
|
|
|
|
if (VIR_STRDUP(inc->launchURI, "defer") < 0)
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
2015-11-06 17:41:37 +00:00
|
|
|
inc->fd = fd;
|
|
|
|
inc->path = path;
|
|
|
|
|
|
|
|
return inc;
|
|
|
|
|
|
|
|
error:
|
|
|
|
qemuProcessIncomingDefFree(inc);
|
|
|
|
return NULL;
|
|
|
|
}
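/* Illustrative sketch (not part of the original file): caller-side handling
 * of the ownership rules documented above -- @path must stay valid for the
 * lifetime of the returned structure and @fd is NOT closed by
 * qemuProcessIncomingDefFree().  The migrateFrom URI is made up for the
 * example. */
static void
exampleIncomingUsage(virQEMUCapsPtr qemuCaps)
{
    qemuProcessIncomingDefPtr inc;
    int fd = -1;                 /* no FD-based migration in this example */
    const char *path = NULL;     /* must outlive @inc if non-NULL */

    if (!(inc = qemuProcessIncomingDefNew(qemuCaps, NULL, "tcp:[::]:4444",
                                          fd, path)))
        return;

    /* ... start QEMU using inc->launchURI / inc->deferredURI ... */

    qemuProcessIncomingDefFree(inc);   /* does not close fd */
    VIR_FORCE_CLOSE(fd);               /* closing @fd is the caller's job */
}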
|
|
|
|
|
|
|
|
|
2015-10-21 08:55:43 +00:00
|
|
|
/*
|
|
|
|
* This function starts a new QEMU_ASYNC_JOB_START async job. The user is
|
|
|
|
* responsible for calling qemuProcessEndJob to stop this job and for passing
|
|
|
|
* QEMU_ASYNC_JOB_START as @asyncJob argument to any function requiring this
|
|
|
|
* parameter between qemuProcessBeginJob and qemuProcessEndJob.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
qemuProcessBeginJob(virQEMUDriverPtr driver,
|
2017-04-26 10:00:09 +00:00
|
|
|
virDomainObjPtr vm,
|
|
|
|
virDomainJobOperation operation)
|
2015-10-21 08:55:43 +00:00
|
|
|
{
|
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
|
|
|
|
2017-04-26 10:00:09 +00:00
|
|
|
if (qemuDomainObjBeginAsyncJob(driver, vm, QEMU_ASYNC_JOB_START,
|
|
|
|
operation) < 0)
|
2015-10-21 08:55:43 +00:00
|
|
|
return -1;
|
|
|
|
|
|
|
|
qemuDomainObjSetAsyncJobMask(vm, QEMU_JOB_NONE);
|
|
|
|
priv->job.current->type = VIR_DOMAIN_JOB_UNBOUNDED;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
void
|
|
|
|
qemuProcessEndJob(virQEMUDriverPtr driver,
|
|
|
|
virDomainObjPtr vm)
|
|
|
|
{
|
|
|
|
qemuDomainObjEndAsyncJob(driver, vm);
|
|
|
|
}
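/* Illustrative sketch (not part of the original file): the job lifecycle
 * described above qemuProcessBeginJob().  Any helper invoked between begin
 * and end that takes an async job argument must be given
 * QEMU_ASYNC_JOB_START; qemuProcessUpdateCPU() is used here only as an
 * example of such a helper, and VIR_DOMAIN_JOB_OPERATION_START is assumed
 * to be the matching operation constant. */
static int
exampleStartUnderJob(virQEMUDriverPtr driver,
                     virDomainObjPtr vm)
{
    int ret = -1;

    if (qemuProcessBeginJob(driver, vm, VIR_DOMAIN_JOB_OPERATION_START) < 0)
        return -1;

    if (qemuProcessUpdateCPU(driver, vm, QEMU_ASYNC_JOB_START) < 0)
        goto cleanup;

    ret = 0;

 cleanup:
    qemuProcessEndJob(driver, vm);
    return ret;
}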
|
|
|
|
|
|
|
|
|
2015-10-30 16:59:43 +00:00
|
|
|
static int
|
|
|
|
qemuProcessStartHook(virQEMUDriverPtr driver,
|
|
|
|
virDomainObjPtr vm,
|
|
|
|
virHookQemuOpType op,
|
|
|
|
virHookSubopType subop)
|
|
|
|
{
|
|
|
|
char *xml;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (!virHookPresent(VIR_HOOK_DRIVER_QEMU))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (!(xml = qemuDomainDefFormatXML(driver, vm->def, 0)))
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
ret = virHookCall(VIR_HOOK_DRIVER_QEMU, vm->def->name, op, subop,
|
|
|
|
NULL, xml, NULL);
|
|
|
|
VIR_FREE(xml);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2015-10-30 17:16:53 +00:00
|
|
|
static int
|
2016-05-09 12:20:08 +00:00
|
|
|
qemuProcessGraphicsReservePorts(virQEMUDriverPtr driver,
|
2016-08-13 19:01:38 +00:00
|
|
|
virDomainGraphicsDefPtr graphics)
|
2015-10-30 17:16:53 +00:00
|
|
|
{
|
2016-08-13 19:03:15 +00:00
|
|
|
virDomainGraphicsListenDefPtr glisten;
|
|
|
|
|
|
|
|
if (graphics->nListens <= 0)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
glisten = &graphics->listens[0];
|
|
|
|
|
|
|
|
if (glisten->type != VIR_DOMAIN_GRAPHICS_LISTEN_TYPE_ADDRESS &&
|
|
|
|
glisten->type != VIR_DOMAIN_GRAPHICS_LISTEN_TYPE_NETWORK)
|
|
|
|
return 0;
|
|
|
|
|
2016-11-22 11:09:31 +00:00
|
|
|
switch (graphics->type) {
|
|
|
|
case VIR_DOMAIN_GRAPHICS_TYPE_VNC:
|
|
|
|
if (!graphics->data.vnc.autoport) {
|
|
|
|
if (virPortAllocatorSetUsed(driver->remotePorts,
|
|
|
|
graphics->data.vnc.port,
|
|
|
|
true) < 0)
|
|
|
|
return -1;
|
|
|
|
graphics->data.vnc.portReserved = true;
|
|
|
|
}
|
2016-11-22 11:09:32 +00:00
|
|
|
if (graphics->data.vnc.websocket > 0 &&
|
|
|
|
virPortAllocatorSetUsed(driver->remotePorts,
|
|
|
|
graphics->data.vnc.websocket,
|
|
|
|
true) < 0)
|
|
|
|
return -1;
|
2016-11-22 11:09:31 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case VIR_DOMAIN_GRAPHICS_TYPE_SPICE:
|
|
|
|
if (graphics->data.spice.autoport)
|
|
|
|
return 0;
|
2015-10-30 17:16:53 +00:00
|
|
|
|
2016-08-13 19:01:38 +00:00
|
|
|
if (graphics->data.spice.port > 0) {
|
2015-10-30 17:16:53 +00:00
|
|
|
if (virPortAllocatorSetUsed(driver->remotePorts,
|
2016-08-13 19:01:38 +00:00
|
|
|
graphics->data.spice.port,
|
2015-10-30 17:16:53 +00:00
|
|
|
true) < 0)
|
2016-05-09 12:20:08 +00:00
|
|
|
return -1;
|
2016-08-13 19:01:38 +00:00
|
|
|
graphics->data.spice.portReserved = true;
|
|
|
|
}
|
2015-10-30 17:16:53 +00:00
|
|
|
|
2016-08-13 19:01:38 +00:00
|
|
|
if (graphics->data.spice.tlsPort > 0) {
|
|
|
|
if (virPortAllocatorSetUsed(driver->remotePorts,
|
|
|
|
graphics->data.spice.tlsPort,
|
|
|
|
true) < 0)
|
|
|
|
return -1;
|
|
|
|
graphics->data.spice.tlsPortReserved = true;
|
2015-10-30 17:16:53 +00:00
|
|
|
}
|
2016-11-22 11:09:31 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case VIR_DOMAIN_GRAPHICS_TYPE_SDL:
|
|
|
|
case VIR_DOMAIN_GRAPHICS_TYPE_RDP:
|
|
|
|
case VIR_DOMAIN_GRAPHICS_TYPE_DESKTOP:
|
|
|
|
case VIR_DOMAIN_GRAPHICS_TYPE_LAST:
|
|
|
|
break;
|
2015-10-30 17:16:53 +00:00
|
|
|
}
|
|
|
|
|
2016-05-09 12:20:08 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2016-08-13 18:58:55 +00:00
|
|
|
static int
|
|
|
|
qemuProcessGraphicsAllocatePorts(virQEMUDriverPtr driver,
|
|
|
|
virDomainGraphicsDefPtr graphics,
|
|
|
|
bool allocate)
|
|
|
|
{
|
|
|
|
virDomainGraphicsListenDefPtr glisten;
|
|
|
|
|
|
|
|
if (graphics->nListens <= 0)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
glisten = &graphics->listens[0];
|
|
|
|
|
|
|
|
if (glisten->type != VIR_DOMAIN_GRAPHICS_LISTEN_TYPE_ADDRESS &&
|
|
|
|
glisten->type != VIR_DOMAIN_GRAPHICS_LISTEN_TYPE_NETWORK)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
switch (graphics->type) {
|
|
|
|
case VIR_DOMAIN_GRAPHICS_TYPE_VNC:
|
|
|
|
if (qemuProcessVNCAllocatePorts(driver, graphics, allocate) < 0)
|
|
|
|
return -1;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case VIR_DOMAIN_GRAPHICS_TYPE_SPICE:
|
|
|
|
if (qemuProcessSPICEAllocatePorts(driver, graphics, allocate) < 0)
|
|
|
|
return -1;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case VIR_DOMAIN_GRAPHICS_TYPE_SDL:
|
|
|
|
case VIR_DOMAIN_GRAPHICS_TYPE_RDP:
|
|
|
|
case VIR_DOMAIN_GRAPHICS_TYPE_DESKTOP:
|
|
|
|
case VIR_DOMAIN_GRAPHICS_TYPE_LAST:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2016-05-18 08:52:22 +00:00
|
|
|
static int
|
|
|
|
qemuProcessGraphicsSetupNetworkAddress(virDomainGraphicsListenDefPtr glisten,
|
|
|
|
const char *listenAddr)
|
|
|
|
{
|
|
|
|
int rc;
|
|
|
|
|
|
|
|
/* TODO: reject configuration without network specified for network listen */
|
|
|
|
if (!glisten->network) {
|
|
|
|
if (VIR_STRDUP(glisten->address, listenAddr) < 0)
|
|
|
|
return -1;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
rc = networkGetNetworkAddress(glisten->network, &glisten->address);
|
|
|
|
if (rc <= -2) {
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
|
|
|
|
_("network-based listen isn't possible, "
|
|
|
|
"network driver isn't present"));
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
if (rc < 0)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2016-05-18 11:27:28 +00:00
|
|
|
static int
|
2016-08-13 18:54:58 +00:00
|
|
|
qemuProcessGraphicsSetupListen(virQEMUDriverPtr driver,
|
2016-06-08 11:30:20 +00:00
|
|
|
virDomainGraphicsDefPtr graphics,
|
|
|
|
virDomainObjPtr vm)
|
2016-05-18 11:27:28 +00:00
|
|
|
{
|
2016-06-08 11:30:20 +00:00
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
2016-08-13 18:54:58 +00:00
|
|
|
virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
|
2016-06-08 11:30:20 +00:00
|
|
|
const char *type = virDomainGraphicsTypeToString(graphics->type);
|
2016-05-18 11:27:28 +00:00
|
|
|
char *listenAddr = NULL;
|
2016-06-08 11:30:20 +00:00
|
|
|
bool useSocket = false;
|
2016-05-18 11:27:28 +00:00
|
|
|
size_t i;
|
2016-08-13 18:54:58 +00:00
|
|
|
int ret = -1;
|
2016-05-18 11:27:28 +00:00
|
|
|
|
|
|
|
switch (graphics->type) {
|
|
|
|
case VIR_DOMAIN_GRAPHICS_TYPE_VNC:
|
2016-06-08 11:30:20 +00:00
|
|
|
useSocket = cfg->vncAutoUnixSocket;
|
2016-05-18 11:27:28 +00:00
|
|
|
listenAddr = cfg->vncListen;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case VIR_DOMAIN_GRAPHICS_TYPE_SPICE:
|
2016-05-18 12:11:20 +00:00
|
|
|
useSocket = cfg->spiceAutoUnixSocket;
|
2016-05-18 11:27:28 +00:00
|
|
|
listenAddr = cfg->spiceListen;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case VIR_DOMAIN_GRAPHICS_TYPE_SDL:
|
|
|
|
case VIR_DOMAIN_GRAPHICS_TYPE_RDP:
|
|
|
|
case VIR_DOMAIN_GRAPHICS_TYPE_DESKTOP:
|
|
|
|
case VIR_DOMAIN_GRAPHICS_TYPE_LAST:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < graphics->nListens; i++) {
|
|
|
|
virDomainGraphicsListenDefPtr glisten = &graphics->listens[i];
|
|
|
|
|
|
|
|
switch (glisten->type) {
|
|
|
|
case VIR_DOMAIN_GRAPHICS_LISTEN_TYPE_ADDRESS:
|
2016-06-08 11:30:20 +00:00
|
|
|
if (!glisten->address) {
|
|
|
|
/* If there is no address specified and qemu.conf has
|
|
|
|
* *_auto_unix_socket set we should use unix socket as
|
|
|
|
* default instead of tcp listen. */
|
|
|
|
if (useSocket) {
|
2016-06-08 13:18:25 +00:00
|
|
|
memset(glisten, 0, sizeof(virDomainGraphicsListenDef));
|
|
|
|
if (virAsprintf(&glisten->socket, "%s/%s.sock",
|
2016-06-08 11:30:20 +00:00
|
|
|
priv->libDir, type) < 0)
|
2016-08-13 18:54:58 +00:00
|
|
|
goto cleanup;
|
2016-06-08 13:18:25 +00:00
|
|
|
glisten->fromConfig = true;
|
|
|
|
glisten->type = VIR_DOMAIN_GRAPHICS_LISTEN_TYPE_SOCKET;
|
2016-06-08 11:30:20 +00:00
|
|
|
} else if (listenAddr) {
|
|
|
|
if (VIR_STRDUP(glisten->address, listenAddr) < 0)
|
2016-08-13 18:54:58 +00:00
|
|
|
goto cleanup;
|
2016-06-08 11:30:20 +00:00
|
|
|
glisten->fromConfig = true;
|
|
|
|
}
|
|
|
|
}
|
2016-05-18 11:27:28 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case VIR_DOMAIN_GRAPHICS_LISTEN_TYPE_NETWORK:
|
|
|
|
if (glisten->address || !listenAddr)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (qemuProcessGraphicsSetupNetworkAddress(glisten,
|
|
|
|
listenAddr) < 0)
|
2016-08-13 18:54:58 +00:00
|
|
|
goto cleanup;
|
2016-05-18 11:27:28 +00:00
|
|
|
break;
|
|
|
|
|
2016-06-08 08:35:37 +00:00
|
|
|
case VIR_DOMAIN_GRAPHICS_LISTEN_TYPE_SOCKET:
|
|
|
|
if (!glisten->socket) {
|
|
|
|
if (virAsprintf(&glisten->socket, "%s/%s.sock",
|
|
|
|
priv->libDir, type) < 0)
|
2016-08-13 18:54:58 +00:00
|
|
|
goto cleanup;
|
2016-06-08 08:35:37 +00:00
|
|
|
glisten->autoGenerated = true;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
2016-05-18 11:27:28 +00:00
|
|
|
case VIR_DOMAIN_GRAPHICS_LISTEN_TYPE_NONE:
|
|
|
|
case VIR_DOMAIN_GRAPHICS_LISTEN_TYPE_LAST:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-08-13 18:54:58 +00:00
|
|
|
ret = 0;
|
|
|
|
|
|
|
|
cleanup:
|
|
|
|
virObjectUnref(cfg);
|
|
|
|
return ret;
|
2016-05-18 11:27:28 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2016-05-09 12:20:08 +00:00
|
|
|
static int
|
|
|
|
qemuProcessSetupGraphics(virQEMUDriverPtr driver,
|
2016-05-09 12:30:27 +00:00
|
|
|
virDomainObjPtr vm,
|
|
|
|
unsigned int flags)
|
2016-05-09 12:20:08 +00:00
|
|
|
{
|
2016-08-13 19:01:38 +00:00
|
|
|
virDomainGraphicsDefPtr graphics;
|
2016-05-09 12:30:27 +00:00
|
|
|
bool allocate = !(flags & VIR_QEMU_PROCESS_START_PRETEND);
|
2016-05-18 11:27:28 +00:00
|
|
|
size_t i;
|
2016-05-09 12:20:08 +00:00
|
|
|
int ret = -1;
|
|
|
|
|
2016-08-13 19:29:38 +00:00
|
|
|
for (i = 0; i < vm->def->ngraphics; i++) {
|
|
|
|
graphics = vm->def->graphics[i];
|
|
|
|
|
|
|
|
if (qemuProcessGraphicsSetupListen(driver, graphics, vm) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2016-08-13 19:01:38 +00:00
|
|
|
if (allocate) {
|
|
|
|
for (i = 0; i < vm->def->ngraphics; i++) {
|
|
|
|
graphics = vm->def->graphics[i];
|
|
|
|
|
|
|
|
if (qemuProcessGraphicsReservePorts(driver, graphics) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
}
|
2016-05-09 12:20:08 +00:00
|
|
|
|
2015-10-30 17:16:53 +00:00
|
|
|
for (i = 0; i < vm->def->ngraphics; ++i) {
|
2016-08-13 19:01:38 +00:00
|
|
|
graphics = vm->def->graphics[i];
|
2016-04-25 13:24:48 +00:00
|
|
|
|
2016-08-13 18:58:55 +00:00
|
|
|
if (qemuProcessGraphicsAllocatePorts(driver, graphics, allocate) < 0)
|
|
|
|
goto cleanup;
|
2015-10-30 17:16:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
ret = 0;
|
|
|
|
|
|
|
|
cleanup:
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2015-11-02 09:35:58 +00:00
|
|
|
static int
|
|
|
|
qemuProcessSetupRawIO(virQEMUDriverPtr driver,
|
|
|
|
virDomainObjPtr vm,
|
|
|
|
virCommandPtr cmd ATTRIBUTE_UNUSED)
|
|
|
|
{
|
|
|
|
bool rawio = false;
|
|
|
|
size_t i;
|
|
|
|
int ret = -1;
|
|
|
|
|
|
|
|
/* in case a certain disk is desirous of CAP_SYS_RAWIO, add this */
|
|
|
|
for (i = 0; i < vm->def->ndisks; i++) {
|
|
|
|
virDomainDeviceDef dev;
|
|
|
|
virDomainDiskDefPtr disk = vm->def->disks[i];
|
|
|
|
|
|
|
|
if (disk->rawio == VIR_TRISTATE_BOOL_YES) {
|
|
|
|
rawio = true;
|
|
|
|
#ifndef CAP_SYS_RAWIO
|
|
|
|
break;
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
|
|
|
dev.type = VIR_DOMAIN_DEVICE_DISK;
|
|
|
|
dev.data.disk = disk;
|
|
|
|
if (qemuAddSharedDevice(driver, &dev, vm->def->name) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
if (qemuSetUnprivSGIO(&dev) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* If rawio not already set, check hostdevs as well */
|
|
|
|
if (!rawio) {
|
|
|
|
for (i = 0; i < vm->def->nhostdevs; i++) {
|
2016-11-15 18:25:41 +00:00
|
|
|
if (!virHostdevIsSCSIDevice(vm->def->hostdevs[i]))
|
2016-11-15 18:25:39 +00:00
|
|
|
continue;
|
|
|
|
|
2015-11-02 09:35:58 +00:00
|
|
|
virDomainHostdevSubsysSCSIPtr scsisrc =
|
|
|
|
&vm->def->hostdevs[i]->source.subsys.u.scsi;
|
|
|
|
if (scsisrc->rawio == VIR_TRISTATE_BOOL_YES) {
|
|
|
|
rawio = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = 0;
|
|
|
|
|
|
|
|
cleanup:
|
|
|
|
if (rawio) {
|
|
|
|
#ifdef CAP_SYS_RAWIO
|
|
|
|
if (ret == 0)
|
|
|
|
virCommandAllowCap(cmd, CAP_SYS_RAWIO);
|
|
|
|
#else
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
|
|
|
|
_("Raw I/O is not supported on this platform"));
|
|
|
|
ret = -1;
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2015-11-02 10:00:49 +00:00
|
|
|
static int
|
|
|
|
qemuProcessSetupBalloon(virQEMUDriverPtr driver,
|
|
|
|
virDomainObjPtr vm,
|
|
|
|
qemuDomainAsyncJob asyncJob)
|
|
|
|
{
|
|
|
|
unsigned long long balloon = vm->def->mem.cur_balloon;
|
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
|
|
|
int ret = -1;
|
|
|
|
|
2016-04-06 13:02:31 +00:00
|
|
|
if (!virDomainDefHasMemballoon(vm->def))
|
2015-11-02 10:00:49 +00:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
|
2017-07-19 06:40:24 +00:00
|
|
|
return -1;
|
2015-11-02 10:00:49 +00:00
|
|
|
|
2016-07-25 15:07:38 +00:00
|
|
|
if (vm->def->memballoon->period)
|
|
|
|
qemuMonitorSetMemoryStatsPeriod(priv->mon, vm->def->memballoon,
|
|
|
|
vm->def->memballoon->period);
|
2015-11-02 10:00:49 +00:00
|
|
|
if (qemuMonitorSetBalloon(priv->mon, balloon) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
ret = 0;
|
|
|
|
|
|
|
|
cleanup:
|
|
|
|
if (qemuDomainObjExitMonitor(driver, vm) < 0)
|
|
|
|
ret = -1;
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2015-11-02 12:39:28 +00:00
|
|
|
static int
|
|
|
|
qemuProcessMakeDir(virQEMUDriverPtr driver,
|
|
|
|
virDomainObjPtr vm,
|
2016-02-26 08:15:55 +00:00
|
|
|
const char *path)
|
2015-11-02 12:39:28 +00:00
|
|
|
{
|
|
|
|
int ret = -1;
|
|
|
|
|
|
|
|
if (virFileMakePathWithMode(path, 0750) < 0) {
|
|
|
|
virReportSystemError(errno, _("Cannot create directory '%s'"), path);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2017-02-13 13:36:53 +00:00
|
|
|
if (qemuSecurityDomainSetPathLabel(driver->securityManager,
|
|
|
|
vm->def, path) < 0)
|
2015-11-02 12:39:28 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
ret = 0;
|
|
|
|
|
|
|
|
cleanup:
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2016-04-20 20:44:25 +00:00
|
|
|
static void
|
|
|
|
qemuProcessStartWarnShmem(virDomainObjPtr vm)
|
|
|
|
{
|
|
|
|
size_t i;
|
|
|
|
bool check_shmem = false;
|
|
|
|
bool shmem = vm->def->nshmems;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* For vhost-user to work, the domain has to have some type of
|
|
|
|
* shared memory configured. We're not the proper ones to judge
|
|
|
|
* whether shared hugepages or shm are enough and will be in the
|
|
|
|
* future, so we'll just warn in case neither is configured.
|
|
|
|
* Moreover, failing here would give the false impression that libvirt
|
|
|
|
* verifies that everything works before running the domain; we cannot
|
|
|
|
* do that, and it is not our aim to do so.
|
|
|
|
*/
|
|
|
|
for (i = 0; i < vm->def->nnets; i++) {
|
|
|
|
if (virDomainNetGetActualType(vm->def->nets[i]) ==
|
|
|
|
VIR_DOMAIN_NET_TYPE_VHOSTUSER) {
|
|
|
|
check_shmem = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!check_shmem)
|
|
|
|
return;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This check is by no means complete. We merely check
|
|
|
|
* whether there are *some* hugepages enabled and *some* NUMA
|
|
|
|
* nodes with shared memory access.
|
|
|
|
*/
|
|
|
|
if (!shmem && vm->def->mem.nhugepages) {
|
|
|
|
for (i = 0; i < virDomainNumaGetNodeCount(vm->def->numa); i++) {
|
|
|
|
if (virDomainNumaGetNodeMemoryAccessMode(vm->def->numa, i) ==
|
2017-02-02 13:27:30 +00:00
|
|
|
VIR_DOMAIN_MEMORY_ACCESS_SHARED) {
|
2016-04-20 20:44:25 +00:00
|
|
|
shmem = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!shmem) {
|
|
|
|
VIR_WARN("Detected vhost-user interface without any shared memory, "
|
|
|
|
"the interface might not be operational");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-09-20 11:49:21 +00:00
|
|
|
|
|
|
|
static int
|
|
|
|
qemuProcessStartValidateGraphics(virDomainObjPtr vm)
|
|
|
|
{
|
|
|
|
size_t i;
|
|
|
|
|
|
|
|
for (i = 0; i < vm->def->ngraphics; i++) {
|
|
|
|
virDomainGraphicsDefPtr graphics = vm->def->graphics[i];
|
|
|
|
|
|
|
|
switch (graphics->type) {
|
|
|
|
case VIR_DOMAIN_GRAPHICS_TYPE_VNC:
|
|
|
|
case VIR_DOMAIN_GRAPHICS_TYPE_SPICE:
|
|
|
|
if (graphics->nListens > 1) {
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
|
|
|
|
_("QEMU does not support multiple listens for "
|
|
|
|
"one graphics device."));
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
|
|
|
case VIR_DOMAIN_GRAPHICS_TYPE_SDL:
|
|
|
|
case VIR_DOMAIN_GRAPHICS_TYPE_RDP:
|
|
|
|
case VIR_DOMAIN_GRAPHICS_TYPE_DESKTOP:
|
|
|
|
case VIR_DOMAIN_GRAPHICS_TYPE_LAST:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2016-10-11 15:53:50 +00:00
|
|
|
static int
|
|
|
|
qemuProcessStartValidateVideo(virDomainObjPtr vm,
|
|
|
|
virQEMUCapsPtr qemuCaps)
|
|
|
|
{
|
|
|
|
size_t i;
|
|
|
|
virDomainVideoDefPtr video;
|
|
|
|
|
|
|
|
for (i = 0; i < vm->def->nvideos; i++) {
|
|
|
|
video = vm->def->videos[i];
|
|
|
|
|
2016-10-10 15:51:38 +00:00
|
|
|
if ((video->type == VIR_DOMAIN_VIDEO_TYPE_VGA &&
|
|
|
|
!virQEMUCapsGet(qemuCaps, QEMU_CAPS_DEVICE_VGA)) ||
|
|
|
|
(video->type == VIR_DOMAIN_VIDEO_TYPE_CIRRUS &&
|
|
|
|
!virQEMUCapsGet(qemuCaps, QEMU_CAPS_DEVICE_CIRRUS_VGA)) ||
|
|
|
|
(video->type == VIR_DOMAIN_VIDEO_TYPE_VMVGA &&
|
|
|
|
!virQEMUCapsGet(qemuCaps, QEMU_CAPS_DEVICE_VMWARE_SVGA)) ||
|
|
|
|
(video->type == VIR_DOMAIN_VIDEO_TYPE_QXL &&
|
|
|
|
!virQEMUCapsGet(qemuCaps, QEMU_CAPS_DEVICE_QXL)) ||
|
|
|
|
(video->type == VIR_DOMAIN_VIDEO_TYPE_VIRTIO &&
|
|
|
|
!virQEMUCapsGet(qemuCaps, QEMU_CAPS_DEVICE_VIRTIO_GPU))) {
|
2016-10-11 15:53:50 +00:00
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
|
|
|
|
_("this QEMU does not support '%s' video device"),
|
|
|
|
virDomainVideoTypeToString(video->type));
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (video->accel) {
|
|
|
|
if (video->accel->accel3d == VIR_TRISTATE_SWITCH_ON &&
|
|
|
|
(video->type != VIR_DOMAIN_VIDEO_TYPE_VIRTIO ||
|
|
|
|
!virQEMUCapsGet(qemuCaps, QEMU_CAPS_VIRTIO_GPU_VIRGL))) {
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
|
|
|
|
_("%s 3d acceleration is not supported"),
|
|
|
|
virDomainVideoTypeToString(video->type));
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2017-02-12 03:14:32 +00:00
|
|
|
static int
qemuProcessStartValidateIOThreads(virDomainObjPtr vm,
                                  virQEMUCapsPtr qemuCaps)
{
    size_t i;

    if (vm->def->niothreadids > 0 &&
        !virQEMUCapsGet(qemuCaps, QEMU_CAPS_OBJECT_IOTHREAD)) {
        virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                       _("IOThreads not supported for this QEMU"));
        return -1;
    }

    for (i = 0; i < vm->def->ncontrollers; i++) {
        virDomainControllerDefPtr cont = vm->def->controllers[i];

        if (cont->type == VIR_DOMAIN_CONTROLLER_TYPE_SCSI &&
            cont->model == VIR_DOMAIN_CONTROLLER_MODEL_SCSI_VIRTIO_SCSI &&
            cont->iothread > 0 &&
            !virQEMUCapsGet(qemuCaps, QEMU_CAPS_VIRTIO_SCSI_IOTHREAD)) {
            virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                           _("IOThreads for virtio-scsi not supported for "
                             "this QEMU"));
            return -1;
        }
    }

    return 0;
}


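/**
 * qemuProcessStartValidateShmem:
 * @vm: domain object
 *
 * Reject shmem devices whose name contains a '/'.
 */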
static int
qemuProcessStartValidateShmem(virDomainObjPtr vm)
{
    size_t i;

    for (i = 0; i < vm->def->nshmems; i++) {
        virDomainShmemDefPtr shmem = vm->def->shmems[i];

        if (strchr(shmem->name, '/')) {
            virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
                           _("shmem name '%s' must not contain '/'"),
                           shmem->name);
            return -1;
        }
    }

    return 0;
}


static int
qemuProcessStartValidateXML(virQEMUDriverPtr driver,
                            virDomainObjPtr vm,
                            virQEMUCapsPtr qemuCaps,
                            virCapsPtr caps,
                            unsigned int flags)
{
    /* The bits we validate here are XML configs that we previously
     * accepted. We reject them at VM startup time rather than parse
     * time so that pre-existing VMs aren't rejected and dropped from
     * the VM list when libvirt is updated.
     *
     * If back compat isn't a concern, XML validation should probably
     * be done at parse time.
     */
    if (qemuValidateCpuCount(vm->def, qemuCaps) < 0)
        return -1;

    /* checks below should not be executed when starting a qemu process for a
     * VM that was running before (migration, snapshots, save). It's more
     * important to start such VM than keep the configuration clean */
    if ((flags & VIR_QEMU_PROCESS_START_NEW) &&
        virDomainDefValidate(vm->def, caps, 0, driver->xmlopt) < 0)
        return -1;

    return 0;
}


/**
 * qemuProcessStartValidate:
 * @driver: qemu driver object
 * @vm: domain object
 * @qemuCaps: emulator capabilities
 * @caps: host capabilities
 * @flags: VIR_QEMU_PROCESS_START_* flags
 *
 * This function aggregates checks done prior to start of a VM.
 *
 * Flag VIR_QEMU_PROCESS_START_PRETEND tells that we don't want to actually
 * start the domain but create a valid qemu command.  If some code shouldn't be
 * executed in this case, make sure to check this flag.
 */
static int
qemuProcessStartValidate(virQEMUDriverPtr driver,
                         virDomainObjPtr vm,
                         virQEMUCapsPtr qemuCaps,
                         virCapsPtr caps,
                         unsigned int flags)
{
    if (!(flags & VIR_QEMU_PROCESS_START_PRETEND)) {
        if (vm->def->virtType == VIR_DOMAIN_VIRT_KVM) {
            VIR_DEBUG("Checking for KVM availability");
            if (!virFileExists("/dev/kvm")) {
                virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                               _("Domain requires KVM, but it is not available. "
                                 "Check that virtualization is enabled in the "
                                 "host BIOS, and host configuration is setup to "
                                 "load the kvm modules."));
                return -1;
            }
        }

        VIR_DEBUG("Checking domain and device security labels");
        if (qemuSecurityCheckAllLabel(driver->securityManager, vm->def) < 0)
            return -1;

    }

    if (qemuProcessStartValidateXML(driver, vm, qemuCaps, caps, flags) < 0)
        return -1;

    if (qemuProcessStartValidateGraphics(vm) < 0)
        return -1;

    if (qemuProcessStartValidateVideo(vm, qemuCaps) < 0)
        return -1;

    if (qemuProcessStartValidateIOThreads(vm, qemuCaps) < 0)
        return -1;

    if (qemuProcessStartValidateShmem(vm) < 0)
        return -1;

    VIR_DEBUG("Checking for any possible (non-fatal) issues");

    qemuProcessStartWarnShmem(vm);

    return 0;
}


/**
 * qemuProcessInit:
 *
 * Prepares the domain up to the point when priv->qemuCaps is initialized. The
 * function calls qemuProcessStop when needed.
 *
 * Flag VIR_QEMU_PROCESS_START_PRETEND tells that we don't want to actually
 * start the domain but create a valid qemu command.  If some code shouldn't be
 * executed in this case, make sure to check this flag.
 *
 * Returns 0 on success, -1 on error.
 */
int
qemuProcessInit(virQEMUDriverPtr driver,
                virDomainObjPtr vm,
                virCPUDefPtr updatedCPU,
                qemuDomainAsyncJob asyncJob,
                bool migration,
                unsigned int flags)
{
    virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
    virCapsPtr caps = NULL;
    qemuDomainObjPrivatePtr priv = vm->privateData;
    int stopFlags;
    virCPUDefPtr origCPU = NULL;
    int ret = -1;

    VIR_DEBUG("vm=%p name=%s id=%d migration=%d",
              vm, vm->def->name, vm->def->id, migration);

    VIR_DEBUG("Beginning VM startup process");

    if (virDomainObjIsActive(vm)) {
        virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                       _("VM is already active"));
        goto cleanup;
    }

    if (!(caps = virQEMUDriverGetCapabilities(driver, false)))
        goto cleanup;

    VIR_DEBUG("Determining emulator version");
    virObjectUnref(priv->qemuCaps);
    if (!(priv->qemuCaps = virQEMUCapsCacheLookupCopy(caps,
                                                      driver->qemuCapsCache,
                                                      vm->def->emulator,
                                                      vm->def->os.machine)))
        goto cleanup;

    if (qemuDomainUpdateCPU(vm, updatedCPU, &origCPU) < 0)
        goto cleanup;

    if (qemuProcessStartValidate(driver, vm, priv->qemuCaps, caps, flags) < 0)
        goto cleanup;

    /* Do this upfront, so any part of the startup process can add
     * runtime state to vm->def that won't be persisted. This lets us
     * report implicit runtime defaults in the XML, like vnc listen/socket
     */
    VIR_DEBUG("Setting current domain def as transient");
    if (virDomainObjSetDefTransient(caps, driver->xmlopt, vm) < 0)
        goto cleanup;

    if (flags & VIR_QEMU_PROCESS_START_PRETEND) {
        if (qemuDomainSetPrivatePaths(driver, vm) < 0) {
            virDomainObjRemoveTransientDef(vm);
            goto cleanup;
        }
    } else {
        vm->def->id = qemuDriverAllocateID(driver);
        qemuDomainSetFakeReboot(driver, vm, false);
        virDomainObjSetState(vm, VIR_DOMAIN_PAUSED, VIR_DOMAIN_PAUSED_STARTING_UP);

        if (virAtomicIntInc(&driver->nactive) == 1 && driver->inhibitCallback)
            driver->inhibitCallback(true, driver->inhibitOpaque);

        /* Run an early hook to set-up missing devices */
        if (qemuProcessStartHook(driver, vm,
                                 VIR_HOOK_QEMU_OP_PREPARE,
                                 VIR_HOOK_SUBOP_BEGIN) < 0)
            goto stop;

        if (qemuDomainSetPrivatePaths(driver, vm) < 0)
            goto stop;

        VIR_STEAL_PTR(priv->origCPU, origCPU);
    }

    ret = 0;

 cleanup:
    virCPUDefFree(origCPU);
    virObjectUnref(cfg);
    virObjectUnref(caps);
    return ret;

 stop:
    stopFlags = VIR_QEMU_PROCESS_STOP_NO_RELABEL;
    if (migration)
        stopFlags |= VIR_QEMU_PROCESS_STOP_MIGRATED;
    qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED, asyncJob, stopFlags);
    goto cleanup;
}


/**
 * qemuProcessNetworkPrepareDevices
 */
static int
qemuProcessNetworkPrepareDevices(virDomainDefPtr def)
{
    int ret = -1;
    size_t i;

    for (i = 0; i < def->nnets; i++) {
        virDomainNetDefPtr net = def->nets[i];
        virDomainNetType actualType;

        /* If appropriate, grab a physical device from the configured
         * network's pool of devices, or resolve bridge device name
         * to the one defined in the network definition.
         */
        if (networkAllocateActualDevice(def, net) < 0)
            goto cleanup;

        actualType = virDomainNetGetActualType(net);
        if (actualType == VIR_DOMAIN_NET_TYPE_HOSTDEV &&
            net->type == VIR_DOMAIN_NET_TYPE_NETWORK) {
            /* Each type='hostdev' network device must also have a
             * corresponding entry in the hostdevs array. For netdevs
             * that are hardcoded as type='hostdev', this is already
             * done by the parser, but for those allocated from a
             * network / determined at runtime, we need to do it
             * separately.
             */
            virDomainHostdevDefPtr hostdev = virDomainNetGetActualHostdev(net);
            virDomainHostdevSubsysPCIPtr pcisrc = &hostdev->source.subsys.u.pci;

            if (virDomainHostdevFind(def, hostdev, NULL) >= 0) {
                virReportError(VIR_ERR_INTERNAL_ERROR,
                               _("PCI device %04x:%02x:%02x.%x "
                                 "allocated from network %s is already "
                                 "in use by domain %s"),
                               pcisrc->addr.domain, pcisrc->addr.bus,
                               pcisrc->addr.slot, pcisrc->addr.function,
                               net->data.network.name, def->name);
                goto cleanup;
            }
            if (virDomainHostdevInsert(def, hostdev) < 0)
                goto cleanup;
        }
    }
    ret = 0;
 cleanup:
    return ret;
}


/**
 * qemuProcessSetupVcpu:
 * @vm: domain object
 * @vcpuid: id of VCPU to set defaults
 *
 * This function sets resource properties (cgroups, affinity, scheduler) for a
 * vCPU. This function expects that the vCPU is online and the vCPU pids were
 * correctly detected at the point when it's called.
 *
 * Returns 0 on success, -1 on error.
 */
int
qemuProcessSetupVcpu(virDomainObjPtr vm,
                     unsigned int vcpuid)
{
    pid_t vcpupid = qemuDomainGetVcpuPid(vm, vcpuid);
    virDomainVcpuDefPtr vcpu = virDomainDefGetVcpu(vm->def, vcpuid);

    return qemuProcessSetupPid(vm, vcpupid, VIR_CGROUP_THREAD_VCPU,
                               vcpuid, vcpu->cpumask,
                               vm->def->cputune.period,
                               vm->def->cputune.quota,
                               &vcpu->sched);
}


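/**
 * qemuProcessSetupVcpus:
 * @vm: domain object
 *
 * Apply per-vCPU resource settings to all online vCPUs. If qemu did not
 * report vCPU PIDs, only verify that no per-vCPU affinity differing from the
 * domain default was requested.
 */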
static int
qemuProcessSetupVcpus(virDomainObjPtr vm)
{
    virDomainVcpuDefPtr vcpu;
    unsigned int maxvcpus = virDomainDefGetVcpusMax(vm->def);
    size_t i;

    if ((vm->def->cputune.period || vm->def->cputune.quota) &&
        !virCgroupHasController(((qemuDomainObjPrivatePtr) vm->privateData)->cgroup,
                                VIR_CGROUP_CONTROLLER_CPU)) {
        virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                       _("cgroup cpu is required for scheduler tuning"));
        return -1;
    }

    if (!qemuDomainHasVcpuPids(vm)) {
        /* If any CPU has custom affinity that differs from the
         * VM default affinity, we must reject it */
        for (i = 0; i < maxvcpus; i++) {
            vcpu = virDomainDefGetVcpu(vm->def, i);

            if (!vcpu->online)
                continue;

            if (vcpu->cpumask &&
                !virBitmapEqual(vm->def->cpumask, vcpu->cpumask)) {
                virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                               _("cpu affinity is not supported"));
                return -1;
            }
        }

        return 0;
    }

    for (i = 0; i < maxvcpus; i++) {
        vcpu = virDomainDefGetVcpu(vm->def, i);

        if (!vcpu->online)
            continue;

        if (qemuProcessSetupVcpu(vm, i) < 0)
            return -1;
    }

    return 0;
}


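/**
 * qemuProcessSetupIOThread:
 * @vm: domain object
 * @iothread: IOThread to configure
 *
 * This function sets resource properties (cgroups, affinity, scheduler) for an
 * IOThread, using the thread id detected from qemu.
 *
 * Returns 0 on success, -1 on error.
 */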
int
qemuProcessSetupIOThread(virDomainObjPtr vm,
                         virDomainIOThreadIDDefPtr iothread)
{
    return qemuProcessSetupPid(vm, iothread->thread_id,
                               VIR_CGROUP_THREAD_IOTHREAD,
                               iothread->iothread_id,
                               iothread->cpumask,
                               vm->def->cputune.iothread_period,
                               vm->def->cputune.iothread_quota,
                               &iothread->sched);
}


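/**
 * qemuProcessSetupIOThreads:
 * @vm: domain object
 *
 * Apply resource settings to every IOThread defined for the domain.
 */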
static int
qemuProcessSetupIOThreads(virDomainObjPtr vm)
{
    size_t i;

    for (i = 0; i < vm->def->niothreadids; i++) {
        virDomainIOThreadIDDefPtr info = vm->def->iothreadids[i];

        if (qemuProcessSetupIOThread(vm, info) < 0)
            return -1;
    }

    return 0;
}


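/**
 * qemuProcessValidateHotpluggableVcpus:
 * @def: domain definition
 *
 * Validate the hotpluggable vcpu configuration: every hotpluggable entity
 * must carry complete hotplug data, vcpus within one entity must share their
 * configuration, and the 'order' values must be unique.
 */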
static int
qemuProcessValidateHotpluggableVcpus(virDomainDefPtr def)
{
    virDomainVcpuDefPtr vcpu;
    virDomainVcpuDefPtr subvcpu;
    qemuDomainVcpuPrivatePtr vcpupriv;
    unsigned int maxvcpus = virDomainDefGetVcpusMax(def);
    size_t i = 0;
    size_t j;
    virBitmapPtr ordermap = NULL;
    int ret = -1;

    if (!(ordermap = virBitmapNew(maxvcpus + 1)))
        goto cleanup;

    /* validate:
     * - all hotpluggable entities to be hotplugged have the correct data
     * - vcpus belonging to a hotpluggable entity share configuration
     * - order of the hotpluggable entities is unique
     */
    for (i = 0; i < maxvcpus; i++) {
        vcpu = virDomainDefGetVcpu(def, i);
        vcpupriv = QEMU_DOMAIN_VCPU_PRIVATE(vcpu);

        /* skip over hotpluggable entities */
        if (vcpupriv->vcpus == 0)
            continue;

        if (vcpu->order != 0) {
            if (virBitmapIsBitSet(ordermap, vcpu->order)) {
                virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
                               _("duplicate vcpu order '%u'"), vcpu->order);
                goto cleanup;
            }

            if (virBitmapSetBit(ordermap, vcpu->order)) {
                virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
                               _("vcpu order '%u' exceeds vcpu count"),
                               vcpu->order);
                goto cleanup;
            }
        }

        for (j = i + 1; j < (i + vcpupriv->vcpus); j++) {
            subvcpu = virDomainDefGetVcpu(def, j);
            if (subvcpu->hotpluggable != vcpu->hotpluggable ||
                subvcpu->online != vcpu->online ||
                subvcpu->order != vcpu->order) {
                virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
                               _("vcpus '%zu' and '%zu' are in the same hotplug "
                                 "group but differ in configuration"), i, j);
                goto cleanup;
            }
        }

        if (vcpu->online && vcpu->hotpluggable == VIR_TRISTATE_BOOL_YES) {
            if ((vcpupriv->socket_id == -1 && vcpupriv->core_id == -1 &&
                 vcpupriv->thread_id == -1 && vcpupriv->node_id == -1) ||
                !vcpupriv->type) {
                virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
                               _("vcpu '%zu' is missing hotplug data"), i);
                goto cleanup;
            }
        }
    }

    ret = 0;
 cleanup:
    virBitmapFree(ordermap);
    return ret;
}


static int
qemuDomainHasHotpluggableStartupVcpus(virDomainDefPtr def)
{
    size_t maxvcpus = virDomainDefGetVcpusMax(def);
    virDomainVcpuDefPtr vcpu;
    size_t i;

    for (i = 0; i < maxvcpus; i++) {
        vcpu = virDomainDefGetVcpu(def, i);

        if (vcpu->online && vcpu->hotpluggable == VIR_TRISTATE_BOOL_YES)
            return true;
    }

    return false;
}


static int
qemuProcessVcpusSortOrder(const void *a,
                          const void *b)
{
    virDomainVcpuDefPtr vcpua = *((virDomainVcpuDefPtr *)a);
    virDomainVcpuDefPtr vcpub = *((virDomainVcpuDefPtr *)b);

    return vcpua->order - vcpub->order;
}


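/**
 * qemuProcessSetupHotpluggableVcpus:
 * @driver: qemu driver object
 * @vm: domain object
 * @asyncJob: async job type
 *
 * Hotplug into the freshly started qemu process all vcpus which are both
 * online and hotpluggable, in the order given by their 'order' attribute.
 */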
static int
qemuProcessSetupHotpluggableVcpus(virQEMUDriverPtr driver,
                                  virDomainObjPtr vm,
                                  qemuDomainAsyncJob asyncJob)
{
    unsigned int maxvcpus = virDomainDefGetVcpusMax(vm->def);
    qemuDomainObjPrivatePtr priv = vm->privateData;
    qemuCgroupEmulatorAllNodesDataPtr emulatorCgroup = NULL;
    virDomainVcpuDefPtr vcpu;
    qemuDomainVcpuPrivatePtr vcpupriv;
    virJSONValuePtr vcpuprops = NULL;
    size_t i;
    int ret = -1;
    int rc;

    virDomainVcpuDefPtr *bootHotplug = NULL;
    size_t nbootHotplug = 0;

    for (i = 0; i < maxvcpus; i++) {
        vcpu = virDomainDefGetVcpu(vm->def, i);
        vcpupriv = QEMU_DOMAIN_VCPU_PRIVATE(vcpu);

        if (vcpu->hotpluggable == VIR_TRISTATE_BOOL_YES && vcpu->online &&
            vcpupriv->vcpus != 0) {
            if (virAsprintf(&vcpupriv->alias, "vcpu%zu", i) < 0)
                goto cleanup;

            if (VIR_APPEND_ELEMENT(bootHotplug, nbootHotplug, vcpu) < 0)
                goto cleanup;
        }
    }

    if (nbootHotplug == 0) {
        ret = 0;
        goto cleanup;
    }

    qsort(bootHotplug, nbootHotplug, sizeof(*bootHotplug),
          qemuProcessVcpusSortOrder);

    if (qemuCgroupEmulatorAllNodesAllow(priv->cgroup, &emulatorCgroup) < 0)
        goto cleanup;

    for (i = 0; i < nbootHotplug; i++) {
        vcpu = bootHotplug[i];

        if (!(vcpuprops = qemuBuildHotpluggableCPUProps(vcpu)))
            goto cleanup;

        if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
            goto cleanup;

        rc = qemuMonitorAddDeviceArgs(qemuDomainGetMonitor(vm), vcpuprops);
        vcpuprops = NULL;

        if (qemuDomainObjExitMonitor(driver, vm) < 0)
            goto cleanup;

        if (rc < 0)
            goto cleanup;

        virJSONValueFree(vcpuprops);
    }

    ret = 0;

 cleanup:
    qemuCgroupEmulatorAllNodesRestore(emulatorCgroup);
    VIR_FREE(bootHotplug);
    virJSONValueFree(vcpuprops);
    return ret;
}


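/**
 * qemuProcessUpdateGuestCPU:
 * @def: domain definition
 * @qemuCaps: emulator capabilities
 * @caps: host capabilities
 * @flags: VIR_QEMU_PROCESS_START_* flags
 *
 * Finalize the guest CPU definition before launch: check that the CPU mode is
 * supported, update the definition against the host CPU model reported by
 * qemu, and translate the CPU model to one the emulator knows about.
 */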
static int
qemuProcessUpdateGuestCPU(virDomainDefPtr def,
                          virQEMUCapsPtr qemuCaps,
                          virCapsPtr caps,
                          unsigned int flags)
{
    int ret = -1;
    size_t nmodels = 0;
    char **models = NULL;

    if (!def->cpu)
        return 0;

    /* nothing to do if only topology part of CPU def is used */
    if (def->cpu->mode == VIR_CPU_MODE_CUSTOM && !def->cpu->model)
        return 0;

    /* Old libvirt added host CPU model to host-model CPUs for migrations,
     * while new libvirt just turns host-model into custom mode. We need
     * to fix the mode to maintain backward compatibility and to avoid
     * the CPU model being replaced in virCPUUpdate.
     */
    if (!(flags & VIR_QEMU_PROCESS_START_NEW) &&
        ARCH_IS_X86(def->os.arch) &&
        def->cpu->mode == VIR_CPU_MODE_HOST_MODEL &&
        def->cpu->model) {
        def->cpu->mode = VIR_CPU_MODE_CUSTOM;
    }

    if (!virQEMUCapsIsCPUModeSupported(qemuCaps, caps, def->virtType,
                                       def->cpu->mode)) {
        virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
                       _("CPU mode '%s' for %s %s domain on %s host is not "
                         "supported by hypervisor"),
                       virCPUModeTypeToString(def->cpu->mode),
                       virArchToString(def->os.arch),
                       virDomainVirtTypeToString(def->virtType),
                       virArchToString(caps->host.arch));
        return -1;
    }

    if (virCPUConvertLegacy(caps->host.arch, def->cpu) < 0)
        return -1;

    /* nothing to update for host-passthrough */
    if (def->cpu->mode == VIR_CPU_MODE_HOST_PASSTHROUGH)
        return 0;

    if (def->cpu->check == VIR_CPU_CHECK_PARTIAL &&
        virCPUCompare(caps->host.arch,
                      virQEMUCapsGetHostModel(qemuCaps, def->virtType,
                                              VIR_QEMU_CAPS_HOST_CPU_FULL),
                      def->cpu, true) < 0)
        return -1;

    if (virCPUUpdate(def->os.arch, def->cpu,
                     virQEMUCapsGetHostModel(qemuCaps, def->virtType,
                                             VIR_QEMU_CAPS_HOST_CPU_MIGRATABLE)) < 0)
        goto cleanup;

    if (virQEMUCapsGetCPUDefinitions(qemuCaps, def->virtType,
                                     &models, &nmodels) < 0 ||
        virCPUTranslate(def->os.arch, def->cpu,
                        (const char **) models, nmodels) < 0)
        goto cleanup;

    def->cpu->fallback = VIR_CPU_FALLBACK_FORBID;
    ret = 0;

 cleanup:
    virStringListFreeCount(models, nmodels);
    return ret;
}


static int
qemuProcessPrepareDomainNUMAPlacement(virDomainObjPtr vm,
                                      virCapsPtr caps)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    char *nodeset = NULL;
    virBitmapPtr numadNodeset = NULL;
    virBitmapPtr hostMemoryNodeset = NULL;
    int ret = -1;

    /* Get the advisory nodeset from numad if 'placement' of
     * either <vcpu> or <numatune> is 'auto'.
     */
    if (!virDomainDefNeedsPlacementAdvice(vm->def))
        return 0;

    nodeset = virNumaGetAutoPlacementAdvice(virDomainDefGetVcpus(vm->def),
                                            virDomainDefGetMemoryTotal(vm->def));

    if (!nodeset)
        goto cleanup;

    if (!(hostMemoryNodeset = virNumaGetHostMemoryNodeset()))
        goto cleanup;

    VIR_DEBUG("Nodeset returned from numad: %s", nodeset);

    if (virBitmapParse(nodeset, &numadNodeset, VIR_DOMAIN_CPUMASK_LEN) < 0)
        goto cleanup;

    /* numad may return a nodeset that only contains cpus but cgroups don't play
     * well with that. Set the autoCpuset from all cpus from that nodeset, but
     * assign autoNodeset only with nodes containing memory. */
    if (!(priv->autoCpuset = virCapabilitiesGetCpusForNodemask(caps, numadNodeset)))
        goto cleanup;

    virBitmapIntersect(numadNodeset, hostMemoryNodeset);

    VIR_STEAL_PTR(priv->autoNodeset, numadNodeset);

    ret = 0;

 cleanup:
    VIR_FREE(nodeset);
    virBitmapFree(numadNodeset);
    virBitmapFree(hostMemoryNodeset);
    return ret;
}


/**
 * qemuProcessPrepareDomain
 *
 * This function groups all code that modifies only the live XML of a domain
 * which is about to start, and it's the only place to do those modifications.
 *
 * Flag VIR_QEMU_PROCESS_START_PRETEND tells that we don't want to actually
 * start the domain but create a valid qemu command.  If some code shouldn't be
 * executed in this case, make sure to check this flag.
 *
 * TODO: move all XML modification from qemuBuildCommandLine into this function
 */
int
qemuProcessPrepareDomain(virConnectPtr conn,
                         virQEMUDriverPtr driver,
                         virDomainObjPtr vm,
                         unsigned int flags)
{
    int ret = -1;
    size_t i;
    qemuDomainObjPrivatePtr priv = vm->privateData;
    virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
    virCapsPtr caps;

    if (!(caps = virQEMUDriverGetCapabilities(driver, false)))
        goto cleanup;

    priv->machineName = qemuDomainGetMachineName(vm);
    if (!priv->machineName)
        goto cleanup;

    if (!(flags & VIR_QEMU_PROCESS_START_PRETEND)) {
        /* If you are using a SecurityDriver with dynamic labelling,
           then generate a security label for isolation */
        VIR_DEBUG("Generating domain security label (if required)");
        if (qemuSecurityGenLabel(driver->securityManager, vm->def) < 0) {
            virDomainAuditSecurityLabel(vm, false);
            goto cleanup;
        }
        virDomainAuditSecurityLabel(vm, true);

        if (qemuProcessPrepareDomainNUMAPlacement(vm, caps) < 0)
            goto cleanup;
    }

    /* Whether we should use virtlogd as stdio handler for character
     * devices source backend. */
    if (cfg->stdioLogD &&
        virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_CHARDEV_FILE_APPEND)) {
        priv->chardevStdioLogd = true;
    }

    /*
     * Normally PCI addresses are assigned in the virDomainCreate
     * or virDomainDefine methods. We might still need to assign
     * some here to cope with the question of upgrades. Regardless
     * we also need to populate the PCI address set cache for later
     * use in hotplug
     */
    VIR_DEBUG("Assigning domain PCI addresses");
    if ((qemuDomainAssignAddresses(vm->def, priv->qemuCaps, driver, vm,
                                   !!(flags & VIR_QEMU_PROCESS_START_NEW))) < 0) {
        goto cleanup;
    }

    if (qemuAssignDeviceAliases(vm->def, priv->qemuCaps) < 0)
        goto cleanup;

    VIR_DEBUG("Setting graphics devices");
    if (qemuProcessSetupGraphics(driver, vm, flags) < 0)
        goto cleanup;

    /* Drop possibly missing disks from the definition. This function
     * also resolves source pool/volume into a path and it needs to
     * happen after the def is copied and aliases are set. */
    if (qemuDomainCheckDiskPresence(conn, driver, vm, flags) < 0)
        goto cleanup;

    VIR_DEBUG("Create domain masterKey");
    if (qemuDomainMasterKeyCreate(vm) < 0)
        goto cleanup;

    VIR_DEBUG("Prepare chardev source backends for TLS");
    qemuDomainPrepareChardevSource(vm->def, driver);

    VIR_DEBUG("Add secrets to disks, hostdevs, and chardevs");
    if (qemuDomainSecretPrepare(conn, driver, vm) < 0)
        goto cleanup;

    for (i = 0; i < vm->def->nchannels; i++) {
        if (qemuDomainPrepareChannel(vm->def->channels[i],
                                     priv->channelTargetDir) < 0)
            goto cleanup;
    }

    if (VIR_ALLOC(priv->monConfig) < 0)
        goto cleanup;

    VIR_DEBUG("Preparing monitor state");
    if (qemuProcessPrepareMonitorChr(priv->monConfig, priv->libDir) < 0)
        goto cleanup;

    priv->monJSON = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MONITOR_JSON);
    priv->monError = false;
    priv->monStart = 0;
    priv->gotShutdown = false;

    VIR_DEBUG("Updating guest CPU definition");
    if (qemuProcessUpdateGuestCPU(vm->def, priv->qemuCaps, caps, flags) < 0)
        goto cleanup;

    ret = 0;
 cleanup:
    virObjectUnref(caps);
    virObjectUnref(cfg);
    return ret;
}


/**
 * qemuProcessPrepareHost
 *
 * This function groups all code that modifies the host system (which also may
 * update live XML) to prepare the environment for a domain which is about to
 * start, and it's the only place to do those modifications.
 *
 * TODO: move all host modification from qemuBuildCommandLine into this function
 */
int
qemuProcessPrepareHost(virQEMUDriverPtr driver,
                       virDomainObjPtr vm,
                       bool incoming)
{
    int ret = -1;
    unsigned int hostdev_flags = 0;
    qemuDomainObjPrivatePtr priv = vm->privateData;
    virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);

    if (qemuPrepareNVRAM(cfg, vm) < 0)
        goto cleanup;

    /* network devices must be "prepared" before hostdevs, because
     * setting up a network device might create a new hostdev that
     * will need to be setup.
     */
    VIR_DEBUG("Preparing network devices");
    if (qemuProcessNetworkPrepareDevices(vm->def) < 0)
        goto cleanup;

    /* Must be run before security labelling */
    VIR_DEBUG("Preparing host devices");
    if (!cfg->relaxedACS)
        hostdev_flags |= VIR_HOSTDEV_STRICT_ACS_CHECK;
    if (!incoming)
        hostdev_flags |= VIR_HOSTDEV_COLD_BOOT;
    if (qemuHostdevPrepareDomainDevices(driver, vm->def, priv->qemuCaps,
                                        hostdev_flags) < 0)
        goto cleanup;

    VIR_DEBUG("Preparing chr devices");
    if (virDomainChrDefForeach(vm->def,
                               true,
                               qemuProcessPrepareChardevDevice,
                               NULL) < 0)
        goto cleanup;

    if (qemuProcessBuildDestroyHugepagesPath(driver, vm, NULL, true) < 0)
        goto cleanup;

    /* Ensure no historical cgroup for this VM is lying around with bogus
     * settings */
    VIR_DEBUG("Ensuring no historical cgroup is lying around");
    qemuRemoveCgroup(vm);

    if (virFileMakePath(cfg->logDir) < 0) {
        virReportSystemError(errno,
                             _("cannot create log directory %s"),
                             cfg->logDir);
        goto cleanup;
    }

    VIR_FREE(priv->pidfile);
    if (!(priv->pidfile = virPidFileBuildPath(cfg->stateDir, vm->def->name))) {
        virReportSystemError(errno,
                             "%s", _("Failed to build pidfile path."));
        goto cleanup;
    }

    if (unlink(priv->pidfile) < 0 &&
        errno != ENOENT) {
        virReportSystemError(errno,
                             _("Cannot remove stale PID file %s"),
                             priv->pidfile);
        goto cleanup;
    }

    /*
     * Create all per-domain directories in order to make sure domain
     * with any possible seclabels can access it.
     */
    if (qemuProcessMakeDir(driver, vm, priv->libDir) < 0 ||
        qemuProcessMakeDir(driver, vm, priv->channelTargetDir) < 0)
        goto cleanup;

    VIR_DEBUG("Write domain masterKey");
    if (qemuDomainWriteMasterKeyFile(driver, vm) < 0)
        goto cleanup;

    ret = 0;
 cleanup:
    virObjectUnref(cfg);
    return ret;
}


/**
|
|
|
|
* qemuProcessLaunch:
|
|
|
|
*
|
|
|
|
* Launch a new QEMU process with stopped virtual CPUs.
|
|
|
|
*
|
|
|
|
* The caller is supposed to call qemuProcessStop with appropriate
|
|
|
|
* flags in case of failure.
|
|
|
|
*
|
|
|
|
* Returns 0 on success,
|
|
|
|
* -1 on error which happened before devices were labeled and thus
|
|
|
|
* there is no need to restore them,
|
|
|
|
* -2 on error requesting security labels to be restored.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
qemuProcessLaunch(virConnectPtr conn,
|
|
|
|
virQEMUDriverPtr driver,
|
|
|
|
virDomainObjPtr vm,
|
|
|
|
qemuDomainAsyncJob asyncJob,
|
|
|
|
qemuProcessIncomingDefPtr incoming,
|
|
|
|
virDomainSnapshotObjPtr snapshot,
|
|
|
|
virNetDevVPortProfileOp vmop,
|
|
|
|
unsigned int flags)
|
|
|
|
{
|
|
|
|
int ret = -1;
|
|
|
|
int rv;
|
|
|
|
int logfile = -1;
|
|
|
|
qemuDomainLogContextPtr logCtxt = NULL;
|
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
|
|
|
virCommandPtr cmd = NULL;
|
|
|
|
struct qemuProcessHookData hookData;
|
|
|
|
virQEMUDriverConfigPtr cfg;
|
|
|
|
virCapsPtr caps = NULL;
|
|
|
|
size_t nnicindexes = 0;
|
|
|
|
int *nicindexes = NULL;
|
2016-03-28 13:30:30 +00:00
|
|
|
size_t i;
|
2016-03-22 12:16:05 +00:00
|
|
|
|
|
|
|
VIR_DEBUG("vm=%p name=%s id=%d asyncJob=%d "
|
|
|
|
"incoming.launchURI=%s incoming.deferredURI=%s "
|
|
|
|
"incoming.fd=%d incoming.path=%s "
|
|
|
|
"snapshot=%p vmop=%d flags=0x%x",
|
|
|
|
vm, vm->def->name, vm->def->id, asyncJob,
|
|
|
|
NULLSTR(incoming ? incoming->launchURI : NULL),
|
|
|
|
NULLSTR(incoming ? incoming->deferredURI : NULL),
|
|
|
|
incoming ? incoming->fd : -1,
|
|
|
|
NULLSTR(incoming ? incoming->path : NULL),
|
|
|
|
snapshot, vmop, flags);
|
|
|
|
|
|
|
|
/* Okay, these are just internal flags,
|
|
|
|
* but doesn't hurt to check */
|
|
|
|
virCheckFlags(VIR_QEMU_PROCESS_START_COLD |
|
|
|
|
VIR_QEMU_PROCESS_START_PAUSED |
|
2016-06-07 11:52:16 +00:00
|
|
|
VIR_QEMU_PROCESS_START_AUTODESTROY |
|
|
|
|
VIR_QEMU_PROCESS_START_NEW, -1);
|
2016-03-22 12:16:05 +00:00
|
|
|
|
|
|
|
cfg = virQEMUDriverGetConfig(driver);
|
|
|
|
|
|
|
|
hookData.conn = conn;
|
|
|
|
hookData.vm = vm;
|
|
|
|
hookData.driver = driver;
|
|
|
|
/* We don't increase cfg's reference counter here. */
|
|
|
|
hookData.cfg = cfg;
|
|
|
|
|
|
|
|
if (!(caps = virQEMUDriverGetCapabilities(driver, false)))
|
|
|
|
goto cleanup;
|
|
|
|
|
2011-05-09 09:24:09 +00:00
|
|
|
VIR_DEBUG("Creating domain log file");
|
2015-11-12 14:28:36 +00:00
|
|
|
if (!(logCtxt = qemuDomainLogContextNew(driver, vm,
|
|
|
|
QEMU_DOMAIN_LOG_CONTEXT_MODE_START)))
|
2015-11-10 15:58:41 +00:00
|
|
|
goto cleanup;
|
2015-11-12 14:28:36 +00:00
|
|
|
logfile = qemuDomainLogContextGetWriteFD(logCtxt);
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2011-05-09 09:24:09 +00:00
|
|
|
VIR_DEBUG("Building emulator command line");
|
2016-04-06 14:41:33 +00:00
|
|
|
if (!(cmd = qemuBuildCommandLine(driver,
|
2016-02-23 13:05:09 +00:00
|
|
|
qemuDomainLogContextGetManager(logCtxt),
|
|
|
|
vm->def, priv->monConfig,
|
2013-04-26 02:32:41 +00:00
|
|
|
priv->monJSON, priv->qemuCaps,
|
2015-11-06 17:41:37 +00:00
|
|
|
incoming ? incoming->launchURI : NULL,
|
|
|
|
snapshot, vmop,
|
2016-04-13 06:10:24 +00:00
|
|
|
false,
|
2014-10-30 06:34:30 +00:00
|
|
|
qemuCheckFips(),
|
2015-03-27 09:11:00 +00:00
|
|
|
priv->autoNodeset,
|
2016-02-26 08:15:55 +00:00
|
|
|
&nnicindexes, &nicindexes,
|
2017-05-29 12:11:25 +00:00
|
|
|
priv->libDir,
|
|
|
|
priv->chardevStdioLogd)))
|
2015-11-10 15:58:41 +00:00
|
|
|
goto cleanup;
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2015-11-06 17:41:37 +00:00
|
|
|
if (incoming && incoming->fd != -1)
|
|
|
|
virCommandPassFD(cmd, incoming->fd, 0);
|
2015-08-07 12:42:31 +00:00
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
/* now that we know it is about to start call the hook if present */
|
2015-10-30 16:59:43 +00:00
|
|
|
if (qemuProcessStartHook(driver, vm,
|
|
|
|
VIR_HOOK_QEMU_OP_START,
|
|
|
|
VIR_HOOK_SUBOP_BEGIN) < 0)
|
2015-11-10 15:58:41 +00:00
|
|
|
goto cleanup;
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2015-11-12 13:02:46 +00:00
|
|
|
qemuLogOperation(vm, "starting up", cmd, logCtxt);
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2015-11-12 12:56:30 +00:00
|
|
|
qemuDomainObjCheckTaint(driver, vm, logCtxt);
|
2011-05-04 10:59:20 +00:00
|
|
|
|
2015-11-12 14:28:36 +00:00
|
|
|
qemuDomainLogContextMarkPosition(logCtxt);
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2016-11-15 10:30:18 +00:00
|
|
|
VIR_DEBUG("Building mount namespace");
|
|
|
|
|
|
|
|
if (qemuDomainCreateNamespace(driver, vm) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
VIR_DEBUG("Clear emulator capabilities: %d",
|
2013-01-10 21:03:14 +00:00
|
|
|
cfg->clearEmulatorCapabilities);
|
|
|
|
if (cfg->clearEmulatorCapabilities)
|
2011-02-14 16:09:39 +00:00
|
|
|
virCommandClearCaps(cmd);
|
|
|
|
|
2015-11-02 09:35:58 +00:00
|
|
|
VIR_DEBUG("Setting up raw IO");
|
|
|
|
if (qemuProcessSetupRawIO(driver, vm, cmd) < 0)
|
2015-11-10 15:58:41 +00:00
|
|
|
goto cleanup;
|
2014-09-09 22:51:02 +00:00
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
virCommandSetPreExecHook(cmd, qemuProcessHook, &hookData);
|
2013-04-25 16:16:25 +00:00
|
|
|
virCommandSetMaxProcesses(cmd, cfg->maxProcesses);
|
|
|
|
virCommandSetMaxFiles(cmd, cfg->maxFiles);
|
2015-03-18 11:14:55 +00:00
|
|
|
virCommandSetMaxCoreSize(cmd, cfg->maxCore);
|
qemu: ensure sane umask for qemu process
Add umask to _virCommand, allow user to set umask to command.
Set umask(002) to qemu process to overwrite the default umask
of 022 set by many distros, so that unix sockets created for
virtio-serial has expected permissions.
Fix problem reported here:
https://sourceware.org/bugzilla/show_bug.cgi?id=13078#c11
https://bugzilla.novell.com/show_bug.cgi?id=888166
To use virtio-serial device, unix socket created for chardev with
default umask(022) has insufficient permissions.
e.g.:
-device virtio-serial \
-chardev socket,path=/tmp/foo,server,nowait,id=foo \
-device virtserialport,chardev=foo,name=org.fedoraproject.port.0
srwxr-xr-x 1 qemu qemu 0 21. Jul 14:19 /tmp/somefile.sock
Other users in the same group (like real user, test engines, etc)
cannot write to this socket.
Signed-off-by: Chunyan Liu <cyliu@suse.com>
Signed-off-by: Eric Blake <eblake@redhat.com>
2014-09-03 06:18:07 +00:00
|
|
|
virCommandSetUmask(cmd, 0x002);
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2013-02-01 20:20:22 +00:00
|
|
|
VIR_DEBUG("Setting up security labelling");
|
2017-02-13 13:36:53 +00:00
|
|
|
if (qemuSecuritySetChildProcessLabel(driver->securityManager,
|
|
|
|
vm->def, cmd) < 0)
|
2015-11-10 15:58:41 +00:00
|
|
|
goto cleanup;
|
2013-02-01 20:20:22 +00:00
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
virCommandSetOutputFD(cmd, &logfile);
|
|
|
|
virCommandSetErrorFD(cmd, &logfile);
|
|
|
|
virCommandNonblockingFDs(cmd);
|
2011-06-17 13:43:54 +00:00
|
|
|
virCommandSetPidFile(cmd, priv->pidfile);
|
2011-02-14 16:09:39 +00:00
|
|
|
virCommandDaemonize(cmd);
|
2010-10-26 14:04:46 +00:00
|
|
|
virCommandRequireHandshake(cmd);
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2017-02-13 13:36:53 +00:00
|
|
|
if (qemuSecurityPreFork(driver->securityManager) < 0)
|
2015-11-10 15:58:41 +00:00
|
|
|
goto cleanup;
|
2015-10-20 12:26:46 +00:00
|
|
|
rv = virCommandRun(cmd, NULL);
|
2017-02-13 13:36:53 +00:00
|
|
|
qemuSecurityPostFork(driver->securityManager);
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2011-04-11 22:25:25 +00:00
|
|
|
/* wait for qemu process to show up */
|
2015-10-20 12:26:46 +00:00
|
|
|
if (rv == 0) {
|
2011-08-05 13:13:12 +00:00
|
|
|
if (virPidFileReadPath(priv->pidfile, &vm->pid) < 0) {
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
_("Domain %s didn't show up"), vm->def->name);
|
2015-10-20 12:26:46 +00:00
|
|
|
rv = -1;
|
2011-02-14 16:09:39 +00:00
|
|
|
}
|
2016-10-06 14:54:41 +00:00
|
|
|
VIR_DEBUG("QEMU vm=%p name=%s running with pid=%lld",
|
|
|
|
vm, vm->def->name, (long long) vm->pid);
|
2013-10-31 11:28:46 +00:00
|
|
|
} else {
|
|
|
|
VIR_DEBUG("QEMU vm=%p name=%s failed to spawn",
|
|
|
|
vm, vm->def->name);
|
2011-02-14 16:09:39 +00:00
|
|
|
}
|
|
|
|
|
2011-07-12 09:45:16 +00:00
|
|
|
VIR_DEBUG("Writing early domain status to disk");
|
2016-02-04 12:32:45 +00:00
|
|
|
if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm, driver->caps) < 0)
|
2015-11-10 15:58:41 +00:00
|
|
|
goto cleanup;
|
2011-07-12 09:45:16 +00:00
|
|
|
|
2010-10-26 14:04:46 +00:00
|
|
|
VIR_DEBUG("Waiting for handshake from child");
|
|
|
|
if (virCommandHandshakeWait(cmd) < 0) {
|
2013-12-03 16:38:14 +00:00
|
|
|
/* Read errors from child that occurred between fork and exec. */
|
2015-11-12 13:54:04 +00:00
|
|
|
qemuProcessReportLogError(logCtxt,
|
|
|
|
_("Process exited prior to exec"));
|
2015-11-10 15:58:41 +00:00
|
|
|
goto cleanup;
|
2010-10-26 14:04:46 +00:00
|
|
|
}
|
|
|
|
|
2013-07-22 14:21:15 +00:00
|
|
|
VIR_DEBUG("Setting up domain cgroup (if required)");
|
2015-01-16 11:25:50 +00:00
|
|
|
if (qemuSetupCgroup(driver, vm, nnicindexes, nicindexes) < 0)
|
2015-11-10 15:58:41 +00:00
|
|
|
goto cleanup;
|
2013-07-22 14:21:15 +00:00
|
|
|
|
2016-04-27 12:40:23 +00:00
|
|
|
if (!(priv->perf = virPerfNew()))
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
for (i = 0; i < VIR_PERF_EVENT_LAST; i++) {
|
2016-06-28 12:37:29 +00:00
|
|
|
if (vm->def->perf.events[i] == VIR_TRISTATE_BOOL_YES &&
|
2016-04-27 12:58:32 +00:00
|
|
|
virPerfEventEnable(priv->perf, i, vm->pid) < 0)
|
|
|
|
goto cleanup;
|
2016-03-28 13:30:30 +00:00
|
|
|
}
|
2016-03-28 13:30:29 +00:00
|
|
|
|
2016-01-14 15:56:53 +00:00
|
|
|
/* This must be done after cgroup placement to avoid resetting CPU
|
|
|
|
* affinity */
|
|
|
|
if (!vm->def->cputune.emulatorpin &&
|
|
|
|
qemuProcessInitCpuAffinity(vm) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
2016-02-26 15:34:23 +00:00
|
|
|
VIR_DEBUG("Setting emulator tuning/settings");
|
|
|
|
if (qemuProcessSetupEmulator(vm) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
2010-10-26 14:04:46 +00:00
|
|
|
VIR_DEBUG("Setting domain security labels");
|
2017-01-05 13:19:21 +00:00
|
|
|
if (qemuSecuritySetAllLabel(driver,
|
|
|
|
vm,
|
|
|
|
incoming ? incoming->path : NULL) < 0)
|
|
|
|
goto cleanup;
|
2010-10-26 14:04:46 +00:00
|
|
|
|
2012-06-11 13:57:19 +00:00
|
|
|
/* Security manager labeled all devices, therefore
|
2015-11-10 15:58:41 +00:00
|
|
|
* if any operation from now on fails, we need to ask the caller to
|
|
|
|
* restore labels.
|
|
|
|
*/
|
|
|
|
ret = -2;
|
2012-06-11 13:57:19 +00:00
|
|
|
|
2015-11-10 15:58:41 +00:00
|
|
|
if (incoming && incoming->fd != -1) {
|
2010-10-26 14:04:46 +00:00
|
|
|
/* if there's an fd to migrate from, and it's a pipe, put the
|
|
|
|
* proper security label on it
|
|
|
|
*/
|
|
|
|
struct stat stdin_sb;
|
|
|
|
|
|
|
|
VIR_DEBUG("setting security label on pipe used for migration");
|
|
|
|
|
2015-11-10 15:58:41 +00:00
|
|
|
if (fstat(incoming->fd, &stdin_sb) < 0) {
|
2010-10-26 14:04:46 +00:00
|
|
|
virReportSystemError(errno,
|
2015-11-10 15:58:41 +00:00
|
|
|
_("cannot stat fd %d"), incoming->fd);
|
|
|
|
goto cleanup;
|
2010-10-26 14:04:46 +00:00
|
|
|
}
|
|
|
|
if (S_ISFIFO(stdin_sb.st_mode) &&
|
2017-02-13 13:36:53 +00:00
|
|
|
qemuSecuritySetImageFDLabel(driver->securityManager,
|
|
|
|
vm->def, incoming->fd) < 0)
|
2015-11-10 15:58:41 +00:00
|
|
|
goto cleanup;
|
2010-10-26 14:04:46 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
VIR_DEBUG("Labelling done, completing handshake to child");
|
2014-11-13 14:25:30 +00:00
|
|
|
if (virCommandHandshakeNotify(cmd) < 0)
|
2015-11-10 15:58:41 +00:00
|
|
|
goto cleanup;
|
2010-10-26 14:04:46 +00:00
|
|
|
VIR_DEBUG("Handshake complete, child running");
|
|
|
|
|
2015-10-20 12:26:46 +00:00
|
|
|
if (rv == -1) /* The VM failed to start; tear filters before taps */
|
2011-02-14 16:09:39 +00:00
|
|
|
virDomainConfVMNWFilterTeardown(vm);
|
|
|
|
|
2015-10-20 12:26:46 +00:00
|
|
|
if (rv == -1) /* The VM failed to start */
|
2015-11-10 15:58:41 +00:00
|
|
|
goto cleanup;
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2011-05-09 09:24:09 +00:00
|
|
|
VIR_DEBUG("Waiting for monitor to show up");
|
2017-07-10 21:30:03 +00:00
|
|
|
if (qemuProcessWaitForMonitor(driver, vm, asyncJob, logCtxt) < 0)
|
2015-11-10 15:58:41 +00:00
|
|
|
goto cleanup;
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2016-11-16 13:43:01 +00:00
|
|
|
if (qemuConnectAgent(driver, vm) < 0)
|
|
|
|
goto cleanup;
|
2011-10-05 17:31:54 +00:00
|
|
|
|
2017-03-13 11:32:02 +00:00
|
|
|
VIR_DEBUG("Verifying and updating provided guest CPU");
|
2017-07-11 11:30:09 +00:00
|
|
|
if (qemuProcessUpdateAndVerifyCPU(driver, vm, asyncJob) < 0)
|
2015-11-10 15:58:41 +00:00
|
|
|
goto cleanup;
|
2013-10-14 09:35:00 +00:00
|
|
|
|
2014-07-08 07:59:49 +00:00
|
|
|
VIR_DEBUG("Setting up post-init cgroup restrictions");
|
2014-12-12 14:23:12 +00:00
|
|
|
if (qemuSetupCpusetMems(vm) < 0)
|
2015-11-10 15:58:41 +00:00
|
|
|
goto cleanup;
|
2014-07-08 07:59:49 +00:00
|
|
|
|
2016-08-04 12:36:24 +00:00
|
|
|
VIR_DEBUG("setting up hotpluggable cpus");
|
|
|
|
if (qemuDomainHasHotpluggableStartupVcpus(vm->def)) {
|
|
|
|
if (qemuDomainRefreshVcpuInfo(driver, vm, asyncJob, false) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
if (qemuProcessValidateHotpluggableVcpus(vm->def) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
if (qemuProcessSetupHotpluggableVcpus(driver, vm, asyncJob) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2016-07-19 14:00:29 +00:00
|
|
|
VIR_DEBUG("Refreshing VCPU info");
|
2016-08-05 12:48:27 +00:00
|
|
|
if (qemuDomainRefreshVcpuInfo(driver, vm, asyncJob, false) < 0)
|
2015-11-10 15:58:41 +00:00
|
|
|
goto cleanup;
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2016-08-01 05:35:50 +00:00
|
|
|
if (qemuDomainValidateVcpuInfo(vm) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
2016-08-04 12:23:25 +00:00
|
|
|
qemuDomainVcpuPersistOrder(vm->def);
|
|
|
|
|
2014-09-03 13:07:38 +00:00
|
|
|
VIR_DEBUG("Detecting IOThread PIDs");
|
|
|
|
if (qemuProcessDetectIOThreadPIDs(driver, vm, asyncJob) < 0)
|
2015-11-10 15:58:41 +00:00
|
|
|
goto cleanup;
|
2014-09-03 13:07:38 +00:00
|
|
|
|
2016-02-16 13:43:37 +00:00
|
|
|
VIR_DEBUG("Setting global CPU cgroup (if required)");
|
|
|
|
if (qemuSetupGlobalCpuCgroup(vm) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
2016-01-13 15:36:52 +00:00
|
|
|
VIR_DEBUG("Setting vCPU tuning/settings");
|
|
|
|
if (qemuProcessSetupVcpus(vm) < 0)
|
2015-11-10 15:58:41 +00:00
|
|
|
goto cleanup;
|
2011-07-21 02:10:31 +00:00
|
|
|
|
2016-01-14 09:38:02 +00:00
|
|
|
VIR_DEBUG("Setting IOThread tuning/settings");
|
|
|
|
if (qemuProcessSetupIOThreads(vm) < 0)
|
2015-11-10 15:58:41 +00:00
|
|
|
goto cleanup;
|
2015-01-08 14:37:50 +00:00
|
|
|
|
2011-05-09 09:24:09 +00:00
|
|
|
VIR_DEBUG("Setting any required VM passwords");
|
2014-08-12 02:54:42 +00:00
|
|
|
if (qemuProcessInitPasswords(conn, driver, vm, asyncJob) < 0)
|
2015-11-10 15:58:41 +00:00
|
|
|
goto cleanup;
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2011-09-06 08:23:47 +00:00
|
|
|
/* set default link states */
|
|
|
|
/* qemu doesn't support setting this on the command line, so
|
|
|
|
* enter the monitor */
|
|
|
|
VIR_DEBUG("Setting network link states");
|
2015-11-02 09:50:21 +00:00
|
|
|
if (qemuProcessSetLinkStates(driver, vm, asyncJob) < 0)
|
2015-11-10 15:58:41 +00:00
|
|
|
goto cleanup;
|
2011-09-06 08:23:47 +00:00
|
|
|
|
2013-07-19 13:08:29 +00:00
|
|
|
VIR_DEBUG("Fetching list of active devices");
|
2014-08-12 02:54:42 +00:00
|
|
|
if (qemuDomainUpdateDeviceList(driver, vm, asyncJob) < 0)
|
2015-11-10 15:58:41 +00:00
|
|
|
goto cleanup;
|
2013-07-19 13:08:29 +00:00
|
|
|
|
2015-01-19 12:21:09 +00:00
|
|
|
VIR_DEBUG("Updating info of memory devices");
|
|
|
|
if (qemuDomainUpdateMemoryDeviceInfo(driver, vm, asyncJob) < 0)
|
2015-11-10 15:58:41 +00:00
|
|
|
goto cleanup;
|
2015-01-19 12:21:09 +00:00
|
|
|
|
2011-05-09 09:24:09 +00:00
|
|
|
VIR_DEBUG("Setting initial memory amount");
|
2015-11-02 10:00:49 +00:00
|
|
|
if (qemuProcessSetupBalloon(driver, vm, asyncJob) < 0)
|
2015-11-10 15:58:41 +00:00
|
|
|
goto cleanup;
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2015-06-30 14:31:24 +00:00
|
|
|
/* Since CPUs were not started yet, the balloon could not return the memory
|
2015-05-27 13:04:14 +00:00
|
|
|
* to the host and thus cur_balloon needs to be updated so that GetXMLdesc
|
|
|
|
* and friends return the correct size in case they can't grab the job */
|
2015-11-06 17:41:37 +00:00
|
|
|
if (!incoming && !snapshot &&
|
2015-09-23 12:19:06 +00:00
|
|
|
qemuProcessRefreshBalloonState(driver, vm, asyncJob) < 0)
|
2015-11-10 15:58:41 +00:00
|
|
|
goto cleanup;
|
2015-05-27 13:04:14 +00:00
|
|
|
|
2014-12-10 14:31:23 +00:00
|
|
|
VIR_DEBUG("Detecting actual memory size for video device");
|
|
|
|
if (qemuProcessUpdateVideoRamSize(driver, vm, asyncJob) < 0)
|
2015-11-10 15:58:41 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
2016-05-23 12:00:35 +00:00
|
|
|
VIR_DEBUG("Updating disk data");
|
|
|
|
if (qemuProcessRefreshDisks(driver, vm, asyncJob) < 0)
|
2016-04-29 11:38:51 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
2015-11-10 15:58:41 +00:00
|
|
|
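/* Register a close callback so the guest is torn down automatically if
 * the connection that started it goes away. */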
if (flags & VIR_QEMU_PROCESS_START_AUTODESTROY &&
|
|
|
|
qemuProcessAutoDestroyAdd(driver, vm, conn) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
ret = 0;
|
|
|
|
|
|
|
|
cleanup:
|
2016-05-02 17:31:47 +00:00
|
|
|
qemuDomainSecretDestroy(vm);
|
2015-11-10 15:58:41 +00:00
|
|
|
virCommandFree(cmd);
|
2017-04-03 08:24:36 +00:00
|
|
|
virObjectUnref(logCtxt);
|
2015-11-10 15:58:41 +00:00
|
|
|
virObjectUnref(cfg);
|
|
|
|
virObjectUnref(caps);
|
|
|
|
VIR_FREE(nicindexes);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2015-11-10 12:29:40 +00:00
|
|
|
/**
|
|
|
|
* qemuProcessFinishStartup:
|
|
|
|
*
|
|
|
|
* Finish starting a new domain.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
qemuProcessFinishStartup(virConnectPtr conn,
|
|
|
|
virQEMUDriverPtr driver,
|
|
|
|
virDomainObjPtr vm,
|
|
|
|
qemuDomainAsyncJob asyncJob,
|
|
|
|
bool startCPUs,
|
|
|
|
virDomainPausedReason pausedReason)
|
|
|
|
{
|
|
|
|
virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
|
|
|
|
int ret = -1;
|
|
|
|
|
|
|
|
if (startCPUs) {
|
|
|
|
VIR_DEBUG("Starting domain CPUs");
|
|
|
|
if (qemuProcessStartCPUs(driver, vm, conn,
|
|
|
|
VIR_DOMAIN_RUNNING_BOOTED,
|
|
|
|
asyncJob) < 0) {
|
|
|
|
if (!virGetLastError())
|
|
|
|
virReportError(VIR_ERR_OPERATION_FAILED, "%s",
|
|
|
|
_("resume operation failed"));
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
virDomainObjSetState(vm, VIR_DOMAIN_PAUSED, pausedReason);
|
|
|
|
}
|
|
|
|
|
|
|
|
VIR_DEBUG("Writing domain status to disk");
|
2016-02-04 12:32:45 +00:00
|
|
|
if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm, driver->caps) < 0)
|
2015-11-10 12:29:40 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
if (qemuProcessStartHook(driver, vm,
|
|
|
|
VIR_HOOK_QEMU_OP_STARTED,
|
|
|
|
VIR_HOOK_SUBOP_BEGIN) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
ret = 0;
|
|
|
|
|
|
|
|
cleanup:
|
|
|
|
virObjectUnref(cfg);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2015-11-10 15:58:41 +00:00
|
|
|
int
|
|
|
|
qemuProcessStart(virConnectPtr conn,
|
|
|
|
virQEMUDriverPtr driver,
|
|
|
|
virDomainObjPtr vm,
|
2017-05-31 10:34:10 +00:00
|
|
|
virCPUDefPtr updatedCPU,
|
2015-11-10 15:58:41 +00:00
|
|
|
qemuDomainAsyncJob asyncJob,
|
|
|
|
const char *migrateFrom,
|
|
|
|
int migrateFd,
|
|
|
|
const char *migratePath,
|
|
|
|
virDomainSnapshotObjPtr snapshot,
|
|
|
|
virNetDevVPortProfileOp vmop,
|
|
|
|
unsigned int flags)
|
|
|
|
{
|
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
|
|
|
qemuProcessIncomingDefPtr incoming = NULL;
|
|
|
|
unsigned int stopFlags;
|
|
|
|
bool relabel = false;
|
|
|
|
int ret = -1;
|
|
|
|
int rv;
|
|
|
|
|
|
|
|
VIR_DEBUG("conn=%p driver=%p vm=%p name=%s id=%d asyncJob=%s "
|
|
|
|
"migrateFrom=%s migrateFd=%d migratePath=%s "
|
|
|
|
"snapshot=%p vmop=%d flags=0x%x",
|
|
|
|
conn, driver, vm, vm->def->name, vm->def->id,
|
|
|
|
qemuDomainAsyncJobTypeToString(asyncJob),
|
|
|
|
NULLSTR(migrateFrom), migrateFd, NULLSTR(migratePath),
|
|
|
|
snapshot, vmop, flags);
|
|
|
|
|
|
|
|
virCheckFlagsGoto(VIR_QEMU_PROCESS_START_COLD |
|
|
|
|
VIR_QEMU_PROCESS_START_PAUSED |
|
|
|
|
VIR_QEMU_PROCESS_START_AUTODESTROY, cleanup);
|
|
|
|
|
2016-05-27 11:45:05 +00:00
|
|
|
if (!migrateFrom && !snapshot)
|
|
|
|
flags |= VIR_QEMU_PROCESS_START_NEW;
|
|
|
|
|
2017-05-31 10:34:10 +00:00
|
|
|
if (qemuProcessInit(driver, vm, updatedCPU,
|
|
|
|
asyncJob, !!migrateFrom, flags) < 0)
|
2015-11-10 15:58:41 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
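/* For incoming migration, describe where qemu should read the incoming
 * stream from (URI, file descriptor and/or on-disk path). */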
if (migrateFrom) {
|
2016-01-07 23:07:37 +00:00
|
|
|
incoming = qemuProcessIncomingDefNew(priv->qemuCaps, NULL, migrateFrom,
|
2015-11-10 15:58:41 +00:00
|
|
|
migrateFd, migratePath);
|
|
|
|
if (!incoming)
|
|
|
|
goto stop;
|
|
|
|
}
|
|
|
|
|
2016-03-15 12:00:59 +00:00
|
|
|
if (qemuProcessPrepareDomain(conn, driver, vm, flags) < 0)
|
|
|
|
goto stop;
|
|
|
|
|
2016-03-22 12:16:05 +00:00
|
|
|
if (qemuProcessPrepareHost(driver, vm, !!incoming) < 0)
|
|
|
|
goto stop;
|
|
|
|
|
2015-11-10 15:58:41 +00:00
|
|
|
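/* Launch the qemu process.  rv == -2 means qemu was already running when
 * the failure happened, so security labels must be restored on cleanup. */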
if ((rv = qemuProcessLaunch(conn, driver, vm, asyncJob, incoming,
|
|
|
|
snapshot, vmop, flags)) < 0) {
|
2016-03-14 15:54:03 +00:00
|
|
|
if (rv == -2)
|
2015-11-10 15:58:41 +00:00
|
|
|
relabel = true;
|
|
|
|
goto stop;
|
|
|
|
}
|
|
|
|
relabel = true;
|
2014-12-10 14:31:23 +00:00
|
|
|
|
2015-11-11 17:02:23 +00:00
|
|
|
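/* A deferred incoming URI could not be put on the command line, so the
 * incoming migration is started here through the monitor instead. */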
if (incoming &&
|
|
|
|
incoming->deferredURI &&
|
|
|
|
qemuMigrationRunIncoming(driver, vm, incoming->deferredURI, asyncJob) < 0)
|
2015-11-10 15:58:41 +00:00
|
|
|
goto stop;
|
2015-11-11 17:02:23 +00:00
|
|
|
|
2015-11-10 12:29:40 +00:00
|
|
|
if (qemuProcessFinishStartup(conn, driver, vm, asyncJob,
|
|
|
|
!(flags & VIR_QEMU_PROCESS_START_PAUSED),
|
|
|
|
incoming ?
|
|
|
|
VIR_DOMAIN_PAUSED_MIGRATION :
|
|
|
|
VIR_DOMAIN_PAUSED_USER) < 0)
|
2015-11-10 15:58:41 +00:00
|
|
|
goto stop;
|
2012-05-28 14:04:31 +00:00
|
|
|
|
2014-09-05 23:16:20 +00:00
|
|
|
/* Keep watching the qemu log for errors during incoming migration;
|
|
|
|
* otherwise stop reporting errors from the qemu log. */
|
2015-11-06 17:41:37 +00:00
|
|
|
if (!incoming)
|
2015-11-12 13:54:04 +00:00
|
|
|
qemuMonitorSetDomainLog(priv->mon, NULL, NULL, NULL);
|
2013-09-18 14:23:14 +00:00
|
|
|
|
2015-10-20 12:30:52 +00:00
|
|
|
ret = 0;
|
|
|
|
|
|
|
|
cleanup:
|
2015-11-06 17:41:37 +00:00
|
|
|
qemuProcessIncomingDefFree(incoming);
|
2015-10-20 12:30:52 +00:00
|
|
|
return ret;
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2015-11-10 15:58:41 +00:00
|
|
|
stop:
|
|
|
|
stopFlags = 0;
|
|
|
|
if (!relabel)
|
|
|
|
stopFlags |= VIR_QEMU_PROCESS_STOP_NO_RELABEL;
|
|
|
|
if (migrateFrom)
|
|
|
|
stopFlags |= VIR_QEMU_PROCESS_STOP_MIGRATED;
|
2015-11-12 11:01:07 +00:00
|
|
|
if (priv->mon)
|
2015-11-12 13:54:04 +00:00
|
|
|
qemuMonitorSetDomainLog(priv->mon, NULL, NULL, NULL);
|
2016-02-11 10:20:28 +00:00
|
|
|
qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED, asyncJob, stopFlags);
|
2015-10-20 12:30:52 +00:00
|
|
|
goto cleanup;
|
2011-02-14 16:09:39 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2016-03-22 12:17:27 +00:00
|
|
|
virCommandPtr
|
|
|
|
qemuProcessCreatePretendCmd(virConnectPtr conn,
|
|
|
|
virQEMUDriverPtr driver,
|
|
|
|
virDomainObjPtr vm,
|
|
|
|
const char *migrateURI,
|
2016-04-25 05:47:59 +00:00
|
|
|
bool enableFips,
|
2016-03-22 12:17:27 +00:00
|
|
|
bool standalone,
|
|
|
|
unsigned int flags)
|
|
|
|
{
|
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
|
|
|
virCommandPtr cmd = NULL;
|
|
|
|
|
|
|
|
virCheckFlagsGoto(VIR_QEMU_PROCESS_START_COLD |
|
|
|
|
VIR_QEMU_PROCESS_START_PAUSED |
|
|
|
|
VIR_QEMU_PROCESS_START_AUTODESTROY, cleanup);
|
|
|
|
|
|
|
|
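/* Go through the usual init/prepare steps in "pretend" mode: no qemu
 * process is started, the only result is the generated command line. */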
flags |= VIR_QEMU_PROCESS_START_PRETEND;
|
2016-05-27 11:45:05 +00:00
|
|
|
flags |= VIR_QEMU_PROCESS_START_NEW;
|
2016-03-22 12:17:27 +00:00
|
|
|
|
2017-05-31 10:34:10 +00:00
|
|
|
if (qemuProcessInit(driver, vm, NULL, QEMU_ASYNC_JOB_NONE,
|
|
|
|
!!migrateURI, flags) < 0)
|
2016-03-22 12:17:27 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
if (qemuProcessPrepareDomain(conn, driver, vm, flags) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
VIR_DEBUG("Building emulator command line");
|
2016-04-06 14:41:33 +00:00
|
|
|
cmd = qemuBuildCommandLine(driver,
|
2016-03-22 12:17:27 +00:00
|
|
|
NULL,
|
|
|
|
vm->def,
|
|
|
|
priv->monConfig,
|
|
|
|
priv->monJSON,
|
|
|
|
priv->qemuCaps,
|
|
|
|
migrateURI,
|
|
|
|
NULL,
|
|
|
|
VIR_NETDEV_VPORT_PROFILE_OP_NO_OP,
|
|
|
|
standalone,
|
2016-04-25 05:47:59 +00:00
|
|
|
enableFips,
|
2016-03-22 12:17:27 +00:00
|
|
|
priv->autoNodeset,
|
|
|
|
NULL,
|
|
|
|
NULL,
|
2017-05-29 12:11:25 +00:00
|
|
|
priv->libDir,
|
|
|
|
priv->chardevStdioLogd);
|
2016-03-22 12:17:27 +00:00
|
|
|
|
|
|
|
cleanup:
|
|
|
|
return cmd;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2012-02-07 16:13:57 +00:00
|
|
|
int
|
2013-02-06 18:17:20 +00:00
|
|
|
qemuProcessKill(virDomainObjPtr vm, unsigned int flags)
|
2011-04-21 15:19:06 +00:00
|
|
|
{
|
2012-09-26 14:42:58 +00:00
|
|
|
int ret;
|
2012-01-27 18:28:23 +00:00
|
|
|
|
2016-10-06 14:54:41 +00:00
|
|
|
VIR_DEBUG("vm=%p name=%s pid=%lld flags=%x",
|
2013-10-31 11:28:46 +00:00
|
|
|
vm, vm->def->name,
|
2016-10-06 14:54:41 +00:00
|
|
|
(long long) vm->pid, flags);
|
2011-04-21 15:19:06 +00:00
|
|
|
|
2012-03-30 06:21:49 +00:00
|
|
|
if (!(flags & VIR_QEMU_PROCESS_KILL_NOCHECK)) {
|
|
|
|
if (!virDomainObjIsActive(vm)) {
|
|
|
|
VIR_DEBUG("VM '%s' not active", vm->def->name);
|
|
|
|
return 0;
|
|
|
|
}
|
2011-04-21 15:19:06 +00:00
|
|
|
}
|
|
|
|
|
2013-05-17 14:22:46 +00:00
|
|
|
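/* NOWAIT: fire a single signal and return immediately; otherwise fall
 * through to virProcessKillPainfully(), which waits for the process to
 * exit and escalates to SIGKILL when _KILL_FORCE is set. */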
if (flags & VIR_QEMU_PROCESS_KILL_NOWAIT) {
|
2012-09-26 14:42:58 +00:00
|
|
|
virProcessKill(vm->pid,
|
|
|
|
(flags & VIR_QEMU_PROCESS_KILL_FORCE) ?
|
|
|
|
SIGKILL : SIGTERM);
|
|
|
|
return 0;
|
|
|
|
}
|
2011-04-21 15:19:06 +00:00
|
|
|
|
2012-09-26 14:42:58 +00:00
|
|
|
ret = virProcessKillPainfully(vm->pid,
|
|
|
|
!!(flags & VIR_QEMU_PROCESS_KILL_FORCE));
|
2011-09-13 16:11:26 +00:00
|
|
|
|
2012-02-07 16:13:57 +00:00
|
|
|
return ret;
|
2011-04-21 15:19:06 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2016-02-11 14:13:09 +00:00
|
|
|
/**
|
|
|
|
* qemuProcessBeginStopJob:
|
|
|
|
*
|
|
|
|
* Stop all current jobs by killing the domain and start a new one for
|
|
|
|
* qemuProcessStop.
|
|
|
|
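*
* Typical caller pattern (sketch, mirroring qemuProcessAutoDestroy below):
*   if (qemuProcessBeginStopJob(driver, vm, QEMU_JOB_DESTROY, true) < 0)
*       return -1;
*   qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_DESTROYED,
*                   QEMU_ASYNC_JOB_NONE, 0);
*   qemuDomainObjEndJob(driver, vm);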
*/
|
|
|
|
int
|
|
|
|
qemuProcessBeginStopJob(virQEMUDriverPtr driver,
|
|
|
|
virDomainObjPtr vm,
|
|
|
|
qemuDomainJob job,
|
|
|
|
bool forceKill)
|
|
|
|
{
|
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
|
|
|
unsigned int killFlags = forceKill ? VIR_QEMU_PROCESS_KILL_FORCE : 0;
|
|
|
|
int ret = -1;
|
|
|
|
|
|
|
|
/* We need to prevent monitor EOF callback from doing our work (and
|
|
|
|
* sending misleading events) while the vm is unlocked inside
|
|
|
|
* BeginJob/ProcessKill API
|
|
|
|
*/
|
|
|
|
priv->beingDestroyed = true;
|
|
|
|
|
|
|
|
if (qemuProcessKill(vm, killFlags) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
/* Wake up anything waiting on domain condition */
|
|
|
|
virDomainObjBroadcast(vm);
|
|
|
|
|
|
|
|
if (qemuDomainObjBeginJob(driver, vm, job) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
ret = 0;
|
|
|
|
|
|
|
|
cleanup:
|
|
|
|
priv->beingDestroyed = false;
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2012-11-28 16:43:10 +00:00
|
|
|
void qemuProcessStop(virQEMUDriverPtr driver,
|
2011-02-14 16:09:39 +00:00
|
|
|
virDomainObjPtr vm,
|
2012-06-11 13:20:44 +00:00
|
|
|
virDomainShutoffReason reason,
|
2016-02-11 10:20:28 +00:00
|
|
|
qemuDomainAsyncJob asyncJob,
|
2012-06-11 13:20:44 +00:00
|
|
|
unsigned int flags)
|
2011-02-14 16:09:39 +00:00
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
int retries = 0;
|
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
|
|
|
virErrorPtr orig_err;
|
|
|
|
virDomainDefPtr def;
|
2012-02-10 21:09:00 +00:00
|
|
|
virNetDevVPortProfilePtr vport = NULL;
|
2013-07-08 14:09:33 +00:00
|
|
|
size_t i;
|
2011-02-14 16:09:39 +00:00
|
|
|
char *timestamp;
|
2013-01-10 21:03:14 +00:00
|
|
|
virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2016-10-06 14:54:41 +00:00
|
|
|
VIR_DEBUG("Shutting down vm=%p name=%s id=%d pid=%lld, "
|
2016-02-11 10:20:28 +00:00
|
|
|
"reason=%s, asyncJob=%s, flags=%x",
|
2013-10-31 11:28:46 +00:00
|
|
|
vm, vm->def->name, vm->def->id,
|
2016-10-06 14:54:41 +00:00
|
|
|
(long long) vm->pid,
|
2016-02-11 10:20:28 +00:00
|
|
|
virDomainShutoffReasonTypeToString(reason),
|
|
|
|
qemuDomainAsyncJobTypeToString(asyncJob),
|
|
|
|
flags);
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2014-01-07 13:31:13 +00:00
|
|
|
/* This method is routinely used in clean up paths. Disable error
|
|
|
|
* reporting so we don't squash a legit error. */
|
|
|
|
orig_err = virSaveLastError();
|
|
|
|
|
2016-02-11 10:20:28 +00:00
|
|
|
if (asyncJob != QEMU_ASYNC_JOB_NONE) {
|
|
|
|
if (qemuDomainObjBeginNestedJob(driver, vm, asyncJob) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
} else if (priv->job.asyncJob != QEMU_ASYNC_JOB_NONE &&
|
|
|
|
priv->job.asyncOwner == virThreadSelfID() &&
|
|
|
|
priv->job.active != QEMU_JOB_ASYNC_NESTED) {
|
|
|
|
VIR_WARN("qemuProcessStop called without a nested job (async=%s)",
|
|
|
|
qemuDomainAsyncJobTypeToString(asyncJob));
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!virDomainObjIsActive(vm)) {
|
|
|
|
VIR_DEBUG("VM '%s' not active", vm->def->name);
|
|
|
|
goto endjob;
|
|
|
|
}
|
|
|
|
|
2017-06-07 12:47:37 +00:00
|
|
|
qemuProcessBuildDestroyHugepagesPath(driver, vm, NULL, false);
|
2016-11-22 12:21:51 +00:00
|
|
|
|
2012-03-30 06:21:49 +00:00
|
|
|
vm->def->id = -1;
|
|
|
|
|
2013-02-19 13:57:46 +00:00
|
|
|
if (virAtomicIntDecAndTest(&driver->nactive) && driver->inhibitCallback)
|
2012-10-31 19:03:55 +00:00
|
|
|
driver->inhibitCallback(false, driver->inhibitOpaque);
|
|
|
|
|
2015-05-14 12:28:12 +00:00
|
|
|
/* Wake up anything waiting on domain condition */
|
|
|
|
virDomainObjBroadcast(vm);
|
2015-04-16 09:24:21 +00:00
|
|
|
|
2016-06-07 14:31:15 +00:00
|
|
|
if ((timestamp = virTimeStringNow()) != NULL) {
|
2016-09-19 08:17:57 +00:00
|
|
|
qemuDomainLogAppendMessage(driver, vm, "%s: shutting down, reason=%s\n",
|
|
|
|
timestamp,
|
|
|
|
virDomainShutoffReasonTypeToString(reason));
|
2016-06-07 14:31:15 +00:00
|
|
|
VIR_FREE(timestamp);
|
2011-02-14 16:09:39 +00:00
|
|
|
}
|
|
|
|
|
2014-11-18 23:55:48 +00:00
|
|
|
/* Clear network bandwidth */
|
|
|
|
virDomainClearNetBandwidth(vm);
|
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
virDomainConfVMNWFilterTeardown(vm);
|
|
|
|
|
2013-01-10 21:03:14 +00:00
|
|
|
if (cfg->macFilter) {
|
2011-02-14 16:09:39 +00:00
|
|
|
def = vm->def;
|
2013-05-21 07:21:20 +00:00
|
|
|
for (i = 0; i < def->nnets; i++) {
|
2011-02-14 16:09:39 +00:00
|
|
|
virDomainNetDefPtr net = def->nets[i];
|
|
|
|
if (net->ifname == NULL)
|
|
|
|
continue;
|
2014-03-07 17:34:54 +00:00
|
|
|
ignore_value(ebtablesRemoveForwardAllowIn(driver->ebtables,
|
|
|
|
net->ifname,
|
|
|
|
&net->mac));
|
2011-02-14 16:09:39 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-10-31 08:16:22 +00:00
|
|
|
virPortAllocatorRelease(driver->migrationPorts, priv->nbdPort);
|
2013-07-04 19:16:57 +00:00
|
|
|
priv->nbdPort = 0;
|
2013-01-31 13:48:06 +00:00
|
|
|
|
2011-10-05 17:31:54 +00:00
|
|
|
if (priv->agent) {
|
|
|
|
qemuAgentClose(priv->agent);
|
|
|
|
priv->agent = NULL;
|
|
|
|
}
|
2016-11-16 13:43:03 +00:00
|
|
|
priv->agentError = false;
|
2011-10-05 17:31:54 +00:00
|
|
|
|
2012-09-26 14:54:58 +00:00
|
|
|
if (priv->mon) {
|
2011-02-14 16:09:39 +00:00
|
|
|
qemuMonitorClose(priv->mon);
|
2012-09-26 14:54:58 +00:00
|
|
|
priv->mon = NULL;
|
|
|
|
}
|
2011-02-14 16:09:39 +00:00
|
|
|
|
|
|
|
if (priv->monConfig) {
|
|
|
|
if (priv->monConfig->type == VIR_DOMAIN_CHR_TYPE_UNIX)
|
|
|
|
unlink(priv->monConfig->data.nix.path);
|
|
|
|
virDomainChrSourceDefFree(priv->monConfig);
|
|
|
|
priv->monConfig = NULL;
|
|
|
|
}
|
|
|
|
|
2016-03-29 22:22:46 +00:00
|
|
|
/* Remove the master key */
|
|
|
|
qemuDomainMasterKeyRemove(priv);
|
|
|
|
|
2016-02-26 08:15:55 +00:00
|
|
|
virFileDeleteTree(priv->libDir);
|
|
|
|
virFileDeleteTree(priv->channelTargetDir);
|
2015-08-07 12:42:31 +00:00
|
|
|
|
2016-04-03 19:51:29 +00:00
|
|
|
qemuDomainClearPrivatePaths(vm);
|
|
|
|
|
2014-05-13 06:54:20 +00:00
|
|
|
ignore_value(virDomainChrDefForeach(vm->def,
|
|
|
|
false,
|
|
|
|
qemuProcessCleanupChardevDevice,
|
|
|
|
NULL));
|
|
|
|
|
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
/* shut it off for sure */
|
2013-02-06 18:17:20 +00:00
|
|
|
ignore_value(qemuProcessKill(vm,
|
|
|
|
VIR_QEMU_PROCESS_KILL_FORCE|
|
|
|
|
VIR_QEMU_PROCESS_KILL_NOCHECK));
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2012-03-16 06:52:26 +00:00
|
|
|
qemuDomainCleanupRun(driver, vm);
|
|
|
|
|
2011-06-23 09:37:57 +00:00
|
|
|
/* Stop autodestroy in case guest is restarted */
|
2013-02-28 16:43:43 +00:00
|
|
|
qemuProcessAutoDestroyRemove(driver, vm);
|
2011-06-23 09:37:57 +00:00
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
/* now that we know it's stopped call the hook if present */
|
|
|
|
if (virHookPresent(VIR_HOOK_DRIVER_QEMU)) {
|
2012-10-08 09:58:05 +00:00
|
|
|
char *xml = qemuDomainDefFormatXML(driver, vm->def, 0);
|
2011-02-14 16:09:39 +00:00
|
|
|
|
|
|
|
/* we can't stop the operation even if the script raised an error */
|
2015-09-23 22:13:57 +00:00
|
|
|
ignore_value(virHookCall(VIR_HOOK_DRIVER_QEMU, vm->def->name,
|
|
|
|
VIR_HOOK_QEMU_OP_STOPPED, VIR_HOOK_SUBOP_END,
|
|
|
|
NULL, xml, NULL));
|
2011-02-14 16:09:39 +00:00
|
|
|
VIR_FREE(xml);
|
|
|
|
}
|
|
|
|
|
2012-06-11 13:57:19 +00:00
|
|
|
/* Reset security labels unless the caller asked us not to */
|
|
|
|
if (!(flags & VIR_QEMU_PROCESS_STOP_NO_RELABEL))
|
2016-11-23 10:52:57 +00:00
|
|
|
qemuSecurityRestoreAllLabel(driver, vm,
|
|
|
|
!!(flags & VIR_QEMU_PROCESS_STOP_MIGRATED));
|
|
|
|
|
2017-02-13 13:36:53 +00:00
|
|
|
qemuSecurityReleaseLabel(driver->securityManager, vm->def);
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2013-01-02 14:37:07 +00:00
|
|
|
for (i = 0; i < vm->def->ndisks; i++) {
|
2013-05-03 18:07:37 +00:00
|
|
|
virDomainDeviceDef dev;
|
2013-01-02 14:37:07 +00:00
|
|
|
virDomainDiskDefPtr disk = vm->def->disks[i];
|
2013-05-03 18:07:37 +00:00
|
|
|
|
|
|
|
dev.type = VIR_DOMAIN_DEVICE_DISK;
|
|
|
|
dev.data.disk = disk;
|
|
|
|
ignore_value(qemuRemoveSharedDevice(driver, &dev, vm->def->name));
|
2013-01-02 14:37:07 +00:00
|
|
|
}
|
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
/* Clear out dynamically assigned labels */
|
2012-08-15 22:10:37 +00:00
|
|
|
for (i = 0; i < vm->def->nseclabels; i++) {
|
2014-11-13 14:25:30 +00:00
|
|
|
if (vm->def->seclabels[i]->type == VIR_DOMAIN_SECLABEL_DYNAMIC)
|
2012-08-15 22:10:37 +00:00
|
|
|
VIR_FREE(vm->def->seclabels[i]->label);
|
|
|
|
VIR_FREE(vm->def->seclabels[i]->imagelabel);
|
2011-02-14 16:09:39 +00:00
|
|
|
}
|
|
|
|
|
2016-11-25 08:18:35 +00:00
|
|
|
virStringListFree(priv->qemuDevices);
|
2013-07-19 13:08:29 +00:00
|
|
|
priv->qemuDevices = NULL;
|
|
|
|
|
2015-10-20 12:12:48 +00:00
|
|
|
qemuHostdevReAttachDomainDevices(driver, vm->def);
|
2011-02-14 16:09:39 +00:00
|
|
|
|
|
|
|
def = vm->def;
|
|
|
|
for (i = 0; i < def->nnets; i++) {
|
|
|
|
virDomainNetDefPtr net = def->nets[i];
|
2014-03-11 07:17:26 +00:00
|
|
|
vport = virDomainNetGetActualVirtPortProfile(net);
|
|
|
|
|
|
|
|
switch (virDomainNetGetActualType(net)) {
|
|
|
|
case VIR_DOMAIN_NET_TYPE_DIRECT:
|
2011-11-02 17:19:48 +00:00
|
|
|
ignore_value(virNetDevMacVLanDeleteWithVPortProfile(
|
2012-07-17 12:07:59 +00:00
|
|
|
net->ifname, &net->mac,
|
2011-11-02 17:19:48 +00:00
|
|
|
virDomainNetGetActualDirectDev(net),
|
|
|
|
virDomainNetGetActualDirectMode(net),
|
2012-02-15 19:19:32 +00:00
|
|
|
virDomainNetGetActualVirtPortProfile(net),
|
2013-01-10 21:03:14 +00:00
|
|
|
cfg->stateDir));
|
2014-03-11 07:17:26 +00:00
|
|
|
break;
|
2016-03-23 11:37:59 +00:00
|
|
|
case VIR_DOMAIN_NET_TYPE_ETHERNET:
|
|
|
|
if (net->ifname) {
|
|
|
|
ignore_value(virNetDevTapDelete(net->ifname, net->backend.tap));
|
|
|
|
VIR_FREE(net->ifname);
|
|
|
|
}
|
|
|
|
break;
|
2014-03-11 07:17:26 +00:00
|
|
|
case VIR_DOMAIN_NET_TYPE_BRIDGE:
|
|
|
|
case VIR_DOMAIN_NET_TYPE_NETWORK:
|
|
|
|
#ifdef VIR_NETDEV_TAP_REQUIRE_MANUAL_CLEANUP
|
|
|
|
if (!(vport && vport->virtPortType == VIR_NETDEV_VPORT_PROFILE_OPENVSWITCH))
|
2014-09-11 15:15:24 +00:00
|
|
|
ignore_value(virNetDevTapDelete(net->ifname, net->backend.tap));
|
2014-03-11 07:17:26 +00:00
|
|
|
#endif
|
|
|
|
break;
|
2016-09-23 15:04:53 +00:00
|
|
|
case VIR_DOMAIN_NET_TYPE_USER:
|
|
|
|
case VIR_DOMAIN_NET_TYPE_VHOSTUSER:
|
|
|
|
case VIR_DOMAIN_NET_TYPE_SERVER:
|
|
|
|
case VIR_DOMAIN_NET_TYPE_CLIENT:
|
|
|
|
case VIR_DOMAIN_NET_TYPE_MCAST:
|
|
|
|
case VIR_DOMAIN_NET_TYPE_INTERNAL:
|
|
|
|
case VIR_DOMAIN_NET_TYPE_HOSTDEV:
|
|
|
|
case VIR_DOMAIN_NET_TYPE_UDP:
|
|
|
|
case VIR_DOMAIN_NET_TYPE_LAST:
|
|
|
|
/* No special cleanup procedure for these types. */
|
|
|
|
break;
|
2011-02-14 16:09:39 +00:00
|
|
|
}
|
2011-07-04 06:27:12 +00:00
|
|
|
/* release the physical device (or any other resources used by
|
|
|
|
* this interface in the network driver)
|
|
|
|
*/
|
2015-02-23 20:54:56 +00:00
|
|
|
if (vport) {
|
|
|
|
if (vport->virtPortType == VIR_NETDEV_VPORT_PROFILE_MIDONET) {
|
|
|
|
ignore_value(virNetDevMidonetUnbindPort(vport));
|
|
|
|
} else if (vport->virtPortType == VIR_NETDEV_VPORT_PROFILE_OPENVSWITCH) {
|
|
|
|
ignore_value(virNetDevOpenvswitchRemovePort(
|
|
|
|
virDomainNetGetActualBridgeName(net),
|
|
|
|
net->ifname));
|
|
|
|
}
|
|
|
|
}
|
2012-02-10 21:09:00 +00:00
|
|
|
|
2013-08-27 17:06:18 +00:00
|
|
|
/* kick the device out of the hostdev list too */
|
|
|
|
virDomainNetRemoveHostdev(def, net);
|
2014-01-31 15:48:06 +00:00
|
|
|
networkReleaseActualDevice(vm->def, net);
|
2011-07-04 06:27:12 +00:00
|
|
|
}
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2014-03-25 06:49:44 +00:00
|
|
|
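/* Removing the cgroup can transiently fail with EBUSY while qemu's tasks
 * are still exiting, so retry a few times before giving up. */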
retry:
|
2016-02-01 15:50:54 +00:00
|
|
|
if ((ret = qemuRemoveCgroup(vm)) < 0) {
|
2011-02-14 16:09:39 +00:00
|
|
|
if (ret == -EBUSY && (retries++ < 5)) {
|
|
|
|
usleep(200*1000);
|
|
|
|
goto retry;
|
|
|
|
}
|
|
|
|
VIR_WARN("Failed to remove cgroup for %s",
|
|
|
|
vm->def->name);
|
|
|
|
}
|
2013-03-21 14:40:29 +00:00
|
|
|
virCgroupFree(&priv->cgroup);
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2016-03-28 13:30:29 +00:00
|
|
|
virPerfFree(priv->perf);
|
2016-04-07 10:50:15 +00:00
|
|
|
priv->perf = NULL;
|
2016-03-28 13:30:29 +00:00
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
qemuProcessRemoveDomainStatus(driver, vm);
|
|
|
|
|
2012-11-10 01:40:23 +00:00
|
|
|
/* Remove VNC and Spice ports from port reservation bitmap, but only if
|
|
|
|
they were reserved by the driver (autoport=yes)
|
2011-02-14 16:09:39 +00:00
|
|
|
*/
|
2013-05-21 07:21:20 +00:00
|
|
|
for (i = 0; i < vm->def->ngraphics; ++i) {
|
2012-11-10 01:40:23 +00:00
|
|
|
virDomainGraphicsDefPtr graphics = vm->def->graphics[i];
|
2013-04-30 14:26:43 +00:00
|
|
|
if (graphics->type == VIR_DOMAIN_GRAPHICS_TYPE_VNC) {
|
|
|
|
if (graphics->data.vnc.autoport) {
|
2013-07-04 19:16:57 +00:00
|
|
|
virPortAllocatorRelease(driver->remotePorts,
|
|
|
|
graphics->data.vnc.port);
|
2014-09-03 19:32:36 +00:00
|
|
|
} else if (graphics->data.vnc.portReserved) {
|
2014-06-24 11:34:18 +00:00
|
|
|
virPortAllocatorSetUsed(driver->remotePorts,
|
|
|
|
graphics->data.vnc.port,
|
|
|
|
false);
|
|
|
|
graphics->data.vnc.portReserved = false;
|
|
|
|
}
|
2016-11-22 11:09:32 +00:00
|
|
|
if (graphics->data.vnc.websocketGenerated) {
|
|
|
|
virPortAllocatorRelease(driver->webSocketPorts,
|
|
|
|
graphics->data.vnc.websocket);
|
|
|
|
graphics->data.vnc.websocketGenerated = false;
|
|
|
|
graphics->data.vnc.websocket = -1;
|
|
|
|
} else if (graphics->data.vnc.websocket) {
|
|
|
|
virPortAllocatorSetUsed(driver->remotePorts,
|
|
|
|
graphics->data.vnc.websocket,
|
|
|
|
false);
|
|
|
|
}
|
2012-11-10 01:40:23 +00:00
|
|
|
}
|
2014-06-24 11:34:18 +00:00
|
|
|
if (graphics->type == VIR_DOMAIN_GRAPHICS_TYPE_SPICE) {
|
|
|
|
if (graphics->data.spice.autoport) {
|
|
|
|
virPortAllocatorRelease(driver->remotePorts,
|
|
|
|
graphics->data.spice.port);
|
|
|
|
virPortAllocatorRelease(driver->remotePorts,
|
|
|
|
graphics->data.spice.tlsPort);
|
|
|
|
} else {
|
|
|
|
if (graphics->data.spice.portReserved) {
|
|
|
|
virPortAllocatorSetUsed(driver->remotePorts,
|
|
|
|
graphics->data.spice.port,
|
|
|
|
false);
|
|
|
|
graphics->data.spice.portReserved = false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (graphics->data.spice.tlsPortReserved) {
|
|
|
|
virPortAllocatorSetUsed(driver->remotePorts,
|
|
|
|
graphics->data.spice.tlsPort,
|
|
|
|
false);
|
|
|
|
graphics->data.spice.tlsPortReserved = false;
|
|
|
|
}
|
|
|
|
}
|
2012-11-10 01:40:23 +00:00
|
|
|
}
|
2011-02-14 16:09:39 +00:00
|
|
|
}
|
|
|
|
|
2017-07-21 13:51:03 +00:00
|
|
|
VIR_FREE(priv->machineName);
|
|
|
|
|
2011-05-04 10:59:20 +00:00
|
|
|
vm->taint = 0;
|
2011-02-14 16:09:39 +00:00
|
|
|
vm->pid = -1;
|
2011-05-04 09:07:01 +00:00
|
|
|
virDomainObjSetState(vm, VIR_DOMAIN_SHUTOFF, reason);
|
2015-04-10 13:21:23 +00:00
|
|
|
for (i = 0; i < vm->def->niothreadids; i++)
|
|
|
|
vm->def->iothreadids[i]->thread_id = 0;
|
2013-02-01 13:48:58 +00:00
|
|
|
virObjectUnref(priv->qemuCaps);
|
|
|
|
priv->qemuCaps = NULL;
|
2011-06-17 13:43:54 +00:00
|
|
|
VIR_FREE(priv->pidfile);
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2017-04-25 13:17:34 +00:00
|
|
|
/* remove automatic pinning data */
|
|
|
|
virBitmapFree(priv->autoNodeset);
|
|
|
|
priv->autoNodeset = NULL;
|
|
|
|
virBitmapFree(priv->autoCpuset);
|
|
|
|
priv->autoCpuset = NULL;
|
|
|
|
|
2017-04-26 07:57:39 +00:00
|
|
|
/* remove address data */
|
2017-04-26 08:27:49 +00:00
|
|
|
virDomainPCIAddressSetFree(priv->pciaddrs);
|
|
|
|
priv->pciaddrs = NULL;
|
2017-04-26 07:57:39 +00:00
|
|
|
virDomainUSBAddressSetFree(priv->usbaddrs);
|
|
|
|
priv->usbaddrs = NULL;
|
|
|
|
|
2017-04-26 10:46:03 +00:00
|
|
|
/* clean up migration data */
|
|
|
|
VIR_FREE(priv->migTLSAlias);
|
2017-05-31 10:34:10 +00:00
|
|
|
virCPUDefFree(priv->origCPU);
|
|
|
|
priv->origCPU = NULL;
|
2017-04-26 10:46:03 +00:00
|
|
|
|
2017-05-22 11:36:55 +00:00
|
|
|
/* clear previously used namespaces */
|
|
|
|
virBitmapFree(priv->namespaces);
|
|
|
|
priv->namespaces = NULL;
|
|
|
|
|
2011-03-23 20:50:29 +00:00
|
|
|
/* The "release" hook cleans up additional resources */
|
2011-03-22 13:12:36 +00:00
|
|
|
if (virHookPresent(VIR_HOOK_DRIVER_QEMU)) {
|
2012-10-08 09:58:05 +00:00
|
|
|
char *xml = qemuDomainDefFormatXML(driver, vm->def, 0);
|
2011-03-22 13:12:36 +00:00
|
|
|
|
|
|
|
/* we can't stop the operation even if the script raised an error */
|
|
|
|
virHookCall(VIR_HOOK_DRIVER_QEMU, vm->def->name,
|
2012-02-27 16:06:22 +00:00
|
|
|
VIR_HOOK_QEMU_OP_RELEASE, VIR_HOOK_SUBOP_END,
|
|
|
|
NULL, xml, NULL);
|
2011-03-22 13:12:36 +00:00
|
|
|
VIR_FREE(xml);
|
|
|
|
}
|
|
|
|
|
2016-09-08 13:16:58 +00:00
|
|
|
virDomainObjRemoveTransientDef(vm);
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2016-02-11 10:20:28 +00:00
|
|
|
endjob:
|
|
|
|
if (asyncJob != QEMU_ASYNC_JOB_NONE)
|
|
|
|
qemuDomainObjEndJob(driver, vm);
|
|
|
|
|
|
|
|
cleanup:
|
2011-02-14 16:09:39 +00:00
|
|
|
if (orig_err) {
|
|
|
|
virSetError(orig_err);
|
|
|
|
virFreeError(orig_err);
|
|
|
|
}
|
2013-01-10 21:03:14 +00:00
|
|
|
virObjectUnref(cfg);
|
2011-02-14 16:09:39 +00:00
|
|
|
}
|
2011-06-23 09:37:57 +00:00
|
|
|
|
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
int qemuProcessAttach(virConnectPtr conn ATTRIBUTE_UNUSED,
|
|
|
|
virQEMUDriverPtr driver,
|
|
|
|
virDomainObjPtr vm,
|
|
|
|
pid_t pid,
|
|
|
|
const char *pidfile,
|
|
|
|
virDomainChrSourceDefPtr monConfig,
|
|
|
|
bool monJSON)
|
|
|
|
{
|
|
|
|
size_t i;
|
|
|
|
qemuDomainLogContextPtr logCtxt = NULL;
|
|
|
|
char *timestamp;
|
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
|
|
|
bool running = true;
|
|
|
|
virDomainPausedReason reason;
|
|
|
|
virSecurityLabelPtr seclabel = NULL;
|
|
|
|
virSecurityLabelDefPtr seclabeldef = NULL;
|
|
|
|
bool seclabelgen = false;
|
|
|
|
virSecurityManagerPtr* sec_managers = NULL;
|
|
|
|
const char *model;
|
|
|
|
virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
|
|
|
|
virCapsPtr caps = NULL;
|
|
|
|
bool active = false;
|
|
|
|
|
|
|
|
VIR_DEBUG("Beginning VM attach process");
|
|
|
|
|
|
|
|
if (virDomainObjIsActive(vm)) {
|
|
|
|
virReportError(VIR_ERR_OPERATION_INVALID,
|
|
|
|
"%s", _("VM is already active"));
|
|
|
|
virObjectUnref(cfg);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!(caps = virQEMUDriverGetCapabilities(driver, false)))
|
|
|
|
goto error;
|
|
|
|
|
|
|
|
/* Do this upfront, so any part of the startup process can add
|
|
|
|
* runtime state to vm->def that won't be persisted. This lets us
|
|
|
|
* report implicit runtime defaults in the XML, like vnc listen/socket
|
|
|
|
*/
|
|
|
|
VIR_DEBUG("Setting current domain def as transient");
|
|
|
|
if (virDomainObjSetDefTransient(caps, driver->xmlopt, vm) < 0)
|
|
|
|
goto error;
|
|
|
|
|
|
|
|
vm->def->id = qemuDriverAllocateID(driver);
|
|
|
|
|
|
|
|
if (virAtomicIntInc(&driver->nactive) == 1 && driver->inhibitCallback)
|
|
|
|
driver->inhibitCallback(true, driver->inhibitOpaque);
|
|
|
|
active = true;
|
|
|
|
|
|
|
|
if (virFileMakePath(cfg->logDir) < 0) {
|
|
|
|
virReportSystemError(errno,
|
|
|
|
_("cannot create log directory %s"),
|
|
|
|
cfg->logDir);
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
|
|
|
VIR_FREE(priv->pidfile);
|
|
|
|
if (VIR_STRDUP(priv->pidfile, pidfile) < 0)
|
|
|
|
goto error;
|
|
|
|
|
|
|
|
vm->pid = pid;
|
|
|
|
|
|
|
|
VIR_DEBUG("Detect security driver config");
|
|
|
|
sec_managers = qemuSecurityGetNested(driver->securityManager);
|
|
|
|
if (sec_managers == NULL)
|
|
|
|
goto error;
|
|
|
|
|
|
|
|
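/* Record, for every active security driver, the label the externally
 * started qemu process is already running with, as a static seclabel. */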
for (i = 0; sec_managers[i]; i++) {
|
|
|
|
seclabelgen = false;
|
|
|
|
model = qemuSecurityGetModel(sec_managers[i]);
|
|
|
|
seclabeldef = virDomainDefGetSecurityLabelDef(vm->def, model);
|
|
|
|
if (seclabeldef == NULL) {
|
|
|
|
if (!(seclabeldef = virSecurityLabelDefNew(model)))
|
|
|
|
goto error;
|
|
|
|
seclabelgen = true;
|
|
|
|
}
|
|
|
|
seclabeldef->type = VIR_DOMAIN_SECLABEL_STATIC;
|
|
|
|
if (VIR_ALLOC(seclabel) < 0)
|
|
|
|
goto error;
|
|
|
|
if (qemuSecurityGetProcessLabel(sec_managers[i], vm->def,
|
|
|
|
vm->pid, seclabel) < 0)
|
|
|
|
goto error;
|
|
|
|
|
|
|
|
if (VIR_STRDUP(seclabeldef->model, model) < 0)
|
|
|
|
goto error;
|
|
|
|
|
|
|
|
if (VIR_STRDUP(seclabeldef->label, seclabel->label) < 0)
|
|
|
|
goto error;
|
|
|
|
VIR_FREE(seclabel);
|
|
|
|
|
|
|
|
if (seclabelgen) {
|
|
|
|
if (VIR_APPEND_ELEMENT(vm->def->seclabels, vm->def->nseclabels, seclabeldef) < 0)
|
|
|
|
goto error;
|
|
|
|
seclabelgen = false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (qemuSecurityCheckAllLabel(driver->securityManager, vm->def) < 0)
|
|
|
|
goto error;
|
|
|
|
if (qemuSecurityGenLabel(driver->securityManager, vm->def) < 0)
|
|
|
|
goto error;
|
|
|
|
|
|
|
|
if (qemuDomainPerfRestart(vm) < 0)
|
|
|
|
goto error;
|
|
|
|
|
|
|
|
VIR_DEBUG("Creating domain log file");
|
|
|
|
if (!(logCtxt = qemuDomainLogContextNew(driver, vm,
|
|
|
|
QEMU_DOMAIN_LOG_CONTEXT_MODE_ATTACH)))
|
|
|
|
goto error;
|
|
|
|
|
|
|
|
VIR_DEBUG("Determining emulator version");
|
|
|
|
virObjectUnref(priv->qemuCaps);
|
|
|
|
if (!(priv->qemuCaps = virQEMUCapsCacheLookupCopy(caps,
|
|
|
|
driver->qemuCapsCache,
|
|
|
|
vm->def->emulator,
|
|
|
|
vm->def->os.machine)))
|
|
|
|
goto error;
|
|
|
|
|
|
|
|
VIR_DEBUG("Preparing monitor state");
|
|
|
|
priv->monConfig = monConfig;
|
|
|
|
monConfig = NULL;
|
|
|
|
priv->monJSON = monJSON;
|
|
|
|
|
|
|
|
priv->gotShutdown = false;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Normally PCI addresses are assigned in the virDomainCreate
|
|
|
|
* or virDomainDefine methods. We might still need to assign
|
|
|
|
* some here to cope with the question of upgrades. Regardless
|
|
|
|
* we also need to populate the PCI address set cache for later
|
|
|
|
* use in hotplug
|
|
|
|
*/
|
|
|
|
VIR_DEBUG("Assigning domain PCI addresses");
|
|
|
|
if ((qemuDomainAssignAddresses(vm->def, priv->qemuCaps,
|
|
|
|
driver, vm, false)) < 0) {
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
|
|
|
if ((timestamp = virTimeStringNow()) == NULL)
|
|
|
|
goto error;
|
|
|
|
|
|
|
|
qemuDomainLogContextWrite(logCtxt, "%s: attaching\n", timestamp);
|
|
|
|
VIR_FREE(timestamp);
|
|
|
|
|
|
|
|
qemuDomainObjTaint(driver, vm, VIR_DOMAIN_TAINT_EXTERNAL_LAUNCH, logCtxt);
|
|
|
|
|
|
|
|
VIR_DEBUG("Waiting for monitor to show up");
|
|
|
|
if (qemuProcessWaitForMonitor(driver, vm, QEMU_ASYNC_JOB_NONE, NULL) < 0)
|
|
|
|
goto error;
|
|
|
|
|
|
|
|
if (qemuConnectAgent(driver, vm) < 0)
|
|
|
|
goto error;
|
|
|
|
|
|
|
|
VIR_DEBUG("Detecting VCPU PIDs");
|
|
|
|
if (qemuDomainRefreshVcpuInfo(driver, vm, QEMU_ASYNC_JOB_NONE, false) < 0)
|
|
|
|
goto error;
|
|
|
|
|
|
|
|
if (qemuDomainValidateVcpuInfo(vm) < 0)
|
|
|
|
goto error;
|
|
|
|
|
|
|
|
VIR_DEBUG("Detecting IOThread PIDs");
|
|
|
|
if (qemuProcessDetectIOThreadPIDs(driver, vm, QEMU_ASYNC_JOB_NONE) < 0)
|
|
|
|
goto error;
|
|
|
|
|
|
|
|
VIR_DEBUG("Getting initial memory amount");
|
|
|
|
qemuDomainObjEnterMonitor(driver, vm);
|
|
|
|
if (qemuMonitorGetBalloonInfo(priv->mon, &vm->def->mem.cur_balloon) < 0)
|
|
|
|
goto exit_monitor;
|
|
|
|
if (qemuMonitorGetStatus(priv->mon, &running, &reason) < 0)
|
|
|
|
goto exit_monitor;
|
|
|
|
if (qemuMonitorGetVirtType(priv->mon, &vm->def->virtType) < 0)
|
|
|
|
goto exit_monitor;
|
|
|
|
if (qemuDomainObjExitMonitor(driver, vm) < 0)
|
|
|
|
goto error;
|
|
|
|
|
|
|
|
if (running) {
|
|
|
|
virDomainObjSetState(vm, VIR_DOMAIN_RUNNING,
|
|
|
|
VIR_DOMAIN_RUNNING_UNPAUSED);
|
|
|
|
if (vm->def->memballoon &&
|
|
|
|
vm->def->memballoon->model == VIR_DOMAIN_MEMBALLOON_MODEL_VIRTIO &&
|
|
|
|
vm->def->memballoon->period) {
|
|
|
|
qemuDomainObjEnterMonitor(driver, vm);
|
|
|
|
qemuMonitorSetMemoryStatsPeriod(priv->mon, vm->def->memballoon,
|
|
|
|
vm->def->memballoon->period);
|
|
|
|
if (qemuDomainObjExitMonitor(driver, vm) < 0)
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
virDomainObjSetState(vm, VIR_DOMAIN_PAUSED, reason);
|
|
|
|
}
|
|
|
|
|
|
|
|
VIR_DEBUG("Writing domain status to disk");
|
|
|
|
if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm, driver->caps) < 0)
|
|
|
|
goto error;
|
|
|
|
|
|
|
|
/* Run a hook to allow admins to do some magic */
|
|
|
|
if (virHookPresent(VIR_HOOK_DRIVER_QEMU)) {
|
|
|
|
char *xml = qemuDomainDefFormatXML(driver, vm->def, 0);
|
|
|
|
int hookret;
|
|
|
|
|
|
|
|
hookret = virHookCall(VIR_HOOK_DRIVER_QEMU, vm->def->name,
|
|
|
|
VIR_HOOK_QEMU_OP_ATTACH, VIR_HOOK_SUBOP_BEGIN,
|
|
|
|
NULL, xml, NULL);
|
|
|
|
VIR_FREE(xml);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If the script raised an error, abort the attach
|
|
|
|
*/
|
|
|
|
if (hookret < 0)
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
|
|
|
virObjectUnref(logCtxt);
|
|
|
|
VIR_FREE(seclabel);
|
|
|
|
VIR_FREE(sec_managers);
|
|
|
|
virObjectUnref(cfg);
|
|
|
|
virObjectUnref(caps);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
exit_monitor:
|
|
|
|
ignore_value(qemuDomainObjExitMonitor(driver, vm));
|
|
|
|
error:
|
|
|
|
/* We jump here if we failed to attach to the VM for any reason.
|
|
|
|
* Leave the domain running, but pretend we never attempted to
|
|
|
|
* attach to it. */
|
|
|
|
if (active && virAtomicIntDecAndTest(&driver->nactive) &&
|
|
|
|
driver->inhibitCallback)
|
|
|
|
driver->inhibitCallback(false, driver->inhibitOpaque);
|
|
|
|
|
|
|
|
qemuMonitorClose(priv->mon);
|
|
|
|
priv->mon = NULL;
|
|
|
|
virObjectUnref(logCtxt);
|
|
|
|
VIR_FREE(seclabel);
|
|
|
|
VIR_FREE(sec_managers);
|
|
|
|
if (seclabelgen)
|
|
|
|
virSecurityLabelDefFree(seclabeldef);
|
|
|
|
virDomainChrSourceDefFree(monConfig);
|
|
|
|
virObjectUnref(cfg);
|
|
|
|
virObjectUnref(caps);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static virDomainObjPtr
|
|
|
|
qemuProcessAutoDestroy(virDomainObjPtr dom,
|
|
|
|
virConnectPtr conn,
|
|
|
|
void *opaque)
|
|
|
|
{
|
|
|
|
virQEMUDriverPtr driver = opaque;
|
|
|
|
qemuDomainObjPrivatePtr priv = dom->privateData;
|
|
|
|
virObjectEventPtr event = NULL;
|
|
|
|
unsigned int stopFlags = 0;
|
|
|
|
|
|
|
|
VIR_DEBUG("vm=%s, conn=%p", dom->def->name, conn);
|
|
|
|
|
|
|
|
virObjectRef(dom);
|
|
|
|
|
|
|
|
if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_IN)
|
|
|
|
stopFlags |= VIR_QEMU_PROCESS_STOP_MIGRATED;
|
|
|
|
|
|
|
|
if (priv->job.asyncJob) {
|
|
|
|
VIR_DEBUG("vm=%s has long-term job active, cancelling",
|
|
|
|
dom->def->name);
|
|
|
|
qemuDomainObjDiscardAsyncJob(driver, dom);
|
|
|
|
}
|
|
|
|
|
|
|
|
VIR_DEBUG("Killing domain");
|
|
|
|
|
|
|
|
if (qemuProcessBeginStopJob(driver, dom, QEMU_JOB_DESTROY, true) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
qemuProcessStop(driver, dom, VIR_DOMAIN_SHUTOFF_DESTROYED,
|
|
|
|
QEMU_ASYNC_JOB_NONE, stopFlags);
|
|
|
|
|
|
|
|
virDomainAuditStop(dom, "destroyed");
|
|
|
|
event = virDomainEventLifecycleNewFromObj(dom,
|
|
|
|
VIR_DOMAIN_EVENT_STOPPED,
|
|
|
|
VIR_DOMAIN_EVENT_STOPPED_DESTROYED);
|
|
|
|
|
|
|
|
qemuDomainObjEndJob(driver, dom);
|
|
|
|
|
|
|
|
qemuDomainRemoveInactive(driver, dom);
|
|
|
|
|
|
|
|
qemuDomainEventQueue(driver, event);
|
|
|
|
|
|
|
|
cleanup:
|
|
|
|
virDomainObjEndAPI(&dom);
|
|
|
|
return dom;
|
|
|
|
}
|
|
|
|
|
|
|
|
int qemuProcessAutoDestroyAdd(virQEMUDriverPtr driver,
|
|
|
|
virDomainObjPtr vm,
|
|
|
|
virConnectPtr conn)
|
|
|
|
{
|
|
|
|
VIR_DEBUG("vm=%s, conn=%p", vm->def->name, conn);
|
|
|
|
return virCloseCallbacksSet(driver->closeCallbacks, vm, conn,
|
|
|
|
qemuProcessAutoDestroy);
|
|
|
|
}
|
|
|
|
|
|
|
|
int qemuProcessAutoDestroyRemove(virQEMUDriverPtr driver,
|
|
|
|
virDomainObjPtr vm)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
VIR_DEBUG("vm=%s", vm->def->name);
|
|
|
|
ret = virCloseCallbacksUnset(driver->closeCallbacks, vm,
|
|
|
|
qemuProcessAutoDestroy);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool qemuProcessAutoDestroyActive(virQEMUDriverPtr driver,
|
|
|
|
virDomainObjPtr vm)
|
|
|
|
{
|
|
|
|
virCloseCallback cb;
|
|
|
|
VIR_DEBUG("vm=%s", vm->def->name);
|
|
|
|
cb = virCloseCallbacksGet(driver->closeCallbacks, vm, NULL);
|
|
|
|
return cb == qemuProcessAutoDestroy;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
int
|
|
|
|
qemuProcessRefreshDisks(virQEMUDriverPtr driver,
|
|
|
|
virDomainObjPtr vm,
|
|
|
|
qemuDomainAsyncJob asyncJob)
|
|
|
|
{
|
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
|
|
|
virHashTablePtr table = NULL;
|
|
|
|
int ret = -1;
|
|
|
|
size_t i;
|
|
|
|
|
|
|
|
if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) == 0) {
|
|
|
|
table = qemuMonitorGetBlockInfo(priv->mon);
|
|
|
|
if (qemuDomainObjExitMonitor(driver, vm) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!table)
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
for (i = 0; i < vm->def->ndisks; i++) {
|
|
|
|
virDomainDiskDefPtr disk = vm->def->disks[i];
|
|
|
|
qemuDomainDiskPrivatePtr diskpriv = QEMU_DOMAIN_DISK_PRIVATE(disk);
|
|
|
|
struct qemuDomainDiskInfo *info;
|
|
|
|
|
|
|
|
if (!(info = virHashLookup(table, disk->info.alias)))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
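/* Sync libvirt's view of removable media with qemu: drop the source of
 * ejected media and record the current tray position. */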
if (info->removable) {
|
|
|
|
if (info->empty)
|
|
|
|
virDomainDiskEmptySource(disk);
|
|
|
|
|
|
|
|
if (info->tray) {
|
|
|
|
if (info->tray_open)
|
|
|
|
disk->tray_status = VIR_DOMAIN_DISK_TRAY_OPEN;
|
|
|
|
else
|
|
|
|
disk->tray_status = VIR_DOMAIN_DISK_TRAY_CLOSED;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* fill in additional data */
|
|
|
|
diskpriv->removable = info->removable;
|
|
|
|
diskpriv->tray = info->tray;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = 0;
|
|
|
|
|
|
|
|
cleanup:
|
|
|
|
virHashFree(table);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
struct qemuProcessReconnectData {
|
|
|
|
virConnectPtr conn;
|
|
|
|
virQEMUDriverPtr driver;
|
|
|
|
virDomainObjPtr obj;
|
|
|
|
};
|
|
|
|
/*
|
|
|
|
* Open an existing VM's monitor, re-detect VCPU threads
|
|
|
|
* and re-reserve the security labels in use
|
|
|
|
*
|
|
|
|
* We own the virConnectPtr we are passed here - whoever started
|
|
|
|
* this thread function has increased the reference counter to it
|
|
|
|
* so we are responsible for closing it.
|
|
|
|
*
|
|
|
|
* This function also inherits a locked and ref'd domain object.
|
|
|
|
*
|
|
|
|
* This function needs to:
|
|
|
|
* 1. Enter job
|
|
|
|
* 2. just before monitor reconnect do lightweight MonitorEnter
|
|
|
|
* (increase VM refcount and unlock VM)
|
|
|
|
* 3. reconnect to monitor
|
|
|
|
* 4. do lightweight MonitorExit (lock VM)
|
|
|
|
* 5. continue reconnect process
|
|
|
|
* 6. EndJob
|
|
|
|
*
|
|
|
|
* We can't do normal MonitorEnter & MonitorExit because these two lock the
|
|
|
|
* monitor lock, which does not exist in this early phase.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
qemuProcessReconnect(void *opaque)
|
2011-05-05 16:32:21 +00:00
|
|
|
{
|
2017-07-11 13:53:58 +00:00
|
|
|
struct qemuProcessReconnectData *data = opaque;
|
|
|
|
virQEMUDriverPtr driver = data->driver;
|
|
|
|
virDomainObjPtr obj = data->obj;
|
|
|
|
qemuDomainObjPrivatePtr priv;
|
|
|
|
virConnectPtr conn = data->conn;
|
|
|
|
struct qemuDomainJobObj oldjob;
|
|
|
|
int state;
|
|
|
|
int reason;
|
|
|
|
virQEMUDriverConfigPtr cfg;
|
2012-08-15 22:10:37 +00:00
|
|
|
size_t i;
|
2017-07-11 13:53:58 +00:00
|
|
|
unsigned int stopFlags = 0;
|
|
|
|
bool jobStarted = false;
|
2013-02-01 17:04:15 +00:00
|
|
|
virCapsPtr caps = NULL;
|
2011-05-05 16:32:21 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
VIR_FREE(data);
|
2011-05-05 16:32:21 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
qemuDomainObjRestoreJob(obj, &oldjob);
|
|
|
|
if (oldjob.asyncJob == QEMU_ASYNC_JOB_MIGRATION_IN)
|
|
|
|
stopFlags |= VIR_QEMU_PROCESS_STOP_MIGRATED;
|
|
|
|
|
|
|
|
cfg = virQEMUDriverGetConfig(driver);
|
|
|
|
priv = obj->privateData;
|
2011-05-05 16:32:21 +00:00
|
|
|
|
2013-02-01 17:04:15 +00:00
|
|
|
if (!(caps = virQEMUDriverGetCapabilities(driver, false)))
|
2013-08-28 22:22:46 +00:00
|
|
|
goto error;
|
2013-02-01 17:04:15 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
if (qemuDomainObjBeginJob(driver, obj, QEMU_JOB_MODIFY) < 0)
|
2013-08-28 22:22:46 +00:00
|
|
|
goto error;
|
2017-07-11 13:53:58 +00:00
|
|
|
jobStarted = true;
|
2011-05-05 16:32:21 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
/* XXX If we ever change the pidfile naming pattern, come up with
|
|
|
|
* some intelligence here to deal with old paths. */
|
|
|
|
if (!(priv->pidfile = virPidFileBuildPath(cfg->stateDir, obj->def->name)))
|
|
|
|
goto error;
|
2011-05-05 16:32:21 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
/* Restore the masterKey */
|
|
|
|
if (qemuDomainMasterKeyReadFile(priv) < 0)
|
|
|
|
goto error;
|
2012-10-31 19:03:55 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
virNWFilterReadLockFilterUpdates();
|
|
|
|
|
|
|
|
VIR_DEBUG("Reconnect monitor to %p '%s'", obj, obj->def->name);
|
|
|
|
|
|
|
|
/* XXX check PID liveliness & EXE path */
|
|
|
|
if (qemuConnectMonitor(driver, obj, QEMU_ASYNC_JOB_NONE, NULL) < 0)
|
2013-08-28 22:22:46 +00:00
|
|
|
goto error;
|
2011-05-05 16:32:21 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
if (qemuHostdevUpdateActiveDomainDevices(driver, obj->def) < 0)
|
2013-08-28 22:22:46 +00:00
|
|
|
goto error;
|
2011-05-05 16:32:21 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
if (qemuConnectCgroup(driver, obj) < 0)
|
|
|
|
goto error;
|
2014-12-01 09:54:35 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
if (qemuDomainPerfRestart(obj) < 0)
|
2013-08-28 22:22:46 +00:00
|
|
|
goto error;
|
2012-08-15 22:10:37 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
/* XXX: Need to change as long as lock is introduced for
|
|
|
|
* qemu_driver->sharedDevices.
|
|
|
|
*/
|
|
|
|
for (i = 0; i < obj->def->ndisks; i++) {
|
|
|
|
virDomainDeviceDef dev;
|
2012-08-15 22:10:37 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
if (virStorageTranslateDiskSourcePool(conn, obj->def->disks[i]) < 0)
|
2013-08-28 22:22:46 +00:00
|
|
|
goto error;
|
2012-08-15 22:10:37 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
/* XXX we should be able to restore all data from XML in the future.
|
|
|
|
* This should be the only place that calls qemuDomainDetermineDiskChain
|
|
|
|
* with @report_broken == false to guarantee best-effort domain
|
|
|
|
* reconnect */
|
|
|
|
if (qemuDomainDetermineDiskChain(driver, obj, obj->def->disks[i],
|
|
|
|
true, false) < 0)
|
2013-08-28 22:22:46 +00:00
|
|
|
goto error;
|
2014-03-11 15:14:26 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
dev.type = VIR_DOMAIN_DEVICE_DISK;
|
|
|
|
dev.data.disk = obj->def->disks[i];
|
|
|
|
if (qemuAddSharedDevice(driver, &dev, obj->def->name) < 0)
|
|
|
|
goto error;
|
2012-08-15 22:10:37 +00:00
|
|
|
}
|
2011-05-05 16:32:21 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
if (qemuProcessUpdateState(driver, obj) < 0)
|
2016-03-28 13:30:31 +00:00
|
|
|
goto error;
|
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
state = virDomainObjGetState(obj, &reason);
|
|
|
|
if (state == VIR_DOMAIN_SHUTOFF ||
|
|
|
|
(state == VIR_DOMAIN_PAUSED &&
|
|
|
|
reason == VIR_DOMAIN_PAUSED_STARTING_UP)) {
|
|
|
|
VIR_DEBUG("Domain '%s' wasn't fully started yet, killing it",
|
|
|
|
obj->def->name);
|
2013-08-28 22:22:46 +00:00
|
|
|
goto error;
|
2017-07-11 13:53:58 +00:00
|
|
|
}
|
2011-05-05 16:32:21 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
/* If upgrading from an old libvirtd we won't have found any
|
|
|
|
* caps in the domain status, so re-query them
|
|
|
|
*/
|
|
|
|
if (!priv->qemuCaps &&
|
|
|
|
!(priv->qemuCaps = virQEMUCapsCacheLookupCopy(caps,
|
2016-06-15 11:48:19 +00:00
|
|
|
driver->qemuCapsCache,
|
2017-07-11 13:53:58 +00:00
|
|
|
obj->def->emulator,
|
|
|
|
obj->def->os.machine)))
|
2013-08-28 22:22:46 +00:00
|
|
|
goto error;
|
2011-05-05 16:32:21 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
/* In case the domain shut down while we were not running,
|
|
|
|
* we need to finish the shutdown process. And we need to do it after
|
|
|
|
* we have virQEMUCaps filled in.
|
|
|
|
*/
|
|
|
|
if (state == VIR_DOMAIN_SHUTDOWN ||
|
|
|
|
(state == VIR_DOMAIN_PAUSED &&
|
|
|
|
reason == VIR_DOMAIN_PAUSED_SHUTTING_DOWN)) {
|
|
|
|
VIR_DEBUG("Finishing shutdown sequence for domain %s",
|
|
|
|
obj->def->name);
|
|
|
|
qemuProcessShutdownOrReboot(driver, obj);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
2011-05-05 16:32:21 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
if (qemuProcessBuildDestroyHugepagesPath(driver, obj, NULL, true) < 0)
|
|
|
|
goto error;
|
2011-05-05 16:32:21 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
if ((qemuDomainAssignAddresses(obj->def, priv->qemuCaps,
|
|
|
|
driver, obj, false)) < 0) {
|
2016-01-22 19:09:22 +00:00
|
|
|
goto error;
|
2016-11-03 20:33:32 +00:00
|
|
|
}
|
2011-05-05 16:32:21 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
/* If the domain requests a security driver we haven't loaded, report an error but
|
|
|
|
* do not kill the domain
|
|
|
|
*/
|
|
|
|
ignore_value(qemuSecurityCheckAllLabel(driver->securityManager,
|
|
|
|
obj->def));
|
|
|
|
|
2017-07-11 12:16:40 +00:00
|
|
|
/* If the domain with a host-model CPU was started by an old libvirt
|
|
|
|
* (< 2.3) which didn't replace the CPU with a custom one, let's do it now
|
|
|
|
* since the rest of our code does not really expect a host-model CPU in a
|
|
|
|
* running domain.
|
|
|
|
*/
|
|
|
|
if (virQEMUCapsGuestIsNative(caps->host.arch, obj->def->os.arch) &&
|
|
|
|
caps->host.cpu &&
|
|
|
|
obj->def->cpu &&
|
|
|
|
obj->def->cpu->mode == VIR_CPU_MODE_HOST_MODEL) {
|
|
|
|
virCPUDefPtr host;
|
|
|
|
|
|
|
|
if (!(host = virCPUCopyMigratable(caps->host.cpu->arch, caps->host.cpu)))
|
|
|
|
goto error;
|
|
|
|
|
|
|
|
if (virCPUUpdate(obj->def->os.arch, obj->def->cpu, host) < 0) {
|
|
|
|
virCPUDefFree(host);
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
virCPUDefFree(host);
|
|
|
|
|
|
|
|
if (qemuProcessUpdateCPU(driver, obj, QEMU_ASYNC_JOB_NONE) < 0)
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
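/* Re-detect the vCPU thread IDs from the monitor so pinning and cgroup handling keep working */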
if (qemuDomainRefreshVcpuInfo(driver, obj, QEMU_ASYNC_JOB_NONE, true) < 0)
|
2013-08-28 22:22:46 +00:00
|
|
|
goto error;
|
2011-05-05 16:32:21 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
qemuDomainVcpuPersistOrder(obj->def);
|
2011-05-05 16:32:21 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
if (qemuSecurityReserveLabel(driver->securityManager, obj->def, obj->pid) < 0)
|
|
|
|
goto error;
|
2011-05-05 16:32:21 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
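/* Tell the network driver that this domain's interfaces are still in use */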
qemuProcessNotifyNets(obj->def);
|
|
|
|
|
|
|
|
if (qemuProcessFiltersInstantiate(obj->def))
|
2013-08-28 22:22:46 +00:00
|
|
|
goto error;
|
2011-05-05 16:32:21 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
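/* Sync removable media and tray status from QEMU */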
if (qemuProcessRefreshDisks(driver, obj, QEMU_ASYNC_JOB_NONE) < 0)
|
2016-11-16 13:43:01 +00:00
|
|
|
goto error;
|
2011-10-05 17:31:54 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
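/* Detect the node names QEMU assigned to the block backing chains */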
if (qemuBlockNodeNamesDetect(driver, obj, QEMU_ASYNC_JOB_NONE) < 0)
|
2014-09-03 13:07:38 +00:00
|
|
|
goto error;
|
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
if (qemuRefreshVirtioChannelState(driver, obj, QEMU_ASYNC_JOB_NONE) < 0)
|
2016-08-01 05:35:50 +00:00
|
|
|
goto error;
|
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
/* If querying the guest's RTC fails, report an error but do not kill the domain. */
|
|
|
|
qemuRefreshRTC(driver, obj);
|
|
|
|
|
|
|
|
if (qemuProcessRefreshBalloonState(driver, obj, QEMU_ASYNC_JOB_NONE) < 0)
|
2013-08-28 22:22:46 +00:00
|
|
|
goto error;
|
2011-05-05 16:32:21 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
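/* Recover or roll back the async job (e.g. an incoming migration) interrupted by the daemon restart */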
if (qemuProcessRecoverJob(driver, obj, conn, &oldjob, &stopFlags) < 0)
|
2013-08-28 22:22:46 +00:00
|
|
|
goto error;
|
2011-05-05 16:32:21 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
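/* Drop devices whose removal completed while libvirtd was not running */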
if (qemuProcessUpdateDevices(driver, obj) < 0)
|
|
|
|
goto error;
|
2011-05-05 16:32:21 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
qemuProcessReconnectCheckMemAliasOrderMismatch(obj);
|
|
|
|
|
|
|
|
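/* Re-open the guest agent channel, if one is configured */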
if (qemuConnectAgent(driver, obj) < 0)
|
|
|
|
goto error;
|
|
|
|
|
|
|
|
/* update domain state XML with possibly updated state in virDomainObj */
|
|
|
|
if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, obj, driver->caps) < 0)
|
2013-08-28 22:22:46 +00:00
|
|
|
goto error;
|
2011-05-05 16:32:21 +00:00
|
|
|
|
Add some missing hook functions
A core use case of the hook scripts is to be able to do things
to a guest's network configuration. It is possible to hook into
the 'start' operation for a QEMU guest which runs just before
the guest is started. The TAP devices will exist at this point,
but the QEMU process will not. It can be desirable to have a
'started' hook too, which runs once QEMU has started.
If libvirtd is restarted it will re-populate firewall rules,
but there is no QEMU hook to trigger for existing domains.
This is solved with a 'reconnect' hook.
Finally, if attaching to an external QEMU process there needs
to be an 'attach' hook script.
This all also applies to the LXC driver
* docs/hooks.html.in: Document new operations
* src/util/hooks.c, src/util/hooks.h: Add 'started', 'reconnect'
and 'attach' operations for QEMU. Add 'prepare', 'started',
'release' and 'reconnect' operations for LXC
* src/lxc/lxc_driver.c: Add hooks for 'prepare', 'started',
'release' and 'reconnect' operations
* src/qemu/qemu_process.c: Add hooks for 'started', 'reconnect'
and 'attach' operations
2012-05-28 14:04:31 +00:00
|
|
|
/* Run a hook to allow admins to do some magic */
|
|
|
|
if (virHookPresent(VIR_HOOK_DRIVER_QEMU)) {
|
2017-07-11 13:53:58 +00:00
|
|
|
char *xml = qemuDomainDefFormatXML(driver, obj->def, 0);
|
2012-05-28 14:04:31 +00:00
|
|
|
int hookret;
|
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
hookret = virHookCall(VIR_HOOK_DRIVER_QEMU, obj->def->name,
|
|
|
|
VIR_HOOK_QEMU_OP_RECONNECT, VIR_HOOK_SUBOP_BEGIN,
|
2012-05-28 14:04:31 +00:00
|
|
|
NULL, xml, NULL);
|
|
|
|
VIR_FREE(xml);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If the script raised an error, abort the reconnect
|
|
|
|
*/
|
|
|
|
if (hookret < 0)
|
2013-08-28 22:22:46 +00:00
|
|
|
goto error;
|
2012-05-28 14:04:31 +00:00
|
|
|
}
|
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
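/* Going from zero to one active domain triggers the inhibit callback, which blocks host suspend/shutdown while guests run */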
if (virAtomicIntInc(&driver->nactive) == 1 && driver->inhibitCallback)
|
|
|
|
driver->inhibitCallback(true, driver->inhibitOpaque);
|
2016-10-03 11:11:47 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
cleanup:
|
|
|
|
if (jobStarted)
|
|
|
|
qemuDomainObjEndJob(driver, obj);
|
|
|
|
if (!virDomainObjIsActive(obj))
|
|
|
|
qemuDomainRemoveInactive(driver, obj);
|
|
|
|
virDomainObjEndAPI(&obj);
|
|
|
|
virObjectUnref(conn);
|
2013-01-10 21:03:14 +00:00
|
|
|
virObjectUnref(cfg);
|
2013-02-01 17:04:15 +00:00
|
|
|
virObjectUnref(caps);
|
2017-07-11 13:53:58 +00:00
|
|
|
virNWFilterUnlockFilterUpdates();
|
|
|
|
return;
|
2015-07-30 14:42:43 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
error:
|
|
|
|
if (virDomainObjIsActive(obj)) {
|
|
|
|
/* We can't get the monitor back, so we must kill the VM
|
|
|
|
* to remove the danger of it ending up running twice if the
|
|
|
|
* user tries to start it again later
|
|
|
|
*/
|
|
|
|
if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_NO_SHUTDOWN)) {
|
|
|
|
/* If we couldn't get the monitor and qemu supports
|
|
|
|
* no-shutdown, we can safely say that the domain
|
|
|
|
* crashed ... */
|
|
|
|
state = VIR_DOMAIN_SHUTOFF_CRASHED;
|
|
|
|
} else {
|
|
|
|
/* ... but if it doesn't, we can't say what the state
|
|
|
|
* really is and FAILED means "failed to start" */
|
|
|
|
state = VIR_DOMAIN_SHUTOFF_UNKNOWN;
|
|
|
|
}
|
|
|
|
/* If BeginJob failed, we jumped here without a job; let's hope another
|
|
|
|
* thread didn't have a chance to start playing with the domain yet
|
|
|
|
* (it's all we can do anyway).
|
|
|
|
*/
|
|
|
|
qemuProcessStop(driver, obj, state, QEMU_ASYNC_JOB_NONE, stopFlags);
|
2011-06-23 09:37:57 +00:00
|
|
|
}
|
2017-07-11 13:53:58 +00:00
|
|
|
goto cleanup;
|
2011-06-23 10:41:57 +00:00
|
|
|
}
|
2016-05-23 12:00:35 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
static int
|
|
|
|
qemuProcessReconnectHelper(virDomainObjPtr obj,
|
|
|
|
void *opaque)
|
2016-05-23 12:00:35 +00:00
|
|
|
{
|
2017-07-11 13:53:58 +00:00
|
|
|
virThread thread;
|
|
|
|
struct qemuProcessReconnectData *src = opaque;
|
|
|
|
struct qemuProcessReconnectData *data;
|
2016-05-23 12:00:35 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
/* If the VM was inactive, we don't need to reconnect */
|
|
|
|
if (!obj->pid)
|
|
|
|
return 0;
|
2016-05-23 12:00:35 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
if (VIR_ALLOC(data) < 0)
|
|
|
|
return -1;
|
2016-05-23 12:00:35 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
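/* Hand each worker thread its own copy of the reconnect data, bound to this domain object */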
memcpy(data, src, sizeof(*data));
|
|
|
|
data->obj = obj;
|
2016-05-23 12:00:35 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
/* This lock and reference will eventually be transferred to the thread
|
|
|
|
* that handles the reconnect */
|
|
|
|
virObjectLock(obj);
|
|
|
|
virObjectRef(obj);
|
2016-05-23 12:00:35 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
/* Since we close the connection later on, we have to make sure that the
|
|
|
|
* threads we start see a valid connection throughout their lifetime. We
|
|
|
|
* simply increase the reference counter here.
|
|
|
|
*/
|
|
|
|
virObjectRef(data->conn);
|
2016-05-23 12:00:35 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
if (virThreadCreate(&thread, false, qemuProcessReconnect, data) < 0) {
|
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
|
|
|
|
_("Could not create thread. QEMU initialization "
|
|
|
|
"might be incomplete"));
|
|
|
|
/* We can't spawn a thread, and thus can't connect to the monitor. Kill qemu.
|
|
|
|
* It's safe to call qemuProcessStop without a job here since there
|
|
|
|
* is no thread that could be doing anything else with the same domain
|
|
|
|
* object.
|
|
|
|
*/
|
|
|
|
qemuProcessStop(src->driver, obj, VIR_DOMAIN_SHUTOFF_FAILED,
|
|
|
|
QEMU_ASYNC_JOB_NONE, 0);
|
|
|
|
qemuDomainRemoveInactive(src->driver, obj);
|
2016-05-19 13:29:02 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
virDomainObjEndAPI(&obj);
|
|
|
|
virObjectUnref(data->conn);
|
|
|
|
VIR_FREE(data);
|
|
|
|
return -1;
|
2016-05-23 12:00:35 +00:00
|
|
|
}
|
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
return 0;
|
|
|
|
}
|
2016-05-23 12:00:35 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
/**
|
|
|
|
* qemuProcessReconnectAll
|
|
|
|
*
|
|
|
|
* Try to re-open the monitors and other resources of VMs that were
|
|
|
|
* running when the daemon stopped.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
qemuProcessReconnectAll(virConnectPtr conn, virQEMUDriverPtr driver)
|
|
|
|
{
|
|
|
|
struct qemuProcessReconnectData data = {.conn = conn, .driver = driver};
|
|
|
|
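/* Spawn one reconnect thread per domain that has a live QEMU process */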
virDomainObjListForEach(driver->domains, qemuProcessReconnectHelper, &data);
|
2016-05-23 12:00:35 +00:00
|
|
|
}
|