/*
 * qemu_process.c: QEMU process management
 *
 * Copyright (C) 2006-2016 Red Hat, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library. If not, see
 * <http://www.gnu.org/licenses/>.
 *
 */

#include <config.h>

#include <fcntl.h>
#include <unistd.h>
#include <signal.h>
#include <sys/stat.h>
#if defined(__linux__)
# include <linux/capability.h>
#elif defined(__FreeBSD__)
# include <sys/param.h>
# include <sys/cpuset.h>
#endif

#include <sys/utsname.h>

#if WITH_CAPNG
# include <cap-ng.h>
#endif

#include "qemu_process.h"
#define LIBVIRT_QEMU_PROCESSPRIV_H_ALLOW
#include "qemu_processpriv.h"
#include "qemu_alias.h"
#include "qemu_block.h"
#include "qemu_domain.h"
#include "qemu_domain_address.h"
#include "qemu_namespace.h"
#include "qemu_cgroup.h"
#include "qemu_capabilities.h"
#include "qemu_monitor.h"
#include "qemu_command.h"
#include "qemu_hostdev.h"
#include "qemu_hotplug.h"
#include "qemu_migration.h"
#include "qemu_migration_params.h"
#include "qemu_interface.h"
#include "qemu_security.h"
#include "qemu_extdevice.h"
#include "qemu_firmware.h"
#include "qemu_backup.h"
#include "qemu_dbus.h"
#include "qemu_snapshot.h"

#include "cpu/cpu.h"
#include "cpu/cpu_x86.h"
#include "datatypes.h"
#include "virlog.h"
#include "virerror.h"
#include "viralloc.h"
#include "virhook.h"
#include "virfile.h"
#include "virpidfile.h"
#include "virhostcpu.h"
#include "domain_audit.h"
#include "domain_cgroup.h"
#include "domain_nwfilter.h"
#include "domain_validate.h"
#include "locking/domain_lock.h"
#include "viruuid.h"
#include "virprocess.h"
#include "virtime.h"
#include "virnetdevtap.h"
#include "virnetdevopenvswitch.h"
#include "virnetdevmidonet.h"
#include "virbitmap.h"
#include "virnuma.h"
#include "virstring.h"
#include "virhostdev.h"
#include "virsecret.h"
#include "configmake.h"
#include "nwfilter_conf.h"
#include "netdev_bandwidth_conf.h"
#include "virresctrl.h"
#include "virvsock.h"
#include "viridentity.h"
#include "virthreadjob.h"
#include "virutil.h"
#include "storage_source.h"
#include "backup_conf.h"

#include "logging/log_manager.h"
#include "logging/log_protocol.h"

#define VIR_FROM_THIS VIR_FROM_QEMU

VIR_LOG_INIT("qemu.qemu_process");


/**
 * qemuProcessRemoveDomainStatus
 *
 * remove all state files of a domain from statedir
 */
static void
qemuProcessRemoveDomainStatus(virQEMUDriver *driver,
                              virDomainObj *vm)
{
    g_autofree char *file = NULL;
    qemuDomainObjPrivate *priv = vm->privateData;
    g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);

    file = g_strdup_printf("%s/%s.xml", cfg->stateDir, vm->def->name);

    if (unlink(file) < 0 && errno != ENOENT && errno != ENOTDIR)
        VIR_WARN("Failed to remove domain XML for %s: %s",
                 vm->def->name, g_strerror(errno));

    if (priv->pidfile &&
        unlink(priv->pidfile) < 0 &&
        errno != ENOENT)
        VIR_WARN("Failed to remove PID file for %s: %s",
                 vm->def->name, g_strerror(errno));
}


/*
 * This is a callback registered with a qemuAgent *instance,
 * and is invoked when the agent console hits an end of file
 * condition, or error, indicating that the connection to the
 * guest agent is gone and must be closed.
 */
static void
qemuProcessHandleAgentEOF(qemuAgent *agent,
                          virDomainObj *vm)
{
    qemuDomainObjPrivate *priv;

    VIR_DEBUG("Received EOF from agent on %p '%s'", vm, vm->def->name);

    virObjectLock(vm);

    priv = vm->privateData;

    if (!priv->agent) {
        VIR_DEBUG("Agent freed already");
        goto unlock;
    }

    if (priv->beingDestroyed) {
        VIR_DEBUG("Domain is being destroyed, agent EOF is expected");
        goto unlock;
    }

    qemuAgentClose(agent);
    priv->agent = NULL;
    priv->agentError = false;

    virObjectUnlock(vm);
    return;

 unlock:
    virObjectUnlock(vm);
    return;
}


/*
 * This is invoked when there is some kind of error
 * parsing data to/from the agent. The VM can continue
 * to run, but no further agent commands will be
 * allowed
 */
static void
qemuProcessHandleAgentError(qemuAgent *agent G_GNUC_UNUSED,
                            virDomainObj *vm)
{
    qemuDomainObjPrivate *priv;

    VIR_DEBUG("Received error from agent on %p '%s'", vm, vm->def->name);

    virObjectLock(vm);

    priv = vm->privateData;

    priv->agentError = true;

    virObjectUnlock(vm);
}


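/* Callbacks wired into every guest agent connection opened by
 * qemuConnectAgent() below. */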
static qemuAgentCallbacks agentCallbacks = {
    .eofNotify = qemuProcessHandleAgentEOF,
    .errorNotify = qemuProcessHandleAgentError,
};


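/*
 * Attempt to connect to the guest agent channel of @vm (if one is
 * configured) and store the handle in the domain private data. A failed
 * connection is not fatal: the function returns 0 and sets
 * priv->agentError so that later agent commands fail cleanly; -1 is
 * returned only when the guest died while the connection was being set up.
 */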
int
qemuConnectAgent(virQEMUDriver *driver, virDomainObj *vm)
{
    qemuDomainObjPrivate *priv = vm->privateData;
    qemuAgent *agent = NULL;
    virDomainChrDef *config = qemuFindAgentConfig(vm->def);

    if (!config)
        return 0;

    if (priv->agent)
        return 0;

    if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_VSERPORT_CHANGE) &&
        config->state != VIR_DOMAIN_CHR_DEVICE_STATE_CONNECTED) {
        VIR_DEBUG("Deferring connecting to guest agent");
        return 0;
    }

    if (qemuSecuritySetDaemonSocketLabel(driver->securityManager, vm->def) < 0) {
        VIR_ERROR(_("Failed to set security context for agent for %s"),
                  vm->def->name);
        goto cleanup;
    }

    agent = qemuAgentOpen(vm,
                          config->source,
                          virEventThreadGetContext(priv->eventThread),
                          &agentCallbacks,
                          virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_VSERPORT_CHANGE));

    if (!virDomainObjIsActive(vm)) {
        qemuAgentClose(agent);
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("guest crashed while connecting to the guest agent"));
        return -1;
    }

    if (qemuSecurityClearSocketLabel(driver->securityManager, vm->def) < 0) {
        VIR_ERROR(_("Failed to clear security context for agent for %s"),
                  vm->def->name);
        qemuAgentClose(agent);
        goto cleanup;
    }

    priv->agent = agent;
    if (!priv->agent)
        VIR_INFO("Failed to connect agent for %s", vm->def->name);

 cleanup:
    if (!priv->agent) {
        VIR_WARN("Cannot connect to QEMU guest agent for %s", vm->def->name);
        priv->agentError = true;
        virResetLastError();
    }

    return 0;
}


/**
 * qemuProcessEventSubmit:
 * @driver: QEMU driver object
 * @event: pointer to the variable holding the event processing data (stolen and cleared)
 *
 * Submits @event to be processed by the asynchronous event handling thread.
 * If submission fails, @event is properly freed and cleared. If (*event)->vm
 * is non-NULL the domain object is unref'd before freeing @event.
 */
static void
qemuProcessEventSubmit(virQEMUDriver *driver,
                       struct qemuProcessEvent **event)
{
    if (!*event)
        return;

    if (virThreadPoolSendJob(driver->workerPool, 0, *event) < 0) {
        if ((*event)->vm)
            virObjectUnref((*event)->vm);
        qemuProcessEventFree(*event);
    }

    *event = NULL;
}


/*
 * This is a callback registered with a qemuMonitor *instance,
 * and to be invoked when the monitor console hits an end of file
 * condition, or error, thus indicating VM shutdown should be
 * performed
 */
static void
qemuProcessHandleMonitorEOF(qemuMonitor *mon,
                            virDomainObj *vm,
                            void *opaque)
{
    virQEMUDriver *driver = opaque;
    qemuDomainObjPrivate *priv;
    struct qemuProcessEvent *processEvent;

    virObjectLock(vm);

    VIR_DEBUG("Received EOF on %p '%s'", vm, vm->def->name);

    priv = vm->privateData;
    if (priv->beingDestroyed) {
        VIR_DEBUG("Domain is being destroyed, EOF is expected");
        goto cleanup;
    }

    processEvent = g_new0(struct qemuProcessEvent, 1);

    processEvent->eventType = QEMU_PROCESS_EVENT_MONITOR_EOF;
    processEvent->vm = virObjectRef(vm);

    qemuProcessEventSubmit(driver, &processEvent);

    /* We don't want this EOF handler to be called over and over while the
     * thread is waiting for a job.
     */
    virObjectLock(mon);
    qemuMonitorUnregister(mon);
    virObjectUnlock(mon);

    /* We don't want any cleanup from EOF handler (or any other
     * thread) to enter qemu namespace. */
    qemuDomainDestroyNamespace(driver, vm);

 cleanup:
    virObjectUnlock(vm);
}


/*
 * This is invoked when there is some kind of error
 * parsing data to/from the monitor. The VM can continue
 * to run, but no further monitor commands will be
 * allowed
 */
static void
qemuProcessHandleMonitorError(qemuMonitor *mon G_GNUC_UNUSED,
                              virDomainObj *vm,
                              void *opaque)
{
    virQEMUDriver *driver = opaque;
    virObjectEvent *event = NULL;

    VIR_DEBUG("Received error on %p '%s'", vm, vm->def->name);

    virObjectLock(vm);

    ((qemuDomainObjPrivate *) vm->privateData)->monError = true;
    event = virDomainEventControlErrorNewFromObj(vm);
    virObjectEventStateQueue(driver->domainEventState, event);

    virObjectUnlock(vm);
}


/**
 * qemuProcessFindDomainDiskByAliasOrQOM:
 * @vm: domain object to search for the disk
 * @alias: -drive or -device alias of the disk
 * @qomid: QOM tree device name
 *
 * Looks up a disk in the domain definition of @vm which either matches the
 * -drive or -device alias used for the backend and frontend respectively or the
 * QOM name. If @alias is empty it's treated as NULL as it's a mandatory field
 * in some cases.
 *
 * Returns a disk from @vm or NULL if it could not be found.
 */
virDomainDiskDef *
qemuProcessFindDomainDiskByAliasOrQOM(virDomainObj *vm,
                                      const char *alias,
                                      const char *qomid)
{
    size_t i;

    if (alias && *alias == '\0')
        alias = NULL;

    if (alias)
        alias = qemuAliasDiskDriveSkipPrefix(alias);

    for (i = 0; i < vm->def->ndisks; i++) {
        virDomainDiskDef *disk = vm->def->disks[i];
        qemuDomainDiskPrivate *diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk);

        if ((disk->info.alias && STREQ_NULLABLE(disk->info.alias, alias)) ||
            (diskPriv->qomName && STREQ_NULLABLE(diskPriv->qomName, qomid)))
            return disk;
    }

    virReportError(VIR_ERR_INTERNAL_ERROR,
                   _("no disk found with alias '%s' or id '%s'"),
                   NULLSTR(alias), NULLSTR(qomid));
    return NULL;
}


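/*
 * Handle the RESET event: emit a reboot event and notify the guest agent,
 * unless the reset happened while libvirt itself was still starting the
 * domain (a reset is used there to regenerate the ACPI tables).
 */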
static void
qemuProcessHandleReset(qemuMonitor *mon G_GNUC_UNUSED,
                       virDomainObj *vm,
                       void *opaque)
{
    virQEMUDriver *driver = opaque;
    virObjectEvent *event = NULL;
    qemuDomainObjPrivate *priv;
    virDomainState state;
    int reason;

    virObjectLock(vm);

    state = virDomainObjGetState(vm, &reason);

    /* ignore reset events on VM startup. Libvirt in certain instances does a
     * reset during startup so that the ACPI tables are re-generated */
    if (state == VIR_DOMAIN_PAUSED &&
        reason == VIR_DOMAIN_PAUSED_STARTING_UP) {
        VIR_DEBUG("ignoring reset event during startup");
        goto unlock;
    }

    event = virDomainEventRebootNewFromObj(vm);
    priv = vm->privateData;
    if (priv->agent)
        qemuAgentNotifyEvent(priv->agent, QEMU_AGENT_EVENT_RESET);

    qemuDomainSaveStatus(vm);

 unlock:
    virObjectUnlock(vm);
    virObjectEventStateQueue(driver->domainEventState, event);
}


/*
 * Since we have the '-no-shutdown' flag set, the
 * QEMU process will currently have the guest OS shut down
 * and the CPUs stopped. To fake the reboot, we thus
 * want to do a reset of the virtual hardware, followed
 * by a restart of the CPUs. This should result in the
 * guest OS booting up again.
 */
static void
qemuProcessFakeReboot(void *opaque)
{
    virDomainObj *vm = opaque;
    qemuDomainObjPrivate *priv = vm->privateData;
    virQEMUDriver *driver = priv->driver;
    virDomainRunningReason reason = VIR_DOMAIN_RUNNING_BOOTED;
    int ret = -1, rc;

    VIR_DEBUG("vm=%p", vm);
    virObjectLock(vm);
    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
        goto cleanup;

    if (!virDomainObjIsActive(vm)) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("guest unexpectedly quit"));
        goto endjob;
    }

    qemuDomainObjEnterMonitor(driver, vm);
    rc = qemuMonitorSystemReset(priv->mon);

    qemuDomainObjExitMonitor(driver, vm);

    if (rc < 0)
        goto endjob;

    if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_CRASHED)
        reason = VIR_DOMAIN_RUNNING_CRASHED;

    if (qemuProcessStartCPUs(driver, vm,
                             reason,
                             QEMU_ASYNC_JOB_NONE) < 0) {
        if (virGetLastErrorCode() == VIR_ERR_OK)
            virReportError(VIR_ERR_INTERNAL_ERROR,
                           "%s", _("resume operation failed"));
        goto endjob;
    }

    qemuDomainSaveStatus(vm);
    ret = 0;

 endjob:
    qemuDomainObjEndJob(driver, vm);

 cleanup:
    priv->pausedShutdown = false;
    qemuDomainSetFakeReboot(vm, false);
    if (ret == -1)
        ignore_value(qemuProcessKill(vm, VIR_QEMU_PROCESS_KILL_FORCE));
    virDomainObjEndAPI(&vm);
}


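/*
 * Called once the guest OS has shut down: either spawn a thread running
 * qemuProcessFakeReboot() (when a reboot was requested or the on_poweroff
 * action is "restart"), or kill the now-defunct QEMU process.
 */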
void
qemuProcessShutdownOrReboot(virDomainObj *vm)
{
    qemuDomainObjPrivate *priv = vm->privateData;

    if (priv->fakeReboot ||
        vm->def->onPoweroff == VIR_DOMAIN_LIFECYCLE_ACTION_RESTART) {
        g_autofree char *name = g_strdup_printf("reboot-%s", vm->def->name);
        virThread th;

        virObjectRef(vm);
        if (virThreadCreateFull(&th,
                                false,
                                qemuProcessFakeReboot,
                                name,
                                false,
                                vm) < 0) {
            VIR_ERROR(_("Failed to create reboot thread, killing domain"));
            ignore_value(qemuProcessKill(vm, VIR_QEMU_PROCESS_KILL_NOWAIT));
            priv->pausedShutdown = false;
            qemuDomainSetFakeReboot(vm, false);
            virObjectUnref(vm);
        }
    } else {
        ignore_value(qemuProcessKill(vm, VIR_QEMU_PROCESS_KILL_NOWAIT));
    }
}


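/*
 * Forward a raw QEMU monitor event to any clients that registered for
 * them; the event data is passed through unchanged and no domain state
 * is modified here.
 */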
static void
qemuProcessHandleEvent(qemuMonitor *mon G_GNUC_UNUSED,
                       virDomainObj *vm,
                       const char *eventName,
                       long long seconds,
                       unsigned int micros,
                       const char *details,
                       void *opaque)
{
    virQEMUDriver *driver = opaque;
    virObjectEvent *event = NULL;

    VIR_DEBUG("vm=%p", vm);

    virObjectLock(vm);
    event = virDomainQemuMonitorEventNew(vm->def->id, vm->def->name,
                                         vm->def->uuid, eventName,
                                         seconds, micros, details);

    virObjectUnlock(vm);
    virObjectEventStateQueue(driver->domainEventState, event);
}


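/*
 * Handle the SHUTDOWN event: move the domain into the shutdown state and
 * emit a lifecycle event (unless a fake reboot is pending), notify the
 * guest agent, and let qemuProcessShutdownOrReboot() decide whether to
 * reboot the guest or tear the process down.
 */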
static void
qemuProcessHandleShutdown(qemuMonitor *mon G_GNUC_UNUSED,
                          virDomainObj *vm,
                          virTristateBool guest_initiated,
                          void *opaque)
{
    virQEMUDriver *driver = opaque;
    qemuDomainObjPrivate *priv;
    virObjectEvent *event = NULL;
    int detail = 0;

    VIR_DEBUG("vm=%p", vm);

    virObjectLock(vm);

    priv = vm->privateData;
    if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_SHUTDOWN) {
        VIR_DEBUG("Ignoring repeated SHUTDOWN event from domain %s",
                  vm->def->name);
        goto unlock;
    } else if (!virDomainObjIsActive(vm)) {
        VIR_DEBUG("Ignoring SHUTDOWN event from inactive domain %s",
                  vm->def->name);
        goto unlock;
    }

    /* In case of fake reboot qemu shutdown state is transient so don't
     * change domain state nor send events. */
    if (!priv->fakeReboot &&
        vm->def->onPoweroff != VIR_DOMAIN_LIFECYCLE_ACTION_RESTART) {
        VIR_DEBUG("Transitioned guest %s to shutdown state",
                  vm->def->name);
        virDomainObjSetState(vm,
                             VIR_DOMAIN_SHUTDOWN,
                             VIR_DOMAIN_SHUTDOWN_UNKNOWN);

        switch (guest_initiated) {
        case VIR_TRISTATE_BOOL_YES:
            detail = VIR_DOMAIN_EVENT_SHUTDOWN_GUEST;
            break;

        case VIR_TRISTATE_BOOL_NO:
            detail = VIR_DOMAIN_EVENT_SHUTDOWN_HOST;
            break;

        case VIR_TRISTATE_BOOL_ABSENT:
        case VIR_TRISTATE_BOOL_LAST:
        default:
            detail = VIR_DOMAIN_EVENT_SHUTDOWN_FINISHED;
            break;
        }

        event = virDomainEventLifecycleNewFromObj(vm,
                                                  VIR_DOMAIN_EVENT_SHUTDOWN,
                                                  detail);
        qemuDomainSaveStatus(vm);
    } else {
        priv->pausedShutdown = true;
    }

    if (priv->agent)
        qemuAgentNotifyEvent(priv->agent, QEMU_AGENT_EVENT_SHUTDOWN);

    qemuProcessShutdownOrReboot(vm);

 unlock:
    virObjectUnlock(vm);
    virObjectEventStateQueue(driver->domainEventState, event);
}


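/*
 * Handle the STOP event: unless the pause is only the transient phase of a
 * fake reboot, record why the CPUs stopped, switch the domain to the paused
 * state, release any lock manager lease and emit a suspended lifecycle event.
 */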
static void
qemuProcessHandleStop(qemuMonitor *mon G_GNUC_UNUSED,
                      virDomainObj *vm,
                      void *opaque)
{
    virQEMUDriver *driver = opaque;
    virObjectEvent *event = NULL;
    virDomainPausedReason reason;
    virDomainEventSuspendedDetailType detail;
    qemuDomainObjPrivate *priv = vm->privateData;

    virObjectLock(vm);

    reason = priv->pausedReason;
    priv->pausedReason = VIR_DOMAIN_PAUSED_UNKNOWN;

    /* In case of fake reboot qemu paused state is transient so don't
     * reveal it in domain state nor send events */
    if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING &&
        !priv->pausedShutdown) {
        if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT) {
            if (priv->job.current->status == QEMU_DOMAIN_JOB_STATUS_POSTCOPY)
                reason = VIR_DOMAIN_PAUSED_POSTCOPY;
            else
                reason = VIR_DOMAIN_PAUSED_MIGRATION;
        }

        detail = qemuDomainPausedReasonToSuspendedEvent(reason);
        VIR_DEBUG("Transitioned guest %s to paused state, "
                  "reason %s, event detail %d",
                  vm->def->name, virDomainPausedReasonTypeToString(reason),
                  detail);

        if (priv->job.current)
            ignore_value(virTimeMillisNow(&priv->job.current->stopped));

        if (priv->signalStop)
            virDomainObjBroadcast(vm);

        virDomainObjSetState(vm, VIR_DOMAIN_PAUSED, reason);
        event = virDomainEventLifecycleNewFromObj(vm,
                                                  VIR_DOMAIN_EVENT_SUSPENDED,
                                                  detail);

        VIR_FREE(priv->lockState);
        if (virDomainLockProcessPause(driver->lockManager, vm, &priv->lockState) < 0)
            VIR_WARN("Unable to release lease on %s", vm->def->name);
        VIR_DEBUG("Preserving lock state '%s'", NULLSTR(priv->lockState));

        qemuDomainSaveStatus(vm);
    }

    virObjectUnlock(vm);
    virObjectEventStateQueue(driver->domainEventState, event);
}


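/*
 * Handle the RESUME event: transition the domain back to the running state
 * using the reason recorded before the CPUs were restarted, and emit the
 * matching resumed lifecycle event.
 */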
static void
qemuProcessHandleResume(qemuMonitor *mon G_GNUC_UNUSED,
                        virDomainObj *vm,
                        void *opaque)
{
    virQEMUDriver *driver = opaque;
    virObjectEvent *event = NULL;
    qemuDomainObjPrivate *priv;
    virDomainRunningReason reason = VIR_DOMAIN_RUNNING_UNPAUSED;
    virDomainEventResumedDetailType eventDetail;

    virObjectLock(vm);

    priv = vm->privateData;
    if (priv->runningReason != VIR_DOMAIN_RUNNING_UNKNOWN) {
        reason = priv->runningReason;
        priv->runningReason = VIR_DOMAIN_RUNNING_UNKNOWN;
    }

    if (virDomainObjGetState(vm, NULL) != VIR_DOMAIN_RUNNING) {
        eventDetail = qemuDomainRunningReasonToResumeEvent(reason);
        VIR_DEBUG("Transitioned guest %s into running state, reason '%s', "
                  "event detail %d",
                  vm->def->name, virDomainRunningReasonTypeToString(reason),
                  eventDetail);

        virDomainObjSetState(vm, VIR_DOMAIN_RUNNING, reason);
        event = virDomainEventLifecycleNewFromObj(vm,
                                                  VIR_DOMAIN_EVENT_RESUMED,
                                                  eventDetail);
        qemuDomainSaveStatus(vm);
    }

    virObjectUnlock(vm);
    virObjectEventStateQueue(driver->domainEventState, event);
}


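/*
 * Handle the RTC_CHANGE event: for clocks with offset='variable' convert
 * the offset reported by QEMU into an absolute adjustment (see the comment
 * below) before emitting the RTC change event.
 */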
static void
qemuProcessHandleRTCChange(qemuMonitor *mon G_GNUC_UNUSED,
                           virDomainObj *vm,
                           long long offset,
                           void *opaque)
{
    virQEMUDriver *driver = opaque;
    virObjectEvent *event = NULL;

    virObjectLock(vm);

    if (vm->def->clock.offset == VIR_DOMAIN_CLOCK_OFFSET_VARIABLE) {
        /* when a basedate is manually given on the qemu commandline
         * rather than simply "-rtc base=utc", the offset sent by qemu
         * in this event is *not* the new offset from UTC, but is
         * instead the new offset from the *original basedate* +
         * uptime. For example, if the original offset was 3600 and
         * the guest clock has been advanced by 10 seconds, qemu will
         * send "10" in the event - this means that the new offset
         * from UTC is 3610, *not* 10. If the guest clock is advanced
         * by another 10 seconds, qemu will now send "20" - i.e. each
         * event is the sum of the most recent change and all previous
         * changes since the domain was started. Fortunately, we have
         * saved the initial offset in "adjustment0", so to arrive at
         * the proper new "adjustment", we just add the most recent
         * offset to adjustment0.
         */
        offset += vm->def->clock.data.variable.adjustment0;
        vm->def->clock.data.variable.adjustment = offset;

        qemuDomainSaveStatus(vm);
    }

    event = virDomainEventRTCChangeNewFromObj(vm, offset);

    virObjectUnlock(vm);

    virObjectEventStateQueue(driver->domainEventState, event);
}


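/*
 * Handle the WATCHDOG event: emit a watchdog event, pause the domain when
 * the reported action is "pause", and queue an asynchronous core dump when
 * the configured watchdog action is "dump".
 */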
static void
qemuProcessHandleWatchdog(qemuMonitor *mon G_GNUC_UNUSED,
                          virDomainObj *vm,
                          int action,
                          void *opaque)
{
    virQEMUDriver *driver = opaque;
    virObjectEvent *watchdogEvent = NULL;
    virObjectEvent *lifecycleEvent = NULL;

    virObjectLock(vm);
    watchdogEvent = virDomainEventWatchdogNewFromObj(vm, action);

    if (action == VIR_DOMAIN_EVENT_WATCHDOG_PAUSE &&
        virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
        qemuDomainObjPrivate *priv = vm->privateData;
        VIR_DEBUG("Transitioned guest %s to paused state due to watchdog", vm->def->name);

        virDomainObjSetState(vm, VIR_DOMAIN_PAUSED, VIR_DOMAIN_PAUSED_WATCHDOG);
        lifecycleEvent = virDomainEventLifecycleNewFromObj(vm,
                                                           VIR_DOMAIN_EVENT_SUSPENDED,
                                                           VIR_DOMAIN_EVENT_SUSPENDED_WATCHDOG);

        VIR_FREE(priv->lockState);
        if (virDomainLockProcessPause(driver->lockManager, vm, &priv->lockState) < 0)
            VIR_WARN("Unable to release lease on %s", vm->def->name);
        VIR_DEBUG("Preserving lock state '%s'", NULLSTR(priv->lockState));

        qemuDomainSaveStatus(vm);
    }

    if (vm->def->watchdog->action == VIR_DOMAIN_WATCHDOG_ACTION_DUMP) {
        struct qemuProcessEvent *processEvent;
        processEvent = g_new0(struct qemuProcessEvent, 1);

        processEvent->eventType = QEMU_PROCESS_EVENT_WATCHDOG;
        processEvent->action = VIR_DOMAIN_WATCHDOG_ACTION_DUMP;
        /* Hold an extra reference because we can't allow 'vm' to be
         * deleted before handling watchdog event is finished.
         */
        processEvent->vm = virObjectRef(vm);

        qemuProcessEventSubmit(driver, &processEvent);
    }

    virObjectUnlock(vm);
    virObjectEventStateQueue(driver->domainEventState, watchdogEvent);
    virObjectEventStateQueue(driver->domainEventState, lifecycleEvent);
}


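/*
 * Handle the BLOCK_IO_ERROR event: look up the affected disk by alias or by
 * node name, emit the I/O error events and, if the error policy pauses the
 * guest, record the paused state and release the lock manager lease.
 */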
static void
qemuProcessHandleIOError(qemuMonitor *mon G_GNUC_UNUSED,
                         virDomainObj *vm,
                         const char *diskAlias,
                         const char *nodename,
                         int action,
                         const char *reason,
                         void *opaque)
{
    virQEMUDriver *driver = opaque;
    virObjectEvent *ioErrorEvent = NULL;
    virObjectEvent *ioErrorEvent2 = NULL;
    virObjectEvent *lifecycleEvent = NULL;
    const char *srcPath;
    const char *devAlias;
    virDomainDiskDef *disk;

    virObjectLock(vm);

    if (*diskAlias == '\0')
        diskAlias = NULL;

    if (diskAlias)
        disk = qemuProcessFindDomainDiskByAliasOrQOM(vm, diskAlias, NULL);
    else if (nodename)
        disk = qemuDomainDiskLookupByNodename(vm->def, NULL, nodename, NULL);
    else
        disk = NULL;

    if (disk) {
        srcPath = virDomainDiskGetSource(disk);
        devAlias = disk->info.alias;
    } else {
        srcPath = "";
        devAlias = "";
    }

    ioErrorEvent = virDomainEventIOErrorNewFromObj(vm, srcPath, devAlias, action);
    ioErrorEvent2 = virDomainEventIOErrorReasonNewFromObj(vm, srcPath, devAlias, action, reason);

    if (action == VIR_DOMAIN_EVENT_IO_ERROR_PAUSE &&
        virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
        qemuDomainObjPrivate *priv = vm->privateData;
        VIR_DEBUG("Transitioned guest %s to paused state due to IO error", vm->def->name);

        if (priv->signalIOError)
            virDomainObjBroadcast(vm);

        virDomainObjSetState(vm, VIR_DOMAIN_PAUSED, VIR_DOMAIN_PAUSED_IOERROR);
        lifecycleEvent = virDomainEventLifecycleNewFromObj(vm,
                                                           VIR_DOMAIN_EVENT_SUSPENDED,
                                                           VIR_DOMAIN_EVENT_SUSPENDED_IOERROR);

        VIR_FREE(priv->lockState);
        if (virDomainLockProcessPause(driver->lockManager, vm, &priv->lockState) < 0)
            VIR_WARN("Unable to release lease on %s", vm->def->name);
        VIR_DEBUG("Preserving lock state '%s'", NULLSTR(priv->lockState));

        qemuDomainSaveStatus(vm);
    }
    virObjectUnlock(vm);

    virObjectEventStateQueue(driver->domainEventState, ioErrorEvent);
    virObjectEventStateQueue(driver->domainEventState, ioErrorEvent2);
    virObjectEventStateQueue(driver->domainEventState, lifecycleEvent);
}


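/*
 * Handle the legacy BLOCK_JOB_* events: wake up a synchronous caller waiting
 * for the job, or hand the update off to the worker pool. With
 * QEMU_CAPS_BLOCKDEV these events are ignored in favour of JOB_STATUS_CHANGE.
 */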
static void
qemuProcessHandleBlockJob(qemuMonitor *mon G_GNUC_UNUSED,
                          virDomainObj *vm,
                          const char *diskAlias,
                          int type,
                          int status,
                          const char *error,
                          void *opaque)
{
    qemuDomainObjPrivate *priv;
    virQEMUDriver *driver = opaque;
    virDomainDiskDef *disk;
    g_autoptr(qemuBlockJobData) job = NULL;
    char *data = NULL;

    virObjectLock(vm);

    priv = vm->privateData;

    /* with QEMU_CAPS_BLOCKDEV we handle block job events via JOB_STATUS_CHANGE */
    if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV))
        goto cleanup;

    VIR_DEBUG("Block job for device %s (domain: %p,%s) type %d status %d",
              diskAlias, vm, vm->def->name, type, status);

    if (!(disk = qemuProcessFindDomainDiskByAliasOrQOM(vm, diskAlias, NULL)))
        goto cleanup;

    job = qemuBlockJobDiskGetJob(disk);

    if (job && job->synchronous) {
        /* We have a SYNC API waiting for this event, dispatch it back */
        job->newstate = status;
        VIR_FREE(job->errmsg);
        job->errmsg = g_strdup(error);
        virDomainObjBroadcast(vm);
    } else {
        /* there is no waiting SYNC API, dispatch the update to a thread */
        struct qemuProcessEvent *processEvent = g_new0(struct qemuProcessEvent, 1);

        processEvent->eventType = QEMU_PROCESS_EVENT_BLOCK_JOB;
        data = g_strdup(diskAlias);
        processEvent->data = data;
        processEvent->vm = virObjectRef(vm);
        processEvent->action = type;
        processEvent->status = status;

        qemuProcessEventSubmit(driver, &processEvent);
    }

 cleanup:
    virObjectUnlock(vm);
}


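/* Handler for the JOB_STATUS_CHANGE monitor event used with QEMU_CAPS_BLOCKDEV.
 * Translates the monitor status into the internal block job state and either
 * wakes up a synchronous waiter or hands the job off to the event thread. */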
static void
qemuProcessHandleJobStatusChange(qemuMonitor *mon G_GNUC_UNUSED,
                                 virDomainObj *vm,
                                 const char *jobname,
                                 int status,
                                 void *opaque)
{
    virQEMUDriver *driver = opaque;
    qemuDomainObjPrivate *priv;
    qemuBlockJobData *job = NULL;
    int jobnewstate;

    virObjectLock(vm);
    priv = vm->privateData;

    VIR_DEBUG("job '%s'(domain: %p,%s) state changed to '%s'(%d)",
              jobname, vm, vm->def->name,
              qemuMonitorJobStatusTypeToString(status), status);

    if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV)) {
        VIR_DEBUG("job '%s' handled by old blockjob handler", jobname);
        goto cleanup;
    }

    if ((jobnewstate = qemuBlockjobConvertMonitorStatus(status)) == QEMU_BLOCKJOB_STATE_LAST)
        goto cleanup;

    if (!(job = virHashLookup(priv->blockjobs, jobname))) {
        VIR_DEBUG("job '%s' not registered", jobname);
        goto cleanup;
    }

    job->newstate = jobnewstate;

    if (job->synchronous) {
        VIR_DEBUG("job '%s' handled synchronously", jobname);
        virDomainObjBroadcast(vm);
    } else {
        struct qemuProcessEvent *processEvent = g_new0(struct qemuProcessEvent, 1);

        VIR_DEBUG("job '%s' handled by event thread", jobname);

        processEvent->eventType = QEMU_PROCESS_EVENT_JOB_STATUS_CHANGE;
        processEvent->vm = virObjectRef(vm);
        processEvent->data = virObjectRef(job);

        qemuProcessEventSubmit(driver, &processEvent);
    }

 cleanup:
    virObjectUnlock(vm);
}


static void
qemuProcessHandleGraphics(qemuMonitor *mon G_GNUC_UNUSED,
                          virDomainObj *vm,
                          int phase,
                          int localFamily,
                          const char *localNode,
                          const char *localService,
                          int remoteFamily,
                          const char *remoteNode,
                          const char *remoteService,
                          const char *authScheme,
                          const char *x509dname,
                          const char *saslUsername,
                          void *opaque)
{
    virQEMUDriver *driver = opaque;
    virObjectEvent *event;
    virDomainEventGraphicsAddressPtr localAddr = NULL;
    virDomainEventGraphicsAddressPtr remoteAddr = NULL;
    virDomainEventGraphicsSubjectPtr subject = NULL;

    localAddr = g_new0(virDomainEventGraphicsAddress, 1);
    localAddr->family = localFamily;
    localAddr->service = g_strdup(localService);
    localAddr->node = g_strdup(localNode);

    remoteAddr = g_new0(virDomainEventGraphicsAddress, 1);
    remoteAddr->family = remoteFamily;
    remoteAddr->service = g_strdup(remoteService);
    remoteAddr->node = g_strdup(remoteNode);

    subject = g_new0(virDomainEventGraphicsSubject, 1);
    if (x509dname) {
        VIR_REALLOC_N(subject->identities, subject->nidentity+1);
        subject->nidentity++;
        subject->identities[subject->nidentity - 1].type = g_strdup("x509dname");
        subject->identities[subject->nidentity - 1].name = g_strdup(x509dname);
    }
    if (saslUsername) {
        VIR_REALLOC_N(subject->identities, subject->nidentity+1);
        subject->nidentity++;
        subject->identities[subject->nidentity - 1].type = g_strdup("saslUsername");
        subject->identities[subject->nidentity - 1].name = g_strdup(saslUsername);
    }

    virObjectLock(vm);
    event = virDomainEventGraphicsNewFromObj(vm, phase, localAddr, remoteAddr, authScheme, subject);
    virObjectUnlock(vm);

    virObjectEventStateQueue(driver->domainEventState, event);
}


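/* Handler for the DEVICE_TRAY_MOVED event: updates the tray status of the
 * affected disk, saves the domain status and emits a tray change event. */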
static void
qemuProcessHandleTrayChange(qemuMonitor *mon G_GNUC_UNUSED,
                            virDomainObj *vm,
                            const char *devAlias,
                            const char *devid,
                            int reason,
                            void *opaque)
{
    virQEMUDriver *driver = opaque;
    virObjectEvent *event = NULL;
    virDomainDiskDef *disk;

    virObjectLock(vm);
    disk = qemuProcessFindDomainDiskByAliasOrQOM(vm, devAlias, devid);

    if (disk) {
        event = virDomainEventTrayChangeNewFromObj(vm, disk->info.alias, reason);
        /* Update disk tray status */
        if (reason == VIR_DOMAIN_EVENT_TRAY_CHANGE_OPEN)
            disk->tray_status = VIR_DOMAIN_DISK_TRAY_OPEN;
        else if (reason == VIR_DOMAIN_EVENT_TRAY_CHANGE_CLOSE)
            disk->tray_status = VIR_DOMAIN_DISK_TRAY_CLOSED;

        qemuDomainSaveStatus(vm);
        virDomainObjBroadcast(vm);
    }

    virObjectUnlock(vm);
    virObjectEventStateQueue(driver->domainEventState, event);
}


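/* Handler for the WAKEUP event: transitions a pmsuspended guest back to the
 * running state and emits the corresponding lifecycle event. */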
static void
qemuProcessHandlePMWakeup(qemuMonitor *mon G_GNUC_UNUSED,
                          virDomainObj *vm,
                          void *opaque)
{
    virQEMUDriver *driver = opaque;
    virObjectEvent *event = NULL;
    virObjectEvent *lifecycleEvent = NULL;

    virObjectLock(vm);
    event = virDomainEventPMWakeupNewFromObj(vm);

    /* Don't set domain status back to running if it wasn't paused
     * from guest side, otherwise it can just cause confusion.
     */
    if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PMSUSPENDED) {
        VIR_DEBUG("Transitioned guest %s from pmsuspended to running "
                  "state due to QMP wakeup event", vm->def->name);

        virDomainObjSetState(vm, VIR_DOMAIN_RUNNING,
                             VIR_DOMAIN_RUNNING_WAKEUP);
        lifecycleEvent = virDomainEventLifecycleNewFromObj(vm,
                                                           VIR_DOMAIN_EVENT_STARTED,
                                                           VIR_DOMAIN_EVENT_STARTED_WAKEUP);
        qemuDomainSaveStatus(vm);
    }

    virObjectUnlock(vm);
    virObjectEventStateQueue(driver->domainEventState, event);
    virObjectEventStateQueue(driver->domainEventState, lifecycleEvent);
}


static void
qemuProcessHandlePMSuspend(qemuMonitor *mon G_GNUC_UNUSED,
                           virDomainObj *vm,
                           void *opaque)
{
    virQEMUDriver *driver = opaque;
    virObjectEvent *event = NULL;
    virObjectEvent *lifecycleEvent = NULL;

    virObjectLock(vm);
    event = virDomainEventPMSuspendNewFromObj(vm);

    if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
        qemuDomainObjPrivate *priv = vm->privateData;
        VIR_DEBUG("Transitioned guest %s to pmsuspended state due to "
                  "QMP suspend event", vm->def->name);

        virDomainObjSetState(vm, VIR_DOMAIN_PMSUSPENDED,
                             VIR_DOMAIN_PMSUSPENDED_UNKNOWN);
        lifecycleEvent =
            virDomainEventLifecycleNewFromObj(vm,
                                              VIR_DOMAIN_EVENT_PMSUSPENDED,
                                              VIR_DOMAIN_EVENT_PMSUSPENDED_MEMORY);
        qemuDomainSaveStatus(vm);

        if (priv->agent)
            qemuAgentNotifyEvent(priv->agent, QEMU_AGENT_EVENT_SUSPEND);
    }

    virObjectUnlock(vm);

    virObjectEventStateQueue(driver->domainEventState, event);
    virObjectEventStateQueue(driver->domainEventState, lifecycleEvent);
}


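/* Handler for the BALLOON_CHANGE event: updates the current balloon size in
 * the live definition, adding back the size of any virtio-mem devices. */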
static void
qemuProcessHandleBalloonChange(qemuMonitor *mon G_GNUC_UNUSED,
                               virDomainObj *vm,
                               unsigned long long actual,
                               void *opaque)
{
    virQEMUDriver *driver = opaque;
    virObjectEvent *event = NULL;
    size_t i;

    virObjectLock(vm);
    event = virDomainEventBalloonChangeNewFromObj(vm, actual);

    /* We want the balloon size stored in domain definition to
     * account for the actual size of virtio-mem too. But the
     * balloon size as reported by QEMU (@actual) contains just
     * the balloon size without any virtio-mem. Do a wee bit of
     * math to fix it. */
    VIR_DEBUG("balloon size before fix is %lld", actual);
    for (i = 0; i < vm->def->nmems; i++) {
        if (vm->def->mems[i]->model == VIR_DOMAIN_MEMORY_MODEL_VIRTIO_MEM)
            actual += vm->def->mems[i]->currentsize;
    }

    VIR_DEBUG("Updating balloon from %lld to %lld kb",
              vm->def->mem.cur_balloon, actual);
    vm->def->mem.cur_balloon = actual;

    qemuDomainSaveStatus(vm);
    virObjectUnlock(vm);

    virObjectEventStateQueue(driver->domainEventState, event);
}


static void
qemuProcessHandlePMSuspendDisk(qemuMonitor *mon G_GNUC_UNUSED,
                               virDomainObj *vm,
                               void *opaque)
{
    virQEMUDriver *driver = opaque;
    virObjectEvent *event = NULL;
    virObjectEvent *lifecycleEvent = NULL;

    virObjectLock(vm);
    event = virDomainEventPMSuspendDiskNewFromObj(vm);

    if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
        qemuDomainObjPrivate *priv = vm->privateData;
        VIR_DEBUG("Transitioned guest %s to pmsuspended state due to "
                  "QMP suspend_disk event", vm->def->name);

        virDomainObjSetState(vm, VIR_DOMAIN_PMSUSPENDED,
                             VIR_DOMAIN_PMSUSPENDED_UNKNOWN);
        lifecycleEvent =
            virDomainEventLifecycleNewFromObj(vm,
                                              VIR_DOMAIN_EVENT_PMSUSPENDED,
                                              VIR_DOMAIN_EVENT_PMSUSPENDED_DISK);
        qemuDomainSaveStatus(vm);

        if (priv->agent)
            qemuAgentNotifyEvent(priv->agent, QEMU_AGENT_EVENT_SUSPEND);
    }

    virObjectUnlock(vm);

    virObjectEventStateQueue(driver->domainEventState, event);
    virObjectEventStateQueue(driver->domainEventState, lifecycleEvent);
}


static void
qemuProcessHandleGuestPanic(qemuMonitor *mon G_GNUC_UNUSED,
                            virDomainObj *vm,
                            qemuMonitorEventPanicInfo *info,
                            void *opaque)
{
    virQEMUDriver *driver = opaque;
    struct qemuProcessEvent *processEvent;

    virObjectLock(vm);
    processEvent = g_new0(struct qemuProcessEvent, 1);

    processEvent->eventType = QEMU_PROCESS_EVENT_GUESTPANIC;
    processEvent->action = vm->def->onCrash;
    processEvent->data = info;
    /* Hold an extra reference because we can't allow 'vm' to be
     * deleted before handling guest panic event is finished.
     */
    processEvent->vm = virObjectRef(vm);

    qemuProcessEventSubmit(driver, &processEvent);

    virObjectUnlock(vm);
}


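/* Handler for the DEVICE_DELETED event: signals any thread waiting for the
 * device removal; otherwise queues the removal for the event worker thread. */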
void
qemuProcessHandleDeviceDeleted(qemuMonitor *mon G_GNUC_UNUSED,
                               virDomainObj *vm,
                               const char *devAlias,
                               void *opaque)
{
    virQEMUDriver *driver = opaque;
    struct qemuProcessEvent *processEvent = NULL;
    char *data;

    virObjectLock(vm);

    VIR_DEBUG("Device %s removed from domain %p %s",
              devAlias, vm, vm->def->name);

    if (qemuDomainSignalDeviceRemoval(vm, devAlias,
                                      QEMU_DOMAIN_UNPLUGGING_DEVICE_STATUS_OK))
        goto cleanup;

    processEvent = g_new0(struct qemuProcessEvent, 1);

    processEvent->eventType = QEMU_PROCESS_EVENT_DEVICE_DELETED;
    data = g_strdup(devAlias);
    processEvent->data = data;
    processEvent->vm = virObjectRef(vm);

    qemuProcessEventSubmit(driver, &processEvent);

 cleanup:
    virObjectUnlock(vm);
}


static void
qemuProcessHandleDeviceUnplugErr(qemuMonitor *mon G_GNUC_UNUSED,
                                 virDomainObj *vm,
                                 const char *devPath,
                                 const char *devAlias,
                                 void *opaque)
{
    virQEMUDriver *driver = opaque;
    virObjectEvent *event = NULL;

    virObjectLock(vm);

    VIR_DEBUG("Device %s QOM path %s failed to be removed from domain %p %s",
              devAlias, devPath, vm, vm->def->name);

    /*
     * DEVICE_UNPLUG_GUEST_ERROR will always contain the QOM path
     * but QEMU will not guarantee that devAlias will be provided.
     *
     * However, given that all Libvirt devices have a devAlias, we
     * can ignore the case where QEMU emitted this event without it.
     */
    if (!devAlias)
        goto cleanup;

    qemuDomainSignalDeviceRemoval(vm, devAlias,
                                  QEMU_DOMAIN_UNPLUGGING_DEVICE_STATUS_GUEST_REJECTED);

    event = virDomainEventDeviceRemovalFailedNewFromObj(vm, devAlias);

 cleanup:
    virObjectUnlock(vm);
    virObjectEventStateQueue(driver->domainEventState, event);
}


/**
 *
 * Meaning of fields reported by the event according to the ACPI standard:
 * @source:
 *  0x00 - 0xff: Notification values, as passed at the request time
 *  0x100: Operating System Shutdown Processing
 *  0x103: Ejection processing
 *  0x200: Insertion processing
 *  other values are reserved
 *
 * @status:
 *   general values
 *     0x00: success
 *     0x01: non-specific failure
 *     0x02: unrecognized notify code
 *     0x03 - 0x7f: reserved
 *     other values are specific to the notification type (see below)
 *
 *   for the 0x100 source the following additional codes are standardized:
 *     0x80: OS Shutdown request denied
 *     0x81: OS Shutdown in progress
 *     0x82: OS Shutdown completed
 *     0x83: OS Graceful shutdown not supported
 *     other higher values are reserved
 *
 *   for the 0x003 (Ejection request) and 0x103 (Ejection processing) source
 *   the following additional codes are standardized:
 *     0x80: Device ejection not supported by OSPM
 *     0x81: Device in use by application
 *     0x82: Device Busy
 *     0x83: Ejection dependency is busy or not supported for ejection by OSPM
 *     0x84: Ejection is in progress (pending)
 *     other higher values are reserved
 *
 *   for the 0x200 source the following additional codes are standardized:
 *     0x80: Device insertion in progress (pending)
 *     0x81: Device driver load failure
 *     0x82: Device insertion not supported by OSPM
 *     0x83-0x8F: Reserved
 *     0x90-0x9F: Insertion failure - Resources Unavailable as described by the
 *                following bit encodings:
 *                Bit [3]: Bus or Segment Numbers
 *                Bit [2]: Interrupts
 *                Bit [1]: I/O
 *                Bit [0]: Memory
 *     other higher values are reserved
 *
 * Other fields and semantics are specific to the qemu handling of the event.
 *  - @alias may be NULL for successful unplug operations
 *  - @slotType describes the device type a bit more closely, currently the
 *    only known value is 'DIMM'
 *  - @slot describes the specific device
 *
 * Note that qemu does not emit the event for all the documented sources or
 * devices.
 */
static void
qemuProcessHandleAcpiOstInfo(qemuMonitor *mon G_GNUC_UNUSED,
                             virDomainObj *vm,
                             const char *alias,
                             const char *slotType,
                             const char *slot,
                             unsigned int source,
                             unsigned int status,
                             void *opaque)
{
    virQEMUDriver *driver = opaque;
    virObjectEvent *event = NULL;

    virObjectLock(vm);

    VIR_DEBUG("ACPI OST info for device %s domain %p %s. "
              "slotType='%s' slot='%s' source=%u status=%u",
              NULLSTR(alias), vm, vm->def->name, slotType, slot, source, status);

    if (!alias)
        goto cleanup;

    if (STREQ(slotType, "DIMM")) {
        if ((source == 0x003 || source == 0x103) &&
            (status == 0x01 || (status >= 0x80 && status <= 0x83))) {
            qemuDomainSignalDeviceRemoval(vm, alias,
                                          QEMU_DOMAIN_UNPLUGGING_DEVICE_STATUS_GUEST_REJECTED);

            event = virDomainEventDeviceRemovalFailedNewFromObj(vm, alias);
        }
    }

 cleanup:
    virObjectUnlock(vm);
    virObjectEventStateQueue(driver->domainEventState, event);
}


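/* Handler for the BLOCK_WRITE_THRESHOLD event: looks up the storage source by
 * node name and emits threshold events both for the disk-level target and for
 * the indexed backing-store entry. */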
static void
qemuProcessHandleBlockThreshold(qemuMonitor *mon G_GNUC_UNUSED,
                                virDomainObj *vm,
                                const char *nodename,
                                unsigned long long threshold,
                                unsigned long long excess,
                                void *opaque)
{
    qemuDomainObjPrivate *priv;
    virQEMUDriver *driver = opaque;
    virObjectEvent *eventSource = NULL;
    virObjectEvent *eventDevice = NULL;
    virDomainDiskDef *disk;
    virStorageSource *src;
    const char *path = NULL;

    virObjectLock(vm);

    priv = vm->privateData;

    VIR_DEBUG("BLOCK_WRITE_THRESHOLD event for block node '%s' in domain %p %s:"
              "threshold '%llu' exceeded by '%llu'",
              nodename, vm, vm->def->name, threshold, excess);

    if ((disk = qemuDomainDiskLookupByNodename(vm->def, priv->backup, nodename, &src))) {
        if (virStorageSourceIsLocalStorage(src))
            path = src->path;

        if (src == disk->src &&
            !src->thresholdEventWithIndex) {
            g_autofree char *dev = qemuDomainDiskBackingStoreGetName(disk, 0);

            eventDevice = virDomainEventBlockThresholdNewFromObj(vm, dev, path,
                                                                 threshold, excess);
        }

        if (src->id != 0) {
            g_autofree char *dev = qemuDomainDiskBackingStoreGetName(disk, src->id);

            eventSource = virDomainEventBlockThresholdNewFromObj(vm, dev, path,
                                                                 threshold, excess);
        }
    }

    virObjectUnlock(vm);
    virObjectEventStateQueue(driver->domainEventState, eventDevice);
    virObjectEventStateQueue(driver->domainEventState, eventSource);
}


static void
qemuProcessHandleNicRxFilterChanged(qemuMonitor *mon G_GNUC_UNUSED,
                                    virDomainObj *vm,
                                    const char *devAlias,
                                    void *opaque)
{
    virQEMUDriver *driver = opaque;
    struct qemuProcessEvent *processEvent = NULL;
    char *data;

    virObjectLock(vm);

    VIR_DEBUG("Device %s RX Filter changed in domain %p %s",
              devAlias, vm, vm->def->name);

    processEvent = g_new0(struct qemuProcessEvent, 1);

    processEvent->eventType = QEMU_PROCESS_EVENT_NIC_RX_FILTER_CHANGED;
    data = g_strdup(devAlias);
    processEvent->data = data;
    processEvent->vm = virObjectRef(vm);

    qemuProcessEventSubmit(driver, &processEvent);

    virObjectUnlock(vm);
}


static void
qemuProcessHandleSerialChanged(qemuMonitor *mon G_GNUC_UNUSED,
                               virDomainObj *vm,
                               const char *devAlias,
                               bool connected,
                               void *opaque)
{
    virQEMUDriver *driver = opaque;
    struct qemuProcessEvent *processEvent = NULL;
    char *data;

    virObjectLock(vm);

    VIR_DEBUG("Serial port %s state changed to '%d' in domain %p %s",
              devAlias, connected, vm, vm->def->name);

    processEvent = g_new0(struct qemuProcessEvent, 1);

    processEvent->eventType = QEMU_PROCESS_EVENT_SERIAL_CHANGED;
    data = g_strdup(devAlias);
    processEvent->data = data;
    processEvent->action = connected;
    processEvent->vm = virObjectRef(vm);

    qemuProcessEventSubmit(driver, &processEvent);

    virObjectUnlock(vm);
}


static void
qemuProcessHandleSpiceMigrated(qemuMonitor *mon G_GNUC_UNUSED,
                               virDomainObj *vm,
                               void *opaque G_GNUC_UNUSED)
{
    qemuDomainObjPrivate *priv;
    qemuDomainJobPrivate *jobPriv;

    virObjectLock(vm);

    VIR_DEBUG("Spice migration completed for domain %p %s",
              vm, vm->def->name);

    priv = vm->privateData;
    jobPriv = priv->job.privateData;
    if (priv->job.asyncJob != QEMU_ASYNC_JOB_MIGRATION_OUT) {
        VIR_DEBUG("got SPICE_MIGRATE_COMPLETED event without a migration job");
        goto cleanup;
    }

    jobPriv->spiceMigrated = true;
    virDomainObjBroadcast(vm);

 cleanup:
    virObjectUnlock(vm);
}


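/* Handler for the MIGRATION event: records the new migration status in the
 * current job and, when an outgoing migration switches to post-copy after the
 * guest was already paused for migration (the ordering seen with the
 * pause-before-switchover capability), corrects the paused state reason to
 * post-copy and emits the matching suspended lifecycle event. */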
static void
qemuProcessHandleMigrationStatus(qemuMonitor *mon G_GNUC_UNUSED,
                                 virDomainObj *vm,
                                 int status,
                                 void *opaque)
{
    qemuDomainObjPrivate *priv;
    virQEMUDriver *driver = opaque;
    virObjectEvent *event = NULL;
    int reason;

    virObjectLock(vm);

    VIR_DEBUG("Migration of domain %p %s changed state to %s",
              vm, vm->def->name,
              qemuMonitorMigrationStatusTypeToString(status));

    priv = vm->privateData;
    if (priv->job.asyncJob == QEMU_ASYNC_JOB_NONE) {
        VIR_DEBUG("got MIGRATION event without a migration job");
        goto cleanup;
    }

    priv->job.current->stats.mig.status = status;
    virDomainObjBroadcast(vm);

    if (status == QEMU_MONITOR_MIGRATION_STATUS_POSTCOPY &&
        priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT &&
        virDomainObjGetState(vm, &reason) == VIR_DOMAIN_PAUSED &&
        reason == VIR_DOMAIN_PAUSED_MIGRATION) {
        VIR_DEBUG("Correcting paused state reason for domain %s to %s",
                  vm->def->name,
                  virDomainPausedReasonTypeToString(VIR_DOMAIN_PAUSED_POSTCOPY));

        virDomainObjSetState(vm, VIR_DOMAIN_PAUSED, VIR_DOMAIN_PAUSED_POSTCOPY);
        event = virDomainEventLifecycleNewFromObj(vm,
                                                  VIR_DOMAIN_EVENT_SUSPENDED,
                                                  VIR_DOMAIN_EVENT_SUSPENDED_POSTCOPY);
        qemuDomainSaveStatus(vm);
    }

 cleanup:
    virObjectUnlock(vm);
    virObjectEventStateQueue(driver->domainEventState, event);
}


static void
qemuProcessHandleMigrationPass(qemuMonitor *mon G_GNUC_UNUSED,
                               virDomainObj *vm,
                               int pass,
                               void *opaque)
{
    virQEMUDriver *driver = opaque;
    qemuDomainObjPrivate *priv;

    virObjectLock(vm);

    VIR_DEBUG("Migrating domain %p %s, iteration %d",
              vm, vm->def->name, pass);

    priv = vm->privateData;
    if (priv->job.asyncJob == QEMU_ASYNC_JOB_NONE) {
        VIR_DEBUG("got MIGRATION_PASS event without a migration job");
        goto cleanup;
    }

    virObjectEventStateQueue(driver->domainEventState,
                             virDomainEventMigrationIterationNewFromObj(vm, pass));

 cleanup:
    virObjectUnlock(vm);
}


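/* Handler for the DUMP_COMPLETED event: stores the dump statistics and error
 * message in the current job and wakes up the thread waiting for the dump. */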
static void
qemuProcessHandleDumpCompleted(qemuMonitor *mon G_GNUC_UNUSED,
                               virDomainObj *vm,
                               int status,
                               qemuMonitorDumpStats *stats,
                               const char *error,
                               void *opaque G_GNUC_UNUSED)
{
    qemuDomainObjPrivate *priv;
    qemuDomainJobPrivate *jobPriv;

    virObjectLock(vm);

    VIR_DEBUG("Dump completed for domain %p %s with stats=%p error='%s'",
              vm, vm->def->name, stats, NULLSTR(error));

    priv = vm->privateData;
    jobPriv = priv->job.privateData;
    if (priv->job.asyncJob == QEMU_ASYNC_JOB_NONE) {
        VIR_DEBUG("got DUMP_COMPLETED event without a dump_completed job");
        goto cleanup;
    }
    jobPriv->dumpCompleted = true;
    priv->job.current->stats.dump = *stats;
    priv->job.error = g_strdup(error);

    /* Force error if extracting the DUMP_COMPLETED status failed */
    if (!error && status < 0) {
        priv->job.error = g_strdup(virGetLastErrorMessage());
        priv->job.current->stats.dump.status = QEMU_MONITOR_DUMP_STATUS_FAILED;
    }

    virDomainObjBroadcast(vm);

 cleanup:
    virResetLastError();
    virObjectUnlock(vm);
}


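/* Handler for the PR_MANAGER_STATUS_CHANGED event: only disconnects of the
 * libvirt-managed pr-helper are acted upon, by marking the daemon as not
 * running and queuing a PR_DISCONNECT event for the worker thread. */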
static void
qemuProcessHandlePRManagerStatusChanged(qemuMonitor *mon G_GNUC_UNUSED,
                                        virDomainObj *vm,
                                        const char *prManager,
                                        bool connected,
                                        void *opaque)
{
    virQEMUDriver *driver = opaque;
    qemuDomainObjPrivate *priv;
    struct qemuProcessEvent *processEvent = NULL;
    const char *managedAlias = qemuDomainGetManagedPRAlias();

    virObjectLock(vm);

    VIR_DEBUG("pr-manager %s status changed for domain %p %s connected=%d",
              prManager, vm, vm->def->name, connected);

    /* Connect events are boring. */
    if (connected)
        goto cleanup;

    /* Disconnect events are more interesting. */

    if (STRNEQ(prManager, managedAlias)) {
        VIR_DEBUG("pr-manager %s not managed, ignoring event",
                  prManager);
        goto cleanup;
    }

    priv = vm->privateData;
    priv->prDaemonRunning = false;

    processEvent = g_new0(struct qemuProcessEvent, 1);

    processEvent->eventType = QEMU_PROCESS_EVENT_PR_DISCONNECT;
    processEvent->vm = virObjectRef(vm);

    qemuProcessEventSubmit(driver, &processEvent);

 cleanup:
    virObjectUnlock(vm);
}


static void
qemuProcessHandleRdmaGidStatusChanged(qemuMonitor *mon G_GNUC_UNUSED,
                                      virDomainObj *vm,
                                      const char *netdev,
                                      bool gid_status,
                                      unsigned long long subnet_prefix,
                                      unsigned long long interface_id,
                                      void *opaque)
{
    virQEMUDriver *driver = opaque;
    struct qemuProcessEvent *processEvent = NULL;
    qemuMonitorRdmaGidStatus *info = NULL;

    virObjectLock(vm);

    VIR_DEBUG("netdev=%s,gid_status=%d,subnet_prefix=0x%llx,interface_id=0x%llx",
              netdev, gid_status, subnet_prefix, interface_id);

    info = g_new0(qemuMonitorRdmaGidStatus, 1);

    info->netdev = g_strdup(netdev);

    info->gid_status = gid_status;
    info->subnet_prefix = subnet_prefix;
    info->interface_id = interface_id;

    processEvent = g_new0(struct qemuProcessEvent, 1);

    processEvent->eventType = QEMU_PROCESS_EVENT_RDMA_GID_STATUS_CHANGED;
    processEvent->vm = virObjectRef(vm);
    processEvent->data = g_steal_pointer(&info);

    qemuProcessEventSubmit(driver, &processEvent);

    virObjectUnlock(vm);
}


static void
qemuProcessHandleGuestCrashloaded(qemuMonitor *mon G_GNUC_UNUSED,
                                  virDomainObj *vm,
                                  void *opaque)
{
    virQEMUDriver *driver = opaque;
    struct qemuProcessEvent *processEvent;

    virObjectLock(vm);
    processEvent = g_new0(struct qemuProcessEvent, 1);

    processEvent->eventType = QEMU_PROCESS_EVENT_GUEST_CRASHLOADED;
    processEvent->vm = virObjectRef(vm);

    qemuProcessEventSubmit(driver, &processEvent);

    virObjectUnlock(vm);
}


static void
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessHandleMemoryFailure(qemuMonitor *mon G_GNUC_UNUSED,
|
|
|
|
virDomainObj *vm,
|
|
|
|
qemuMonitorEventMemoryFailure *mfp,
|
2020-10-14 10:37:51 +00:00
|
|
|
void *opaque)
|
|
|
|
{
|
2021-03-11 07:16:13 +00:00
|
|
|
virQEMUDriver *driver = opaque;
|
|
|
|
virObjectEvent *event = NULL;
|
2020-10-14 10:37:51 +00:00
|
|
|
virDomainMemoryFailureRecipientType recipient;
|
|
|
|
virDomainMemoryFailureActionType action;
|
|
|
|
unsigned int flags = 0;
|
|
|
|
|
|
|
|
switch (mfp->recipient) {
|
|
|
|
case QEMU_MONITOR_MEMORY_FAILURE_RECIPIENT_HYPERVISOR:
|
|
|
|
recipient = VIR_DOMAIN_EVENT_MEMORY_FAILURE_RECIPIENT_HYPERVISOR;
|
|
|
|
break;
|
|
|
|
case QEMU_MONITOR_MEMORY_FAILURE_RECIPIENT_GUEST:
|
|
|
|
recipient = VIR_DOMAIN_EVENT_MEMORY_FAILURE_RECIPIENT_GUEST;
|
|
|
|
break;
|
|
|
|
case QEMU_MONITOR_MEMORY_FAILURE_RECIPIENT_LAST:
|
|
|
|
default:
|
2021-07-20 08:05:06 +00:00
|
|
|
return;
|
2020-10-14 10:37:51 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
switch (mfp->action) {
|
|
|
|
case QEMU_MONITOR_MEMORY_FAILURE_ACTION_IGNORE:
|
|
|
|
action = VIR_DOMAIN_EVENT_MEMORY_FAILURE_ACTION_IGNORE;
|
|
|
|
break;
|
|
|
|
case QEMU_MONITOR_MEMORY_FAILURE_ACTION_INJECT:
|
|
|
|
action = VIR_DOMAIN_EVENT_MEMORY_FAILURE_ACTION_INJECT;
|
|
|
|
break;
|
|
|
|
case QEMU_MONITOR_MEMORY_FAILURE_ACTION_FATAL:
|
|
|
|
action = VIR_DOMAIN_EVENT_MEMORY_FAILURE_ACTION_FATAL;
|
|
|
|
break;
|
|
|
|
case QEMU_MONITOR_MEMORY_FAILURE_ACTION_RESET:
|
|
|
|
action = VIR_DOMAIN_EVENT_MEMORY_FAILURE_ACTION_RESET;
|
|
|
|
break;
|
|
|
|
case QEMU_MONITOR_MEMORY_FAILURE_ACTION_LAST:
|
|
|
|
default:
|
2021-07-20 08:05:06 +00:00
|
|
|
return;
|
2020-10-14 10:37:51 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (mfp->action_required)
|
|
|
|
flags |= VIR_DOMAIN_MEMORY_FAILURE_ACTION_REQUIRED;
|
|
|
|
if (mfp->recursive)
|
|
|
|
flags |= VIR_DOMAIN_MEMORY_FAILURE_RECURSIVE;
|
|
|
|
|
|
|
|
event = virDomainEventMemoryFailureNewFromObj(vm, recipient, action, flags);
|
|
|
|
virObjectEventStateQueue(driver->domainEventState, event);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2021-10-25 13:15:09 +00:00
|
|
|
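/* Handler for the MEMORY_DEVICE_SIZE_CHANGE monitor event: record the new
 * size reported for the memory device and submit it as a process event so it
 * can be applied to the domain definition outside the monitor callback. */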
static void
|
2021-01-21 13:16:44 +00:00
|
|
|
qemuProcessHandleMemoryDeviceSizeChange(qemuMonitor *mon G_GNUC_UNUSED,
|
|
|
|
virDomainObj *vm,
|
|
|
|
const char *devAlias,
|
|
|
|
unsigned long long size,
|
|
|
|
void *opaque)
|
|
|
|
{
|
|
|
|
virQEMUDriver *driver = opaque;
|
|
|
|
struct qemuProcessEvent *processEvent = NULL;
|
|
|
|
qemuMonitorMemoryDeviceSizeChange *info = NULL;
|
|
|
|
|
|
|
|
virObjectLock(vm);
|
|
|
|
|
|
|
|
VIR_DEBUG("Memory device '%s' changed size to '%llu' in domain '%s'",
|
|
|
|
devAlias, size, vm->def->name);
|
|
|
|
|
|
|
|
info = g_new0(qemuMonitorMemoryDeviceSizeChange, 1);
|
|
|
|
info->devAlias = g_strdup(devAlias);
|
|
|
|
info->size = size;
|
|
|
|
|
|
|
|
processEvent = g_new0(struct qemuProcessEvent, 1);
|
|
|
|
processEvent->eventType = QEMU_PROCESS_EVENT_MEMORY_DEVICE_SIZE_CHANGE;
|
|
|
|
processEvent->vm = virObjectRef(vm);
|
|
|
|
processEvent->data = g_steal_pointer(&info);
|
|
|
|
|
2021-10-25 13:14:51 +00:00
|
|
|
qemuProcessEventSubmit(driver, &processEvent);
|
2021-01-21 13:16:44 +00:00
|
|
|
|
|
|
|
virObjectUnlock(vm);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
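/* Table of callbacks invoked when the corresponding QMP events arrive on the
 * monitor of a domain. */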
static qemuMonitorCallbacks monitorCallbacks = {
|
|
|
|
.eofNotify = qemuProcessHandleMonitorEOF,
|
2011-05-29 12:37:29 +00:00
|
|
|
.errorNotify = qemuProcessHandleMonitorError,
|
2014-01-30 00:14:44 +00:00
|
|
|
.domainEvent = qemuProcessHandleEvent,
|
2011-02-14 16:09:39 +00:00
|
|
|
.domainShutdown = qemuProcessHandleShutdown,
|
|
|
|
.domainStop = qemuProcessHandleStop,
|
2013-01-07 21:25:01 +00:00
|
|
|
.domainResume = qemuProcessHandleResume,
|
2011-02-14 16:09:39 +00:00
|
|
|
.domainReset = qemuProcessHandleReset,
|
|
|
|
.domainRTCChange = qemuProcessHandleRTCChange,
|
|
|
|
.domainWatchdog = qemuProcessHandleWatchdog,
|
|
|
|
.domainIOError = qemuProcessHandleIOError,
|
|
|
|
.domainGraphics = qemuProcessHandleGraphics,
|
2011-07-22 05:57:42 +00:00
|
|
|
.domainBlockJob = qemuProcessHandleBlockJob,
|
2018-12-05 09:40:45 +00:00
|
|
|
.jobStatusChange = qemuProcessHandleJobStatusChange,
|
2012-03-23 13:44:50 +00:00
|
|
|
.domainTrayChange = qemuProcessHandleTrayChange,
|
2012-03-23 14:43:14 +00:00
|
|
|
.domainPMWakeup = qemuProcessHandlePMWakeup,
|
2012-03-23 14:50:36 +00:00
|
|
|
.domainPMSuspend = qemuProcessHandlePMSuspend,
|
2012-07-12 15:45:57 +00:00
|
|
|
.domainBalloonChange = qemuProcessHandleBalloonChange,
|
2012-10-12 19:13:39 +00:00
|
|
|
.domainPMSuspendDisk = qemuProcessHandlePMSuspendDisk,
|
2013-06-07 10:23:34 +00:00
|
|
|
.domainGuestPanic = qemuProcessHandleGuestPanic,
|
2013-07-11 15:11:02 +00:00
|
|
|
.domainDeviceDeleted = qemuProcessHandleDeviceDeleted,
|
2014-09-17 17:07:50 +00:00
|
|
|
.domainNicRxFilterChanged = qemuProcessHandleNicRxFilterChanged,
|
2014-11-13 13:09:39 +00:00
|
|
|
.domainSerialChange = qemuProcessHandleSerialChanged,
|
2015-05-25 14:57:49 +00:00
|
|
|
.domainSpiceMigrated = qemuProcessHandleSpiceMigrated,
|
2015-05-29 06:37:59 +00:00
|
|
|
.domainMigrationStatus = qemuProcessHandleMigrationStatus,
|
2015-12-08 14:23:35 +00:00
|
|
|
.domainMigrationPass = qemuProcessHandleMigrationPass,
|
2016-04-01 15:48:20 +00:00
|
|
|
.domainAcpiOstInfo = qemuProcessHandleAcpiOstInfo,
|
2017-02-22 16:51:26 +00:00
|
|
|
.domainBlockThreshold = qemuProcessHandleBlockThreshold,
|
2017-11-20 14:51:22 +00:00
|
|
|
.domainDumpCompleted = qemuProcessHandleDumpCompleted,
|
2018-06-27 10:17:59 +00:00
|
|
|
.domainPRManagerStatusChanged = qemuProcessHandlePRManagerStatusChanged,
|
2018-12-24 10:15:12 +00:00
|
|
|
.domainRdmaGidStatusChanged = qemuProcessHandleRdmaGidStatusChanged,
|
2020-02-04 07:41:00 +00:00
|
|
|
.domainGuestCrashloaded = qemuProcessHandleGuestCrashloaded,
|
2020-10-14 10:37:51 +00:00
|
|
|
.domainMemoryFailure = qemuProcessHandleMemoryFailure,
|
2021-01-21 13:16:44 +00:00
|
|
|
.domainMemoryDeviceSizeChange = qemuProcessHandleMemoryDeviceSizeChange,
|
2021-10-29 19:54:26 +00:00
|
|
|
.domainDeviceUnplugError = qemuProcessHandleDeviceUnplugErr,
|
2011-02-14 16:09:39 +00:00
|
|
|
};
|
|
|
|
|
2015-11-12 13:54:04 +00:00
|
|
|
static void
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessMonitorReportLogError(qemuMonitor *mon,
|
2015-11-12 13:54:04 +00:00
|
|
|
const char *msg,
|
|
|
|
void *opaque);
|
|
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
qemuProcessMonitorLogFree(void *opaque)
|
|
|
|
{
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainLogContext *logCtxt = opaque;
|
2020-03-16 12:10:24 +00:00
|
|
|
g_clear_object(&logCtxt);
|
2015-11-12 13:54:04 +00:00
|
|
|
}
|
|
|
|
|
2017-10-16 10:52:36 +00:00
|
|
|
|
|
|
|
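/* Enter the monitor on the given async job and negotiate QMP capabilities
 * with the freshly connected monitor. */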
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessInitMonitor(virQEMUDriver *driver,
|
|
|
|
virDomainObj *vm,
|
2017-10-16 10:52:36 +00:00
|
|
|
qemuDomainAsyncJob asyncJob)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
ret = qemuMonitorSetCapabilities(QEMU_DOMAIN_PRIVATE(vm)->mon);
|
|
|
|
|
2021-11-24 12:09:32 +00:00
|
|
|
qemuDomainObjExitMonitor(driver, vm);
|
2017-10-16 10:52:36 +00:00
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
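/* Connect to the monitor of a starting guest: label the monitor socket, open
 * the monitor (attaching the domain log context for better early error
 * reporting), negotiate capabilities and refresh migration capabilities. */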
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuConnectMonitor(virQEMUDriver *driver, virDomainObj *vm, int asyncJob,
|
|
|
|
bool retry, qemuDomainLogContext *logCtxt)
|
2011-02-14 16:09:39 +00:00
|
|
|
{
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
|
|
|
qemuMonitor *mon = NULL;
|
2017-03-11 06:23:42 +00:00
|
|
|
unsigned long long timeout = 0;
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2017-02-13 13:36:53 +00:00
|
|
|
if (qemuSecuritySetDaemonSocketLabel(driver->securityManager, vm->def) < 0) {
|
2011-02-14 16:09:39 +00:00
|
|
|
VIR_ERROR(_("Failed to set security context for monitor for %s"),
|
|
|
|
vm->def->name);
|
2015-07-07 16:17:25 +00:00
|
|
|
return -1;
|
2011-02-14 16:09:39 +00:00
|
|
|
}
|
|
|
|
|
2017-03-11 06:23:42 +00:00
|
|
|
/* When using hugepages, the kernel zeroes them out before
|
|
|
|
* handing them over to qemu. This can be very time
|
|
|
|
* consuming. Therefore, add a second to timeout for each
|
|
|
|
* 1GiB of guest RAM. */
|
2021-03-15 14:47:04 +00:00
|
|
|
timeout = virDomainDefGetMemoryTotal(vm->def) / (1024 * 1024);
|
2017-03-11 06:23:42 +00:00
|
|
|
|
2011-11-29 12:33:23 +00:00
|
|
|
ignore_value(virTimeMillisNow(&priv->monStart));
|
2011-08-16 10:51:36 +00:00
|
|
|
|
|
|
|
mon = qemuMonitorOpen(vm,
|
2019-10-08 07:49:18 +00:00
|
|
|
priv->monConfig,
|
2018-03-14 17:27:49 +00:00
|
|
|
retry,
|
2017-03-11 06:23:42 +00:00
|
|
|
timeout,
|
2020-02-12 14:54:19 +00:00
|
|
|
virEventThreadGetContext(priv->eventThread),
|
2013-07-25 17:26:15 +00:00
|
|
|
&monitorCallbacks,
|
|
|
|
driver);
|
2011-08-16 10:51:36 +00:00
|
|
|
|
2015-11-12 13:54:04 +00:00
|
|
|
if (mon && logCtxt) {
|
2020-03-16 12:10:24 +00:00
|
|
|
g_object_ref(logCtxt);
|
2015-11-12 13:54:04 +00:00
|
|
|
qemuMonitorSetDomainLog(mon,
|
|
|
|
qemuProcessMonitorReportLogError,
|
|
|
|
logCtxt,
|
|
|
|
qemuProcessMonitorLogFree);
|
|
|
|
}
|
qemu: Wire up better early error reporting
The previous patches added infrastructure to report better errors from
the monitor in some cases. This patch finalizes this "feature" by enabling
the enhanced error reporting in the early phases of VM startup. In these
phases the possibility of qemu producing a useful error message is
much higher than during the rest of the VM's life cycle. After
startup is complete, the feature is disabled to provide the usual
error messages so that users are not confused by possibly irrelevant
messages that may be in the domain log.
The original motivation for this enhancement is to capture errors when
using VFIO device passthrough, where qemu reports errors after the
monitor is initialized and the existing error catching code couldn't
catch them, producing an unhelpful message:
# virsh start test
error: Failed to start domain test
error: Unable to read from monitor: Connection reset by peer
With this change, the message is changed to:
# virsh start test
error: Failed to start domain test
error: internal error: early end of file from monitor: possible problem:
qemu-system-x86_64: -device vfio-pci,host=00:1a.0,id=hostdev0,bus=pci.0,addr=0x5: vfio: error, group 8 is not viable, please ensure all devices within the iommu_group are bound to their vfio bus driver.
qemu-system-x86_64: -device vfio-pci,host=00:1a.0,id=hostdev0,bus=pci.0,addr=0x5: vfio: failed to get group 8
qemu-system-x86_64: -device vfio-pci,host=00:1a.0,id=hostdev0,bus=pci.0,addr=0x5: Device 'vfio-pci' could not be initialized
2013-09-18 14:23:14 +00:00
|
|
|
|
2011-08-16 10:51:36 +00:00
|
|
|
priv->monStart = 0;
|
|
|
|
priv->mon = mon;
|
|
|
|
|
2017-02-13 13:36:53 +00:00
|
|
|
if (qemuSecurityClearSocketLabel(driver->securityManager, vm->def) < 0) {
|
2011-02-14 16:09:39 +00:00
|
|
|
VIR_ERROR(_("Failed to clear security context for monitor for %s"),
|
|
|
|
vm->def->name);
|
2015-07-07 16:17:25 +00:00
|
|
|
return -1;
|
2011-02-14 16:09:39 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (priv->mon == NULL) {
|
|
|
|
VIR_INFO("Failed to connect monitor for %s", vm->def->name);
|
2015-07-07 16:17:25 +00:00
|
|
|
return -1;
|
2011-02-14 16:09:39 +00:00
|
|
|
}
|
|
|
|
|
2017-10-16 10:52:36 +00:00
|
|
|
if (qemuProcessInitMonitor(driver, vm, asyncJob) < 0)
|
|
|
|
return -1;
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2018-02-20 15:40:21 +00:00
|
|
|
if (qemuMigrationCapsCheck(driver, vm, asyncJob) < 0)
|
2017-10-16 10:52:36 +00:00
|
|
|
return -1;
|
|
|
|
|
|
|
|
return 0;
|
2011-02-14 16:09:39 +00:00
|
|
|
}
|
|
|
|
|
2013-09-18 12:43:52 +00:00
|
|
|
|
|
|
|
/**
|
|
|
|
* qemuProcessReadLog: Read log file of a qemu VM
|
2015-11-12 13:54:04 +00:00
|
|
|
* @logCtxt: the domain log context
|
2015-11-12 11:01:07 +00:00
|
|
|
* @msg: pointer to buffer to store the read messages in
|
2017-11-22 12:12:00 +00:00
|
|
|
* @max: maximum length of the message returned in @msg
|
2013-09-18 12:43:52 +00:00
|
|
|
*
|
|
|
|
* Reads log of a qemu VM. Skips messages not produced by qemu or irrelevant
|
2017-11-22 12:12:00 +00:00
|
|
|
* messages. If @max is not zero, @msg will contain at most @max characters
|
|
|
|
* from the end of the log and @msg will start after a new line if possible.
|
|
|
|
*
|
|
|
|
* Returns 0 on success or -1 on error
|
2013-09-18 12:43:52 +00:00
|
|
|
*/
|
2015-11-12 11:01:07 +00:00
|
|
|
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessReadLog(qemuDomainLogContext *logCtxt,
|
2017-11-22 12:12:00 +00:00
|
|
|
char **msg,
|
|
|
|
size_t max)
|
2013-04-22 16:16:57 +00:00
|
|
|
{
|
2015-11-12 11:01:07 +00:00
|
|
|
char *buf;
|
|
|
|
ssize_t got;
|
2013-04-22 16:16:57 +00:00
|
|
|
char *eol;
|
2015-11-12 11:01:07 +00:00
|
|
|
char *filter_next;
|
2017-11-22 12:12:00 +00:00
|
|
|
size_t skip;
|
2013-04-22 16:16:57 +00:00
|
|
|
|
2015-11-12 13:54:04 +00:00
|
|
|
if ((got = qemuDomainLogContextRead(logCtxt, &buf)) < 0)
|
2015-11-12 11:01:07 +00:00
|
|
|
return -1;
|
2013-12-03 16:38:14 +00:00
|
|
|
|
2015-11-12 11:01:07 +00:00
|
|
|
/* Filter out debug messages from intermediate libvirt process */
|
|
|
|
filter_next = buf;
|
|
|
|
while ((eol = strchr(filter_next, '\n'))) {
|
|
|
|
*eol = '\0';
|
|
|
|
if (virLogProbablyLogMessage(filter_next) ||
|
2017-11-22 09:19:12 +00:00
|
|
|
strstr(filter_next, "char device redirected to")) {
|
2017-11-22 12:12:00 +00:00
|
|
|
skip = (eol + 1) - filter_next;
|
2016-01-18 09:50:14 +00:00
|
|
|
memmove(filter_next, eol + 1, buf + got - eol);
|
2015-11-12 11:01:07 +00:00
|
|
|
got -= skip;
|
|
|
|
} else {
|
|
|
|
filter_next = eol + 1;
|
|
|
|
*eol = '\n';
|
2013-12-03 16:38:14 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-11-12 13:54:04 +00:00
|
|
|
if (got > 0 &&
|
|
|
|
buf[got - 1] == '\n') {
|
2015-11-12 11:01:07 +00:00
|
|
|
buf[got - 1] = '\0';
|
|
|
|
got--;
|
2013-12-03 16:38:14 +00:00
|
|
|
}
|
2017-11-22 12:12:00 +00:00
|
|
|
|
|
|
|
if (max > 0 && got > max) {
|
|
|
|
skip = got - max;
|
|
|
|
|
|
|
|
if (buf[skip - 1] != '\n' &&
|
|
|
|
(eol = strchr(buf + skip, '\n')) &&
|
|
|
|
!virStringIsEmpty(eol + 1))
|
|
|
|
skip = eol + 1 - buf;
|
|
|
|
|
|
|
|
memmove(buf, buf + skip, got - skip + 1);
|
|
|
|
got -= skip;
|
|
|
|
}
|
|
|
|
|
2020-09-11 11:42:13 +00:00
|
|
|
buf = g_renew(char, buf, got + 1);
|
2015-11-12 11:01:07 +00:00
|
|
|
*msg = buf;
|
|
|
|
return 0;
|
|
|
|
}
|
2013-12-03 16:38:14 +00:00
|
|
|
|
|
|
|
|
2015-11-12 13:54:04 +00:00
|
|
|
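/* Report an error that combines @msgprefix with the tail of the domain log. */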
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessReportLogError(qemuDomainLogContext *logCtxt,
|
2015-11-12 11:01:07 +00:00
|
|
|
const char *msgprefix)
|
|
|
|
{
|
2019-12-20 21:16:28 +00:00
|
|
|
g_autofree char *logmsg = NULL;
|
2017-11-22 12:12:00 +00:00
|
|
|
|
2021-02-23 14:27:07 +00:00
|
|
|
/* assume that 1024 chars of qemu log is the right balance */
|
|
|
|
if (qemuProcessReadLog(logCtxt, &logmsg, 1024) < 0)
|
2015-11-12 11:01:07 +00:00
|
|
|
return -1;
|
|
|
|
|
|
|
|
virResetLastError();
|
2016-06-08 10:03:38 +00:00
|
|
|
if (virStringIsEmpty(logmsg))
|
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR, "%s", msgprefix);
|
|
|
|
else
|
2021-02-25 16:52:47 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR, "%s: %s", /* _( silence sc_libvirt_unmarked_diagnostics */
|
|
|
|
msgprefix, logmsg);
|
2016-06-08 10:03:38 +00:00
|
|
|
|
2015-11-12 11:01:07 +00:00
|
|
|
return 0;
|
2013-12-03 16:38:14 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2015-11-12 13:54:04 +00:00
|
|
|
static void
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessMonitorReportLogError(qemuMonitor *mon G_GNUC_UNUSED,
|
2015-11-12 13:54:04 +00:00
|
|
|
const char *msg,
|
|
|
|
void *opaque)
|
|
|
|
{
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainLogContext *logCtxt = opaque;
|
2015-11-12 13:54:04 +00:00
|
|
|
qemuProcessReportLogError(logCtxt, msg);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-03-30 09:07:59 +00:00
|
|
|
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessLookupPTYs(virDomainChrDef **devices,
|
2011-03-30 09:07:59 +00:00
|
|
|
int count,
|
2020-10-22 17:04:18 +00:00
|
|
|
GHashTable *info)
|
2011-03-30 09:07:59 +00:00
|
|
|
{
|
Convert 'int i' to 'size_t i' in src/qemu files
Convert the type of loop iterators named 'i', 'j', 'k',
'ii', 'jj', 'kk', to be 'size_t' instead of 'int' or
'unsigned int', also sanitizing 'ii', 'jj', 'kk' to use
the normal 'i', 'j', 'k' naming
Signed-off-by: Daniel P. Berrange <berrange@redhat.com>
2013-07-08 14:09:33 +00:00
|
|
|
size_t i;
|
2011-03-30 09:07:59 +00:00
|
|
|
|
2013-05-21 07:21:20 +00:00
|
|
|
for (i = 0; i < count; i++) {
|
2019-12-20 21:16:28 +00:00
|
|
|
g_autofree char *id = NULL;
|
2021-03-11 07:16:13 +00:00
|
|
|
virDomainChrDef *chr = devices[i];
|
2016-10-21 11:45:54 +00:00
|
|
|
if (chr->source->type == VIR_DOMAIN_CHR_TYPE_PTY) {
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuMonitorChardevInfo *entry;
|
2011-03-30 09:07:59 +00:00
|
|
|
|
2019-10-22 13:26:14 +00:00
|
|
|
id = g_strdup_printf("char%s", chr->info.alias);
|
2011-03-30 09:07:59 +00:00
|
|
|
|
2014-11-13 18:29:14 +00:00
|
|
|
entry = virHashLookup(info, id);
|
|
|
|
if (!entry || !entry->ptyPath) {
|
2016-10-21 11:45:54 +00:00
|
|
|
if (chr->source->data.file.path == NULL) {
|
2011-03-30 09:07:59 +00:00
|
|
|
/* neither the log output nor 'info chardev' had a
|
|
|
|
* pty path for this chardev, report an error
|
|
|
|
*/
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
_("no assigned pty for device %s"), id);
|
2019-12-20 21:16:31 +00:00
|
|
|
return -1;
|
2011-03-30 09:07:59 +00:00
|
|
|
} else {
|
|
|
|
/* 'info chardev' had no pty path for this chardev,
|
|
|
|
* but the log output had, so we're fine
|
|
|
|
*/
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-12-20 21:16:28 +00:00
|
|
|
g_free(chr->source->data.file.path);
|
2019-10-20 11:49:46 +00:00
|
|
|
chr->source->data.file.path = g_strdup(entry->ptyPath);
|
2011-03-30 09:07:59 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-12-20 21:16:31 +00:00
|
|
|
return 0;
|
2011-03-30 09:07:59 +00:00
|
|
|
}
|
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
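/* Resolve the host pty paths of all serial, parallel, channel and console
 * devices from the chardev info reported by the monitor. */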
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessFindCharDevicePTYsMonitor(virDomainObj *vm,
|
2020-10-22 17:04:18 +00:00
|
|
|
GHashTable *info)
|
2011-02-14 16:09:39 +00:00
|
|
|
{
|
2013-07-08 14:09:33 +00:00
|
|
|
size_t i = 0;
|
2011-06-08 16:25:11 +00:00
|
|
|
|
2017-07-10 21:30:03 +00:00
|
|
|
if (qemuProcessLookupPTYs(vm->def->serials, vm->def->nserials, info) < 0)
|
2011-03-30 09:07:59 +00:00
|
|
|
return -1;
|
|
|
|
|
2017-07-10 21:30:03 +00:00
|
|
|
if (qemuProcessLookupPTYs(vm->def->parallels, vm->def->nparallels,
|
2014-11-13 15:17:21 +00:00
|
|
|
info) < 0)
|
2011-03-30 09:07:59 +00:00
|
|
|
return -1;
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2017-07-10 21:30:03 +00:00
|
|
|
if (qemuProcessLookupPTYs(vm->def->channels, vm->def->nchannels, info) < 0)
|
2011-03-30 09:07:59 +00:00
|
|
|
return -1;
|
2013-01-02 14:36:33 +00:00
|
|
|
/* For historical reasons, console[0] can be just an alias
|
|
|
|
* for serial[0]. That's why we need to update it as well. */
|
|
|
|
if (vm->def->nconsoles) {
|
2021-03-11 07:16:13 +00:00
|
|
|
virDomainChrDef *chr = vm->def->consoles[0];
|
2011-03-30 09:07:59 +00:00
|
|
|
|
2013-01-02 14:36:33 +00:00
|
|
|
if (vm->def->nserials &&
|
|
|
|
chr->deviceType == VIR_DOMAIN_CHR_DEVICE_TYPE_CONSOLE &&
|
|
|
|
chr->targetType == VIR_DOMAIN_CHR_CONSOLE_TARGET_TYPE_SERIAL) {
|
|
|
|
/* yes, the first console is just an alias for serials[0] */
|
|
|
|
i = 1;
|
2022-01-24 07:58:00 +00:00
|
|
|
virDomainChrSourceDefCopy(chr->source,
|
|
|
|
((vm->def->serials[0])->source));
|
2013-01-02 14:36:33 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-07-10 21:30:03 +00:00
|
|
|
if (qemuProcessLookupPTYs(vm->def->consoles + i, vm->def->nconsoles - i,
|
2014-11-13 15:17:21 +00:00
|
|
|
info) < 0)
|
2011-03-30 09:07:59 +00:00
|
|
|
return -1;
|
2011-02-14 16:09:39 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2019-10-22 13:26:14 +00:00
|
|
|
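/* Update the connection state of virtio channels from the chardev info and
 * emit a guest agent lifecycle event if the agent channel changed state. */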
static void
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessRefreshChannelVirtioState(virQEMUDriver *driver,
|
|
|
|
virDomainObj *vm,
|
2020-10-22 17:04:18 +00:00
|
|
|
GHashTable *info,
|
2014-11-19 09:31:21 +00:00
|
|
|
int booted)
|
2014-11-13 19:16:46 +00:00
|
|
|
{
|
|
|
|
size_t i;
|
2014-11-19 09:31:21 +00:00
|
|
|
int agentReason = VIR_CONNECT_DOMAIN_EVENT_AGENT_LIFECYCLE_REASON_CHANNEL;
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuMonitorChardevInfo *entry;
|
|
|
|
virObjectEvent *event = NULL;
|
2019-12-20 21:16:28 +00:00
|
|
|
g_autofree char *id = NULL;
|
2014-11-13 19:16:46 +00:00
|
|
|
|
2014-11-19 09:31:21 +00:00
|
|
|
if (booted)
|
|
|
|
agentReason = VIR_CONNECT_DOMAIN_EVENT_AGENT_LIFECYCLE_REASON_DOMAIN_STARTED;
|
|
|
|
|
2014-11-13 19:16:46 +00:00
|
|
|
for (i = 0; i < vm->def->nchannels; i++) {
|
2021-03-11 07:16:13 +00:00
|
|
|
virDomainChrDef *chr = vm->def->channels[i];
|
2014-11-13 19:16:46 +00:00
|
|
|
if (chr->targetType == VIR_DOMAIN_CHR_CHANNEL_TARGET_TYPE_VIRTIO) {
|
2018-03-29 06:50:01 +00:00
|
|
|
|
|
|
|
VIR_FREE(id);
|
2019-10-22 13:26:14 +00:00
|
|
|
id = g_strdup_printf("char%s", chr->info.alias);
|
2014-11-13 19:16:46 +00:00
|
|
|
|
|
|
|
/* port state not reported */
|
|
|
|
if (!(entry = virHashLookup(info, id)) ||
|
|
|
|
!entry->state)
|
|
|
|
continue;
|
|
|
|
|
2014-11-19 09:31:21 +00:00
|
|
|
if (entry->state != VIR_DOMAIN_CHR_DEVICE_STATE_DEFAULT &&
|
|
|
|
STREQ_NULLABLE(chr->target.name, "org.qemu.guest_agent.0") &&
|
|
|
|
(event = virDomainEventAgentLifecycleNewFromObj(vm, entry->state,
|
|
|
|
agentReason)))
|
2018-06-12 17:33:02 +00:00
|
|
|
virObjectEventStateQueue(driver->domainEventState, event);
|
2014-11-19 09:31:21 +00:00
|
|
|
|
2014-11-13 19:16:46 +00:00
|
|
|
chr->state = entry->state;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2016-01-08 16:03:48 +00:00
|
|
|
int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuRefreshVirtioChannelState(virQEMUDriver *driver,
|
|
|
|
virDomainObj *vm,
|
2016-06-29 13:52:49 +00:00
|
|
|
qemuDomainAsyncJob asyncJob)
|
2014-11-13 19:16:46 +00:00
|
|
|
{
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
2021-11-30 10:49:24 +00:00
|
|
|
g_autoptr(GHashTable) info = NULL;
|
|
|
|
int rc;
|
2014-11-13 19:16:46 +00:00
|
|
|
|
2016-06-29 13:52:49 +00:00
|
|
|
if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
|
2021-11-30 10:49:24 +00:00
|
|
|
return -1;
|
2016-06-29 13:52:49 +00:00
|
|
|
|
2021-11-30 10:49:24 +00:00
|
|
|
rc = qemuMonitorGetChardevInfo(priv->mon, &info);
|
2021-11-24 12:09:32 +00:00
|
|
|
qemuDomainObjExitMonitor(driver, vm);
|
2014-11-13 19:16:46 +00:00
|
|
|
|
2021-11-30 10:49:24 +00:00
|
|
|
if (rc < 0)
|
|
|
|
return -1;
|
2014-11-13 19:16:46 +00:00
|
|
|
|
2019-10-22 13:26:14 +00:00
|
|
|
qemuProcessRefreshChannelVirtioState(driver, vm, info, false);
|
2014-11-13 19:16:46 +00:00
|
|
|
|
2021-11-30 10:49:24 +00:00
|
|
|
return 0;
|
2014-11-13 19:16:46 +00:00
|
|
|
}
|
|
|
|
|
2018-06-27 13:57:30 +00:00
|
|
|
|
|
|
|
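/* Check whether the managed pr-helper is still connected according to QEMU
 * and restart it if it is not. */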
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessRefreshPRManagerState(virDomainObj *vm,
|
2020-10-22 17:04:18 +00:00
|
|
|
GHashTable *info)
|
2018-06-27 13:57:30 +00:00
|
|
|
{
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
|
|
|
qemuMonitorPRManagerInfo *prManagerInfo;
|
2018-06-27 13:57:30 +00:00
|
|
|
const char *managedAlias = qemuDomainGetManagedPRAlias();
|
|
|
|
|
|
|
|
if (!(prManagerInfo = virHashLookup(info, managedAlias))) {
|
|
|
|
virReportError(VIR_ERR_OPERATION_FAILED,
|
|
|
|
_("missing info on pr-manager %s"),
|
|
|
|
managedAlias);
|
2019-11-12 20:46:27 +00:00
|
|
|
return -1;
|
2018-06-27 13:57:30 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
priv->prDaemonRunning = prManagerInfo->connected;
|
|
|
|
|
|
|
|
if (!priv->prDaemonRunning &&
|
|
|
|
qemuProcessStartManagedPRDaemon(vm) < 0)
|
2019-11-12 20:46:27 +00:00
|
|
|
return -1;
|
2018-06-27 13:57:30 +00:00
|
|
|
|
2019-11-12 20:46:27 +00:00
|
|
|
return 0;
|
2018-06-27 13:57:30 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuRefreshPRManagerState(virQEMUDriver *driver,
|
|
|
|
virDomainObj *vm)
|
2018-06-27 13:57:30 +00:00
|
|
|
{
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
2021-11-30 10:49:24 +00:00
|
|
|
g_autoptr(GHashTable) info = NULL;
|
|
|
|
int rc;
|
2018-06-27 13:57:30 +00:00
|
|
|
|
|
|
|
if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_PR_MANAGER_HELPER) ||
|
2019-03-21 12:54:20 +00:00
|
|
|
!qemuDomainDefHasManagedPR(vm))
|
2018-06-27 13:57:30 +00:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
qemuDomainObjEnterMonitor(driver, vm);
|
2021-11-30 10:49:24 +00:00
|
|
|
rc = qemuMonitorGetPRManagerInfo(priv->mon, &info);
|
2021-11-24 12:09:32 +00:00
|
|
|
qemuDomainObjExitMonitor(driver, vm);
|
2018-06-27 13:57:30 +00:00
|
|
|
|
2021-11-30 10:49:24 +00:00
|
|
|
if (rc < 0)
|
|
|
|
return -1;
|
2018-06-27 13:57:30 +00:00
|
|
|
|
|
|
|
|
2021-11-30 10:49:24 +00:00
|
|
|
return qemuProcessRefreshPRManagerState(vm, info);
|
2018-06-27 13:57:30 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2016-04-29 16:01:39 +00:00
|
|
|
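/* For domains with a 'variable' clock offset, query the guest RTC time from
 * QEMU and recompute the stored clock adjustment. */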
static void
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuRefreshRTC(virQEMUDriver *driver,
|
|
|
|
virDomainObj *vm)
|
2016-04-29 16:01:39 +00:00
|
|
|
{
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
2016-04-29 16:01:39 +00:00
|
|
|
time_t now, then;
|
|
|
|
struct tm thenbits;
|
|
|
|
long localOffset;
|
|
|
|
int rv;
|
|
|
|
|
|
|
|
if (vm->def->clock.offset != VIR_DOMAIN_CLOCK_OFFSET_VARIABLE)
|
|
|
|
return;
|
|
|
|
|
|
|
|
memset(&thenbits, 0, sizeof(thenbits));
|
|
|
|
qemuDomainObjEnterMonitor(driver, vm);
|
|
|
|
now = time(NULL);
|
|
|
|
rv = qemuMonitorGetRTCTime(priv->mon, &thenbits);
|
2021-11-24 12:09:32 +00:00
|
|
|
qemuDomainObjExitMonitor(driver, vm);
|
2016-04-29 16:01:39 +00:00
|
|
|
|
|
|
|
if (rv < 0)
|
|
|
|
return;
|
|
|
|
|
|
|
|
thenbits.tm_isdst = -1;
|
2018-04-25 12:42:34 +00:00
|
|
|
if ((then = mktime(&thenbits)) == (time_t)-1) {
|
2016-04-29 16:01:39 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
|
|
|
|
_("Unable to convert time"));
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Thing is, @now is in local TZ but @then in UTC. */
|
|
|
|
if (virTimeLocalOffsetFromUTC(&localOffset) < 0)
|
|
|
|
return;
|
|
|
|
|
|
|
|
vm->def->clock.data.variable.adjustment = then - now + localOffset;
|
|
|
|
}
|
2014-11-13 19:16:46 +00:00
|
|
|
|
2016-04-06 13:57:57 +00:00
|
|
|
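/* Refresh the current balloon size from the monitor, adding back the current
 * size of any virtio-mem devices which QEMU does not include in the value it
 * reports. */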
int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessRefreshBalloonState(virQEMUDriver *driver,
|
|
|
|
virDomainObj *vm,
|
2015-06-30 14:31:24 +00:00
|
|
|
int asyncJob)
|
|
|
|
{
|
|
|
|
unsigned long long balloon;
|
2021-03-15 16:18:56 +00:00
|
|
|
size_t i;
|
2015-06-30 14:31:24 +00:00
|
|
|
int rc;
|
|
|
|
|
|
|
|
/* if no ballooning is available, the current size equals the current
|
|
|
|
* full memory size */
|
2016-04-06 13:02:31 +00:00
|
|
|
if (!virDomainDefHasMemballoon(vm->def)) {
|
2016-06-15 13:34:04 +00:00
|
|
|
vm->def->mem.cur_balloon = virDomainDefGetMemoryTotal(vm->def);
|
2015-06-30 14:31:24 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
rc = qemuMonitorGetBalloonInfo(qemuDomainGetMonitor(vm), &balloon);
|
2021-11-24 12:11:52 +00:00
|
|
|
qemuDomainObjExitMonitor(driver, vm);
|
|
|
|
if (rc < 0)
|
2015-06-30 14:31:24 +00:00
|
|
|
return -1;
|
|
|
|
|
2021-03-15 16:18:56 +00:00
|
|
|
/* We want the balloon size stored in domain definition to
|
|
|
|
* account for the actual size of virtio-mem too. But the
|
|
|
|
* balloon size as reported by QEMU (@balloon) contains just
|
|
|
|
* the balloon size without any virtio-mem. Do a wee bit of
|
|
|
|
* math to fix it. */
|
|
|
|
VIR_DEBUG("balloon size before fix is %lld", balloon);
|
|
|
|
for (i = 0; i < vm->def->nmems; i++) {
|
|
|
|
if (vm->def->mems[i]->model == VIR_DOMAIN_MEMORY_MODEL_VIRTIO_MEM)
|
|
|
|
balloon += vm->def->mems[i]->currentsize;
|
|
|
|
}
|
|
|
|
VIR_DEBUG("Updating balloon from %lld to %lld kb",
|
|
|
|
vm->def->mem.cur_balloon, balloon);
|
2015-06-30 14:31:24 +00:00
|
|
|
vm->def->mem.cur_balloon = balloon;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
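/* Wait for the monitor of a freshly started guest to show up, connect to it
 * and refresh the pty paths and virtio channel states of the domain. */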
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessWaitForMonitor(virQEMUDriver *driver,
|
|
|
|
virDomainObj *vm,
|
2014-08-12 02:54:42 +00:00
|
|
|
int asyncJob,
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainLogContext *logCtxt)
|
2011-02-14 16:09:39 +00:00
|
|
|
{
|
|
|
|
int ret = -1;
|
2021-11-30 10:49:24 +00:00
|
|
|
g_autoptr(GHashTable) info = NULL;
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
2018-03-14 17:27:49 +00:00
|
|
|
bool retry = true;
|
|
|
|
|
|
|
|
if (priv->qemuCaps &&
|
2021-07-26 12:31:45 +00:00
|
|
|
virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_CHARDEV_FD_PASS_COMMANDLINE))
|
2018-03-14 17:27:49 +00:00
|
|
|
retry = false;
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2018-03-14 17:27:49 +00:00
|
|
|
VIR_DEBUG("Connect monitor to vm=%p name='%s' retry=%d",
|
|
|
|
vm, vm->def->name, retry);
|
|
|
|
|
|
|
|
if (qemuConnectMonitor(driver, vm, asyncJob, retry, logCtxt) < 0)
|
2011-02-14 16:09:39 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
/* Try to get the pty path mappings again via the monitor. This is much more
|
|
|
|
* reliable if it's available.
|
|
|
|
* Note that the monitor itself can be on a pty, so we still need to try the
|
|
|
|
* log output method. */
|
2014-08-12 02:54:42 +00:00
|
|
|
if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
|
|
|
|
goto cleanup;
|
2014-11-13 15:17:21 +00:00
|
|
|
ret = qemuMonitorGetChardevInfo(priv->mon, &info);
|
|
|
|
VIR_DEBUG("qemuMonitorGetChardevInfo returned %i", ret);
|
2021-11-24 12:09:32 +00:00
|
|
|
qemuDomainObjExitMonitor(driver, vm);
|
2014-12-16 09:40:58 +00:00
|
|
|
|
2014-11-13 19:16:46 +00:00
|
|
|
if (ret == 0) {
|
2017-07-10 21:30:03 +00:00
|
|
|
if ((ret = qemuProcessFindCharDevicePTYsMonitor(vm, info)) < 0)
|
2014-11-13 19:16:46 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
2019-10-22 13:26:14 +00:00
|
|
|
qemuProcessRefreshChannelVirtioState(driver, vm, info, true);
|
2014-11-13 19:16:46 +00:00
|
|
|
}
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2014-03-25 06:49:44 +00:00
|
|
|
cleanup:
|
2015-11-12 13:54:04 +00:00
|
|
|
if (logCtxt && kill(vm->pid, 0) == -1 && errno == ESRCH) {
|
|
|
|
qemuProcessReportLogError(logCtxt,
|
2015-11-12 11:01:07 +00:00
|
|
|
_("process exited while connecting to monitor"));
|
2011-02-14 16:09:39 +00:00
|
|
|
ret = -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2011-06-20 07:16:16 +00:00
|
|
|
|
2014-09-03 13:07:38 +00:00
|
|
|
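/* Query the IOThreads from QEMU and record the host thread ID of each one in
 * the matching iothreadid entry of the domain definition. */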
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessDetectIOThreadPIDs(virQEMUDriver *driver,
|
|
|
|
virDomainObj *vm,
|
2014-09-03 13:07:38 +00:00
|
|
|
int asyncJob)
|
|
|
|
{
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
|
|
|
qemuMonitorIOThreadInfo **iothreads = NULL;
|
2014-09-03 13:07:38 +00:00
|
|
|
int niothreads = 0;
|
|
|
|
int ret = -1;
|
|
|
|
size_t i;
|
|
|
|
|
2017-03-08 09:45:10 +00:00
|
|
|
if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_OBJECT_IOTHREAD)) {
|
|
|
|
ret = 0;
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2014-09-03 13:07:38 +00:00
|
|
|
/* Get the list of IOThreads from qemu */
|
|
|
|
if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
|
|
|
|
goto cleanup;
|
2020-12-02 17:34:24 +00:00
|
|
|
ret = qemuMonitorGetIOThreads(priv->mon, &iothreads, &niothreads);
|
2021-11-24 12:09:32 +00:00
|
|
|
qemuDomainObjExitMonitor(driver, vm);
|
2020-12-02 17:34:24 +00:00
|
|
|
if (ret < 0)
|
2014-09-03 13:07:38 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
2015-10-15 14:26:26 +00:00
|
|
|
if (niothreads != vm->def->niothreadids) {
|
2014-09-03 13:07:38 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
_("got wrong number of IOThread pids from QEMU monitor. "
|
2015-10-15 14:26:26 +00:00
|
|
|
"got %d, wanted %zu"),
|
|
|
|
niothreads, vm->def->niothreadids);
|
2014-09-03 13:07:38 +00:00
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2015-04-27 18:24:34 +00:00
|
|
|
/* Nothing to do */
|
|
|
|
if (niothreads == 0) {
|
|
|
|
ret = 0;
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2015-04-10 13:21:23 +00:00
|
|
|
for (i = 0; i < niothreads; i++) {
|
2021-03-11 07:16:13 +00:00
|
|
|
virDomainIOThreadIDDef *iothrid;
|
2015-04-10 13:21:23 +00:00
|
|
|
|
2015-04-27 18:16:54 +00:00
|
|
|
if (!(iothrid = virDomainIOThreadIDFind(vm->def,
|
|
|
|
iothreads[i]->iothread_id))) {
|
2015-04-10 13:21:23 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
2015-04-27 18:16:54 +00:00
|
|
|
_("iothread %d not found"),
|
|
|
|
iothreads[i]->iothread_id);
|
2015-04-10 13:21:23 +00:00
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
iothrid->thread_id = iothreads[i]->thread_id;
|
|
|
|
}
|
2014-09-03 13:07:38 +00:00
|
|
|
|
|
|
|
ret = 0;
|
|
|
|
|
|
|
|
cleanup:
|
|
|
|
if (iothreads) {
|
|
|
|
for (i = 0; i < niothreads; i++)
|
2015-04-28 10:32:52 +00:00
|
|
|
VIR_FREE(iothreads[i]);
|
2014-09-03 13:07:38 +00:00
|
|
|
VIR_FREE(iothreads);
|
|
|
|
}
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2012-10-24 09:22:52 +00:00
|
|
|
|
2019-01-30 08:46:23 +00:00
|
|
|
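/* Return a bitmap of all online host CPUs, or NULL (with success) if the
 * host cannot provide one. */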
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessGetAllCpuAffinity(virBitmap **cpumapRet)
|
2019-01-30 08:46:23 +00:00
|
|
|
{
|
|
|
|
*cpumapRet = NULL;
|
|
|
|
|
|
|
|
if (!virHostCPUHasBitmap())
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (!(*cpumapRet = virHostCPUGetOnlineBitmap()))
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2012-10-24 09:22:52 +00:00
|
|
|
/*
|
|
|
|
* To be run between fork/exec of QEMU only
|
|
|
|
*/
|
2020-09-01 11:27:44 +00:00
|
|
|
#if defined(WITH_SCHED_GETAFFINITY) || defined(WITH_BSD_CPU_AFFINITY)
|
2012-10-24 09:22:52 +00:00
|
|
|
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessInitCpuAffinity(virDomainObj *vm)
|
2012-10-24 09:22:52 +00:00
|
|
|
{
|
2020-09-04 12:17:30 +00:00
|
|
|
bool settingAll = false;
|
2019-10-15 12:47:50 +00:00
|
|
|
g_autoptr(virBitmap) cpumapToSet = NULL;
|
2019-01-30 08:46:23 +00:00
|
|
|
virDomainNumatuneMemMode mem_mode;
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
2012-10-24 09:22:52 +00:00
|
|
|
|
2013-07-22 14:21:15 +00:00
|
|
|
if (!vm->pid) {
|
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
|
|
|
|
_("Cannot setup CPU affinity until process is started"));
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2019-01-30 08:46:23 +00:00
|
|
|
/* Here is the deal, we can't set cpuset.mems before qemu is
|
|
|
|
* started as it clashes with KVM allocation. Therefore, we
|
|
|
|
* used to let qemu allocate its memory anywhere as we would
|
|
|
|
* then move the memory to desired NUMA node via CGroups.
|
|
|
|
* However, that might not be always possible because qemu
|
|
|
|
* might lock some parts of its memory (e.g. due to VFIO).
|
|
|
|
* Even if it is possible, memory has to be copied between NUMA
|
|
|
|
* nodes which is suboptimal.
|
|
|
|
* Solution is to set affinity that matches the best what we
|
|
|
|
* would have set in CGroups and then fix it later, once qemu
|
|
|
|
* is already running. */
|
|
|
|
if (virDomainNumaGetNodeCount(vm->def->numa) <= 1 &&
|
|
|
|
virDomainNumatuneGetMode(vm->def->numa, -1, &mem_mode) == 0 &&
|
|
|
|
mem_mode == VIR_DOMAIN_NUMATUNE_MEM_STRICT) {
|
2021-03-11 07:16:13 +00:00
|
|
|
virBitmap *nodeset = NULL;
|
qemu: Fix qemuProcessInitCpuAffinity()
Ever since the feature was introduced with commit 0f8e7ae33ace,
it has contained a logic error in that it attempted to use a NUMA
node map where a CPU map was expected.
Because of that, guests using <numatune> might fail to start:
# virsh start guest
error: Failed to start domain guest
error: cannot set CPU affinity on process 40055: Invalid argument
This was particularly easy to trigger on POWER 8 machines, where
secondary threads always show up as offline in the host: having
<numatune>
<memory mode='strict' placement='static' nodeset='1'/>
</numatune>
in the guest configuration, for example, would result in libvirt
trying to set the process affinity so that it would prefer
running on CPU 1, but since that's a secondary thread and thus
shows up as offline, the operation would fail, and so would
starting the guest.
Use the newly introduced virNumaNodesetToCPUset() to convert the
NUMA node map to a CPU map, which in the example above would be
48,56,64,72,80,88 - a valid input for virProcessSetAffinity().
https://bugzilla.redhat.com/show_bug.cgi?id=1703661
Signed-off-by: Andrea Bolognani <abologna@redhat.com>
Reviewed-by: Ján Tomko <jtomko@redhat.com>
2019-05-30 17:20:34 +00:00
|
|
|
|
2019-01-30 08:46:23 +00:00
|
|
|
if (virDomainNumatuneMaybeGetNodeset(vm->def->numa,
|
|
|
|
priv->autoNodeset,
|
2019-05-30 17:20:34 +00:00
|
|
|
&nodeset,
|
2019-01-30 08:46:23 +00:00
|
|
|
-1) < 0)
|
2019-06-04 12:37:18 +00:00
|
|
|
return -1;
|
2019-05-30 17:20:34 +00:00
|
|
|
|
|
|
|
if (virNumaNodesetToCPUset(nodeset, &cpumapToSet) < 0)
|
2019-06-04 12:37:18 +00:00
|
|
|
return -1;
|
2019-01-30 08:46:23 +00:00
|
|
|
} else if (vm->def->cputune.emulatorpin) {
|
2020-10-02 08:34:53 +00:00
|
|
|
cpumapToSet = virBitmapNewCopy(vm->def->cputune.emulatorpin);
|
2011-02-14 16:09:39 +00:00
|
|
|
} else {
|
2020-09-04 12:17:30 +00:00
|
|
|
settingAll = true;
|
2019-06-04 12:36:21 +00:00
|
|
|
if (qemuProcessGetAllCpuAffinity(&cpumapToSet) < 0)
|
2019-06-04 12:37:18 +00:00
|
|
|
return -1;
|
2011-02-14 16:09:39 +00:00
|
|
|
}
|
|
|
|
|
2020-09-07 11:00:28 +00:00
|
|
|
/*
|
|
|
|
* We only want to error out if we failed to set the affinity to
|
|
|
|
* user-requested mapping. If we are just trying to reset the affinity
|
|
|
|
* to all CPUs and this fails it can only be an issue if:
|
|
|
|
* 1) libvirtd does not have CAP_SYS_NICE
|
|
|
|
* 2) libvirtd does not run on all CPUs
|
|
|
|
*
|
|
|
|
* This scenario can easily occur when libvirtd is run inside a
|
|
|
|
* container with restrictive permissions and CPU pinning.
|
|
|
|
*
|
|
|
|
* See also: https://bugzilla.redhat.com/1819801#c2
|
|
|
|
*/
|
2019-01-30 08:46:23 +00:00
|
|
|
if (cpumapToSet &&
|
2020-09-07 11:00:28 +00:00
|
|
|
virProcessSetAffinity(vm->pid, cpumapToSet, settingAll) < 0) {
|
|
|
|
return -1;
|
2019-06-04 12:37:18 +00:00
|
|
|
}
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2019-06-04 12:37:18 +00:00
|
|
|
return 0;
|
2011-02-14 16:09:39 +00:00
|
|
|
}
|
2020-09-01 11:27:44 +00:00
|
|
|
#else /* !defined(WITH_SCHED_GETAFFINITY) && !defined(WITH_BSD_CPU_AFFINITY) */
|
2018-08-23 08:49:48 +00:00
|
|
|
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessInitCpuAffinity(virDomainObj *vm G_GNUC_UNUSED)
|
2018-08-23 08:49:48 +00:00
|
|
|
{
|
|
|
|
return 0;
|
|
|
|
}
|
2020-09-01 11:27:44 +00:00
|
|
|
#endif /* !defined(WITH_SCHED_GETAFFINITY) && !defined(WITH_BSD_CPU_AFFINITY) */
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2011-09-06 08:23:47 +00:00
|
|
|
/* set link states to down on interfaces at qemu start */
|
|
|
|
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessSetLinkStates(virQEMUDriver *driver,
|
|
|
|
virDomainObj *vm,
|
2015-11-02 09:50:21 +00:00
|
|
|
qemuDomainAsyncJob asyncJob)
|
2011-09-06 08:23:47 +00:00
|
|
|
{
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
|
|
|
virDomainDef *def = vm->def;
|
2013-07-08 14:09:33 +00:00
|
|
|
size_t i;
|
2015-11-02 09:50:21 +00:00
|
|
|
int ret = -1;
|
|
|
|
int rv;
|
|
|
|
|
|
|
|
if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
|
|
|
|
return -1;
|
2011-09-06 08:23:47 +00:00
|
|
|
|
|
|
|
for (i = 0; i < def->nnets; i++) {
|
|
|
|
if (def->nets[i]->linkstate == VIR_DOMAIN_NET_INTERFACE_LINK_STATE_DOWN) {
|
2015-04-14 13:26:36 +00:00
|
|
|
if (!def->nets[i]->info.alias) {
|
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
|
|
|
|
_("missing alias for network device"));
|
2015-11-02 09:50:21 +00:00
|
|
|
goto cleanup;
|
2015-04-14 13:26:36 +00:00
|
|
|
}
|
|
|
|
|
2011-09-06 08:23:47 +00:00
|
|
|
VIR_DEBUG("Setting link state: %s", def->nets[i]->info.alias);
|
|
|
|
|
2015-11-02 09:50:21 +00:00
|
|
|
rv = qemuMonitorSetLink(priv->mon,
|
|
|
|
def->nets[i]->info.alias,
|
|
|
|
VIR_DOMAIN_NET_INTERFACE_LINK_STATE_DOWN);
|
|
|
|
if (rv < 0) {
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_OPERATION_FAILED,
|
2015-11-02 09:50:21 +00:00
|
|
|
_("Couldn't set link state on interface: %s"),
|
|
|
|
def->nets[i]->info.alias);
|
|
|
|
goto cleanup;
|
2011-09-06 08:23:47 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-11-02 09:50:21 +00:00
|
|
|
ret = 0;
|
|
|
|
|
|
|
|
cleanup:
|
2021-11-24 12:09:32 +00:00
|
|
|
qemuDomainObjExitMonitor(driver, vm);
|
2011-09-06 08:23:47 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2016-06-21 15:33:06 +00:00
|
|
|
/**
|
|
|
|
* qemuProcessSetupPid:
|
|
|
|
*
|
2018-12-04 17:08:14 +00:00
|
|
|
* This function sets resource properties (affinity, cgroups,
|
2016-06-21 15:33:06 +00:00
|
|
|
* scheduler) for any PID associated with a domain. It should be used
|
|
|
|
* to set up emulator PIDs as well as vCPU and I/O thread pids to
|
|
|
|
* ensure they are all handled the same way.
|
|
|
|
*
|
|
|
|
* Returns 0 on success, -1 on error.
|
|
|
|
*/
|
|
|
|
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessSetupPid(virDomainObj *vm,
|
2016-06-21 15:33:06 +00:00
|
|
|
pid_t pid,
|
|
|
|
virCgroupThreadName nameval,
|
|
|
|
int id,
|
2021-03-11 07:16:13 +00:00
|
|
|
virBitmap *cpumask,
|
2016-06-21 15:33:06 +00:00
|
|
|
unsigned long long period,
|
|
|
|
long long quota,
|
2021-03-11 07:16:13 +00:00
|
|
|
virDomainThreadSchedParam *sched)
|
2016-06-21 15:33:06 +00:00
|
|
|
{
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
2016-06-21 15:33:06 +00:00
|
|
|
virDomainNumatuneMemMode mem_mode;
|
2021-03-11 07:16:13 +00:00
|
|
|
virCgroup *cgroup = NULL;
|
|
|
|
virBitmap *use_cpumask = NULL;
|
|
|
|
virBitmap *affinity_cpumask = NULL;
|
2019-10-15 12:47:50 +00:00
|
|
|
g_autoptr(virBitmap) hostcpumap = NULL;
|
2019-12-20 21:16:28 +00:00
|
|
|
g_autofree char *mem_mask = NULL;
|
2016-06-21 15:33:06 +00:00
|
|
|
int ret = -1;
|
2021-04-15 13:55:04 +00:00
|
|
|
size_t i;
|
2016-06-21 15:33:06 +00:00
|
|
|
|
|
|
|
if ((period || quota) &&
|
|
|
|
!virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPU)) {
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
|
|
|
|
_("cgroup cpu is required for scheduler tuning"));
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Infer which cpumask shall be used. */
|
2019-01-30 08:46:23 +00:00
|
|
|
if (cpumask) {
|
2016-06-21 15:33:06 +00:00
|
|
|
use_cpumask = cpumask;
|
2019-01-30 08:46:23 +00:00
|
|
|
} else if (vm->def->placement_mode == VIR_DOMAIN_CPU_PLACEMENT_MODE_AUTO) {
|
2016-06-21 15:33:06 +00:00
|
|
|
use_cpumask = priv->autoCpuset;
|
2019-01-30 08:46:23 +00:00
|
|
|
} else if (vm->def->cpumask) {
|
2016-06-21 15:33:06 +00:00
|
|
|
use_cpumask = vm->def->cpumask;
|
2019-01-30 08:46:23 +00:00
|
|
|
} else {
|
|
|
|
/* You may think this is redundant, but we can't assume libvirtd
|
|
|
|
* itself is running on all pCPUs, so we need to explicitly set
|
|
|
|
* the spawned QEMU instance to all pCPUs if no map is given in
|
|
|
|
* its config file */
|
|
|
|
if (qemuProcessGetAllCpuAffinity(&hostcpumap) < 0)
|
|
|
|
goto cleanup;
|
2020-07-22 13:50:06 +00:00
|
|
|
affinity_cpumask = hostcpumap;
|
2019-01-30 08:46:23 +00:00
|
|
|
}
|
2016-06-21 15:33:06 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* If CPU cgroup controller is not initialized here, then we need
|
|
|
|
* neither period nor quota settings. And if CPUSET controller is
|
|
|
|
* not initialized either, then there's nothing to do anyway.
|
|
|
|
*/
|
|
|
|
if (virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPU) ||
|
|
|
|
virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPUSET)) {
|
|
|
|
|
|
|
|
if (virDomainNumatuneGetMode(vm->def->numa, -1, &mem_mode) == 0 &&
|
|
|
|
mem_mode == VIR_DOMAIN_NUMATUNE_MEM_STRICT &&
|
|
|
|
virDomainNumatuneMaybeFormatNodeset(vm->def->numa,
|
|
|
|
priv->autoNodeset,
|
|
|
|
&mem_mask, -1) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
2021-04-15 13:55:04 +00:00
|
|
|
/* For restrictive numatune mode we need to set cpuset.mems for vCPU
|
|
|
|
* threads based on the node they are in, as nothing else enforces
|
|
|
|
* such a restriction (e.g. numa_set_membind). */
|
|
|
|
if (nameval == VIR_CGROUP_THREAD_VCPU) {
|
|
|
|
virDomainNuma *numatune = vm->def->numa;
|
|
|
|
|
|
|
|
/* Look for the guest NUMA node of this vCPU */
|
|
|
|
for (i = 0; i < virDomainNumaGetNodeCount(numatune); i++) {
|
2021-04-23 08:05:50 +00:00
|
|
|
virBitmap *node_cpus = virDomainNumaGetNodeCpumask(numatune, i);
|
2021-04-15 13:55:04 +00:00
|
|
|
|
|
|
|
if (!virBitmapIsBitSet(node_cpus, id))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
/* Update the mem_mask for this vCPU if the mode of its node is
|
|
|
|
* 'restrictive'. */
|
|
|
|
if (virDomainNumatuneGetMode(numatune, i, &mem_mode) == 0 &&
|
|
|
|
mem_mode == VIR_DOMAIN_NUMATUNE_MEM_RESTRICTIVE) {
|
|
|
|
VIR_FREE(mem_mask);
|
|
|
|
|
|
|
|
if (virDomainNumatuneMaybeFormatNodeset(numatune,
|
|
|
|
priv->autoNodeset,
|
|
|
|
&mem_mask, i) < 0) {
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-06-21 15:33:06 +00:00
|
|
|
if (virCgroupNewThread(priv->cgroup, nameval, id, true, &cgroup) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
if (virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPUSET)) {
|
|
|
|
if (use_cpumask &&
|
2022-01-25 16:19:53 +00:00
|
|
|
virDomainCgroupSetupCpusetCpus(cgroup, use_cpumask) < 0)
|
2016-06-21 15:33:06 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
2019-01-30 08:46:23 +00:00
|
|
|
if (mem_mask && virCgroupSetCpusetMems(cgroup, mem_mask) < 0)
|
2016-06-21 15:33:06 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
if ((period || quota) &&
|
2022-01-25 16:19:53 +00:00
|
|
|
virDomainCgroupSetupVcpuBW(cgroup, period, quota) < 0)
|
2016-06-21 15:33:06 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
/* Move the thread to the sub dir */
|
2018-09-24 22:54:04 +00:00
|
|
|
if (virCgroupAddThread(cgroup, pid) < 0)
|
2016-06-21 15:33:06 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
}
|
|
|
|
|
2020-07-22 13:50:06 +00:00
|
|
|
if (!affinity_cpumask)
|
|
|
|
affinity_cpumask = use_cpumask;
|
2019-11-07 21:23:04 +00:00
|
|
|
|
2020-09-07 11:00:28 +00:00
|
|
|
/* Setup legacy affinity.
|
|
|
|
*
|
|
|
|
* We only want to error out if we failed to set the affinity to
|
|
|
|
* user-requested mapping. If we are just trying to reset the affinity
|
|
|
|
* to all CPUs and this fails it can only be an issue if:
|
|
|
|
* 1) libvirtd does not have CAP_SYS_NICE
|
|
|
|
* 2) libvirtd does not run on all CPUs
|
|
|
|
*
|
|
|
|
* This scenario can easily occur when libvirtd is run inside a
|
|
|
|
* container with restrictive permissions and CPU pinning.
|
|
|
|
*
|
|
|
|
* See also: https://bugzilla.redhat.com/1819801#c2
|
|
|
|
*/
|
2020-09-05 05:53:58 +00:00
|
|
|
if (affinity_cpumask &&
|
2020-09-07 11:00:28 +00:00
|
|
|
virProcessSetAffinity(pid, affinity_cpumask,
|
|
|
|
affinity_cpumask == hostcpumap) < 0) {
|
|
|
|
goto cleanup;
|
2020-09-04 12:17:30 +00:00
|
|
|
}
|
2016-06-21 15:33:06 +00:00
|
|
|
|
2019-05-22 08:40:58 +00:00
|
|
|
/* Set scheduler type and priority, but not for the main thread. */
|
2016-06-21 15:33:06 +00:00
|
|
|
if (sched &&
|
2019-05-22 08:40:58 +00:00
|
|
|
nameval != VIR_CGROUP_THREAD_EMULATOR &&
|
2016-06-21 15:33:06 +00:00
|
|
|
virProcessSetScheduler(pid, sched->policy, sched->priority) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
ret = 0;
|
|
|
|
cleanup:
|
|
|
|
if (cgroup) {
|
|
|
|
if (ret < 0)
|
|
|
|
virCgroupRemove(cgroup);
|
2020-09-22 12:07:27 +00:00
|
|
|
virCgroupFree(cgroup);
|
2016-06-21 15:33:06 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2012-08-21 09:18:34 +00:00
|
|
|
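/* Apply the emulator thread's pinning, cgroup and scheduler settings via
 * qemuProcessSetupPid(). */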
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessSetupEmulator(virDomainObj *vm)
|
2012-08-21 09:18:34 +00:00
|
|
|
{
|
2016-07-04 15:14:43 +00:00
|
|
|
return qemuProcessSetupPid(vm, vm->pid, VIR_CGROUP_THREAD_EMULATOR,
|
|
|
|
0, vm->def->cputune.emulatorpin,
|
|
|
|
vm->def->cputune.emulator_period,
|
|
|
|
vm->def->cputune.emulator_quota,
|
2019-04-15 11:13:06 +00:00
|
|
|
vm->def->cputune.emulatorsched);
|
2012-08-21 09:18:34 +00:00
|
|
|
}
|
|
|
|
|
2015-01-08 14:37:50 +00:00
|
|
|
|
2017-11-10 12:21:51 +00:00
|
|
|
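/* Create the resctrl allocations and monitors configured for the domain. */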
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessResctrlCreate(virQEMUDriver *driver,
|
|
|
|
virDomainObj *vm)
|
2017-11-10 12:21:51 +00:00
|
|
|
{
|
|
|
|
size_t i = 0;
|
2019-12-20 21:16:30 +00:00
|
|
|
g_autoptr(virCaps) caps = NULL;
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
2017-11-10 12:21:51 +00:00
|
|
|
|
2018-07-30 03:12:35 +00:00
|
|
|
if (!vm->def->nresctrls)
|
2018-02-02 07:35:30 +00:00
|
|
|
return 0;
|
|
|
|
|
2019-12-10 10:24:19 +00:00
|
|
|
/* Force capability refresh since resctrl info can change
|
|
|
|
* XXX: move cache info into virresctrl so caps are not needed */
|
|
|
|
caps = virQEMUDriverGetCapabilities(driver, true);
|
|
|
|
if (!caps)
|
2017-11-10 12:21:51 +00:00
|
|
|
return -1;
|
|
|
|
|
2018-07-30 03:12:35 +00:00
|
|
|
for (i = 0; i < vm->def->nresctrls; i++) {
|
2018-11-12 13:31:45 +00:00
|
|
|
size_t j = 0;
|
2019-12-10 10:24:19 +00:00
|
|
|
if (virResctrlAllocCreate(caps->host.resctrl,
|
2018-07-30 03:12:35 +00:00
|
|
|
vm->def->resctrls[i]->alloc,
|
2017-11-10 12:21:51 +00:00
|
|
|
priv->machineName) < 0)
|
2019-12-20 21:16:31 +00:00
|
|
|
return -1;
|
2018-11-12 13:31:45 +00:00
|
|
|
|
|
|
|
for (j = 0; j < vm->def->resctrls[i]->nmonitors; j++) {
|
2021-03-11 07:16:13 +00:00
|
|
|
virDomainResctrlMonDef *mon = NULL;
|
2018-11-12 13:31:45 +00:00
|
|
|
|
|
|
|
mon = vm->def->resctrls[i]->monitors[j];
|
|
|
|
if (virResctrlMonitorCreate(mon->instance,
|
|
|
|
priv->machineName) < 0)
|
2019-12-20 21:16:31 +00:00
|
|
|
return -1;
|
2018-11-12 13:31:45 +00:00
|
|
|
}
|
2017-11-10 12:21:51 +00:00
|
|
|
}
|
|
|
|
|
2019-12-20 21:16:31 +00:00
|
|
|
return 0;
|
2017-11-10 12:21:51 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2018-04-19 08:00:36 +00:00
|
|
|
static char *
|
2021-10-18 09:20:11 +00:00
|
|
|
qemuProcessBuildPRHelperPidfilePathOld(virDomainObj *vm)
|
2018-04-19 08:00:36 +00:00
|
|
|
{
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
2018-04-19 08:00:36 +00:00
|
|
|
const char *prdAlias = qemuDomainGetManagedPRAlias();
|
|
|
|
|
|
|
|
return virPidFileBuildPath(priv->libDir, prdAlias);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2021-10-18 09:20:11 +00:00
|
|
|
static char *
|
|
|
|
qemuProcessBuildPRHelperPidfilePath(virDomainObj *vm)
|
|
|
|
{
|
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
|
|
|
g_autofree char *domname = virDomainDefGetShortName(vm->def);
|
|
|
|
g_autofree char *prdName = g_strdup_printf("%s-%s", domname, qemuDomainGetManagedPRAlias());
|
|
|
|
g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(priv->driver);
|
|
|
|
|
|
|
|
return virPidFileBuildPath(cfg->stateDir, prdName);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2018-04-23 11:21:03 +00:00
|
|
|
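/* Stop the managed pr-helper process using its pidfile, checking the current
 * pidfile location first and falling back to the legacy per-domain libDir
 * path. */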
void
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessKillManagedPRDaemon(virDomainObj *vm)
|
2018-04-19 08:00:36 +00:00
|
|
|
{
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
2018-04-19 08:00:36 +00:00
|
|
|
virErrorPtr orig_err;
|
2019-12-20 21:16:28 +00:00
|
|
|
g_autofree char *pidfile = NULL;
|
2018-04-19 08:00:36 +00:00
|
|
|
|
|
|
|
if (!(pidfile = qemuProcessBuildPRHelperPidfilePath(vm))) {
|
|
|
|
VIR_WARN("Unable to construct pr-helper pidfile path");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2021-10-18 09:20:11 +00:00
|
|
|
if (!virFileExists(pidfile)) {
|
|
|
|
g_free(pidfile);
|
|
|
|
if (!(pidfile = qemuProcessBuildPRHelperPidfilePathOld(vm))) {
|
|
|
|
VIR_WARN("Unable to construct pr-helper pidfile path");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-04-19 08:00:36 +00:00
|
|
|
virErrorPreserveLast(&orig_err);
|
|
|
|
if (virPidFileForceCleanupPath(pidfile) < 0) {
|
|
|
|
VIR_WARN("Unable to kill pr-helper process");
|
|
|
|
} else {
|
2020-02-20 10:46:24 +00:00
|
|
|
priv->prDaemonRunning = false;
|
2018-04-19 08:00:36 +00:00
|
|
|
}
|
|
|
|
virErrorRestore(&orig_err);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
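/* Pre-exec hook for the pr-helper: join the namespaces of the QEMU process
 * when the domain uses a separate mount namespace. */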
static int
|
|
|
|
qemuProcessStartPRDaemonHook(void *opaque)
|
|
|
|
{
|
2021-03-11 07:16:13 +00:00
|
|
|
virDomainObj *vm = opaque;
|
2018-04-19 08:00:36 +00:00
|
|
|
size_t i, nfds = 0;
|
2019-12-20 21:16:28 +00:00
|
|
|
g_autofree int *fds = NULL;
|
2018-04-19 08:00:36 +00:00
|
|
|
int ret = -1;
|
|
|
|
|
2018-07-03 11:19:59 +00:00
|
|
|
if (qemuDomainNamespaceEnabled(vm, QEMU_DOMAIN_NS_MOUNT)) {
|
|
|
|
if (virProcessGetNamespaces(vm->pid, &nfds, &fds) < 0)
|
|
|
|
return ret;
|
2018-04-19 08:00:36 +00:00
|
|
|
|
2018-07-03 11:19:59 +00:00
|
|
|
if (nfds > 0 &&
|
|
|
|
virProcessSetNamespaces(nfds, fds) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
}
|
2018-04-19 08:00:36 +00:00
|
|
|
|
|
|
|
ret = 0;
|
|
|
|
cleanup:
|
|
|
|
for (i = 0; i < nfds; i++)
|
|
|
|
VIR_FORCE_CLOSE(fds[i]);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2018-04-23 11:21:03 +00:00
|
|
|
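/* Spawn the managed qemu-pr-helper for the domain, wait for its socket to
 * show up, move the helper into the domain's cgroup and label the socket. */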
int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessStartManagedPRDaemon(virDomainObj *vm)
|
2018-04-19 08:00:36 +00:00
|
|
|
{
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
|
|
|
virQEMUDriver *driver = priv->driver;
|
2019-12-20 21:16:30 +00:00
|
|
|
g_autoptr(virQEMUDriverConfig) cfg = NULL;
|
2018-04-19 08:00:36 +00:00
|
|
|
int errfd = -1;
|
2019-12-20 21:16:28 +00:00
|
|
|
g_autofree char *pidfile = NULL;
|
|
|
|
g_autofree char *socketPath = NULL;
|
2018-04-19 08:00:36 +00:00
|
|
|
pid_t cpid = -1;
|
2019-12-20 21:16:30 +00:00
|
|
|
g_autoptr(virCommand) cmd = NULL;
|
2018-04-19 08:00:36 +00:00
|
|
|
virTimeBackOffVar timebackoff;
|
|
|
|
const unsigned long long timeout = 500000; /* ms */
|
|
|
|
int ret = -1;
|
|
|
|
|
|
|
|
cfg = virQEMUDriverGetConfig(driver);
|
|
|
|
|
|
|
|
if (!virFileIsExecutable(cfg->prHelperName)) {
|
|
|
|
virReportSystemError(errno, _("'%s' is not a suitable pr helper"),
|
|
|
|
cfg->prHelperName);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!(pidfile = qemuProcessBuildPRHelperPidfilePath(vm)))
|
|
|
|
goto cleanup;
|
|
|
|
|
2018-05-14 05:28:05 +00:00
|
|
|
if (!(socketPath = qemuDomainGetManagedPRSocketPath(priv)))
|
2018-04-19 08:00:36 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
/* Remove stale socket */
|
|
|
|
if (unlink(socketPath) < 0 &&
|
|
|
|
errno != ENOENT) {
|
|
|
|
virReportSystemError(errno,
|
|
|
|
_("Unable to remove stale socket path: %s"),
|
|
|
|
socketPath);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!(cmd = virCommandNewArgList(cfg->prHelperName,
|
|
|
|
"-k", socketPath,
|
|
|
|
NULL)))
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
virCommandDaemonize(cmd);
|
|
|
|
virCommandSetPidFile(cmd, pidfile);
|
|
|
|
virCommandSetErrorFD(cmd, &errfd);
|
|
|
|
|
|
|
|
/* Place the process into the same namespace and cgroup as
|
|
|
|
* qemu (so that it shares the same view of the system). */
|
|
|
|
virCommandSetPreExecHook(cmd, qemuProcessStartPRDaemonHook, vm);
|
|
|
|
|
|
|
|
if (virCommandRun(cmd, NULL) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
if (virPidFileReadPath(pidfile, &cpid) < 0) {
|
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
_("pr helper %s didn't show up"),
|
|
|
|
cfg->prHelperName);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
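/* Wait (with back-off, up to @timeout ms) for the helper's socket to
 * appear.  If the helper dies in the meantime, report whatever it wrote
 * to its error FD instead of waiting for the timeout. */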
if (virTimeBackOffStart(&timebackoff, 1, timeout) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
while (virTimeBackOffWait(&timebackoff)) {
|
|
|
|
char errbuf[1024] = { 0 };
|
|
|
|
|
|
|
|
if (virFileExists(socketPath))
|
|
|
|
break;
|
|
|
|
|
|
|
|
if (virProcessKill(cpid, 0) == 0)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (saferead(errfd, errbuf, sizeof(errbuf) - 1) < 0) {
|
|
|
|
virReportSystemError(errno,
|
|
|
|
_("pr helper %s died unexpectedly"),
|
|
|
|
cfg->prHelperName);
|
|
|
|
} else {
|
|
|
|
virReportError(VIR_ERR_OPERATION_FAILED,
|
|
|
|
_("pr helper died and reported: %s"), errbuf);
|
|
|
|
}
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!virFileExists(socketPath)) {
|
|
|
|
virReportError(VIR_ERR_OPERATION_TIMEOUT, "%s",
|
|
|
|
_("pr helper socked did not show up"));
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (priv->cgroup &&
|
2018-09-11 13:03:22 +00:00
|
|
|
virCgroupAddMachineProcess(priv->cgroup, cpid) < 0)
|
2018-04-19 08:00:36 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
2018-09-05 09:19:14 +00:00
|
|
|
if (qemuSecurityDomainSetPathLabel(driver, vm, socketPath, true) < 0)
|
2018-04-19 08:00:36 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
priv->prDaemonRunning = true;
|
2018-05-11 13:40:34 +00:00
|
|
|
ret = 0;
|
2018-04-19 08:00:36 +00:00
|
|
|
cleanup:
|
|
|
|
if (ret < 0) {
|
|
|
|
virCommandAbort(cmd);
|
|
|
|
if (cpid >= 0)
|
|
|
|
virProcessKillPainfully(cpid, true);
|
|
|
|
if (pidfile)
|
|
|
|
unlink(pidfile);
|
|
|
|
}
|
|
|
|
VIR_FORCE_CLOSE(errfd);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
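/* Push the configured VNC and SPICE passwords to the running QEMU via
 * the monitor (qemuDomainChangeGraphicsPasswords). */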
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessInitPasswords(virQEMUDriver *driver,
|
|
|
|
virDomainObj *vm,
|
2014-08-12 02:54:42 +00:00
|
|
|
int asyncJob)
|
2011-02-14 16:09:39 +00:00
|
|
|
{
|
|
|
|
int ret = 0;
|
2019-12-20 21:16:30 +00:00
|
|
|
g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
|
Convert 'int i' to 'size_t i' in src/qemu files
Convert the type of loop iterators named 'i', 'j', k',
'ii', 'jj', 'kk', to be 'size_t' instead of 'int' or
'unsigned int', also santizing 'ii', 'jj', 'kk' to use
the normal 'i', 'j', 'k' naming
Signed-off-by: Daniel P. Berrange <berrange@redhat.com>
2013-07-08 14:09:33 +00:00
|
|
|
size_t i;
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2013-05-21 07:21:20 +00:00
|
|
|
for (i = 0; i < vm->def->ngraphics; ++i) {
|
2021-03-11 07:16:13 +00:00
|
|
|
virDomainGraphicsDef *graphics = vm->def->graphics[i];
|
2012-11-10 01:40:23 +00:00
|
|
|
if (graphics->type == VIR_DOMAIN_GRAPHICS_TYPE_VNC) {
|
2011-02-14 16:09:39 +00:00
|
|
|
ret = qemuDomainChangeGraphicsPasswords(driver, vm,
|
|
|
|
VIR_DOMAIN_GRAPHICS_TYPE_VNC,
|
2012-11-10 01:40:23 +00:00
|
|
|
&graphics->data.vnc.auth,
|
2014-08-12 02:54:42 +00:00
|
|
|
cfg->vncPassword,
|
|
|
|
asyncJob);
|
2012-11-10 01:40:23 +00:00
|
|
|
} else if (graphics->type == VIR_DOMAIN_GRAPHICS_TYPE_SPICE) {
|
2011-02-14 16:09:39 +00:00
|
|
|
ret = qemuDomainChangeGraphicsPasswords(driver, vm,
|
|
|
|
VIR_DOMAIN_GRAPHICS_TYPE_SPICE,
|
2012-11-10 01:40:23 +00:00
|
|
|
&graphics->data.spice.auth,
|
2014-08-12 02:54:42 +00:00
|
|
|
cfg->spicePassword,
|
|
|
|
asyncJob);
|
2011-02-14 16:09:39 +00:00
|
|
|
}
|
|
|
|
|
2013-07-01 07:23:04 +00:00
|
|
|
if (ret < 0)
|
2019-12-20 21:16:31 +00:00
|
|
|
return ret;
|
2013-07-01 07:23:04 +00:00
|
|
|
}
|
2011-02-14 16:09:39 +00:00
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2014-05-13 06:54:20 +00:00
|
|
|
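/* Per-chardev cleanup callback: unlink the filesystem socket of a
 * listening UNIX-type character device. */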
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessCleanupChardevDevice(virDomainDef *def G_GNUC_UNUSED,
|
|
|
|
virDomainChrDef *dev,
|
2019-10-14 12:45:33 +00:00
|
|
|
void *opaque G_GNUC_UNUSED)
|
2014-05-13 06:54:20 +00:00
|
|
|
{
|
2016-10-21 11:45:54 +00:00
|
|
|
if (dev->source->type == VIR_DOMAIN_CHR_TYPE_UNIX &&
|
|
|
|
dev->source->data.nix.listen &&
|
|
|
|
dev->source->data.nix.path)
|
|
|
|
unlink(dev->source->data.nix.path);
|
2014-05-13 06:54:20 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2014-12-10 14:31:23 +00:00
|
|
|
/**
|
|
|
|
* Load and update the video memory sizes of video devices from the QEMU
* process, since QEMU may silently adjust the values we pass on the
* command line. We need to load these updated values and store them in the
* status XML.
*
* We fail if for some reason the values cannot be loaded from QEMU, because
* having the correct video memory size in the status XML is mandatory so as
* not to break migration.
|
|
|
|
*/
|
|
|
|
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessUpdateVideoRamSize(virQEMUDriver *driver,
|
|
|
|
virDomainObj *vm,
|
2014-12-10 14:31:23 +00:00
|
|
|
int asyncJob)
|
|
|
|
{
|
|
|
|
int ret = -1;
|
|
|
|
ssize_t i;
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
|
|
|
virDomainVideoDef *video = NULL;
|
2019-12-20 21:16:30 +00:00
|
|
|
g_autoptr(virQEMUDriverConfig) cfg = NULL;
|
2014-12-10 14:31:23 +00:00
|
|
|
|
|
|
|
if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
for (i = 0; i < vm->def->nvideos; i++) {
|
|
|
|
video = vm->def->videos[i];
|
|
|
|
|
|
|
|
switch (video->type) {
|
|
|
|
case VIR_DOMAIN_VIDEO_TYPE_VGA:
|
|
|
|
if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_VGA_VGAMEM)) {
|
|
|
|
if (qemuMonitorUpdateVideoMemorySize(priv->mon, video, "VGA") < 0)
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case VIR_DOMAIN_VIDEO_TYPE_QXL:
|
|
|
|
if (i == 0) {
|
2016-10-11 15:42:37 +00:00
|
|
|
if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_QXL_VGAMEM) &&
|
2016-02-23 16:04:19 +00:00
|
|
|
qemuMonitorUpdateVideoMemorySize(priv->mon, video,
|
|
|
|
"qxl-vga") < 0)
|
2014-12-10 14:31:23 +00:00
|
|
|
goto error;
|
2016-02-23 16:04:19 +00:00
|
|
|
|
2016-10-11 15:42:37 +00:00
|
|
|
if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_QXL_VRAM64) &&
|
2016-02-23 16:04:19 +00:00
|
|
|
qemuMonitorUpdateVideoVram64Size(priv->mon, video,
|
|
|
|
"qxl-vga") < 0)
|
|
|
|
goto error;
|
2014-12-10 14:31:23 +00:00
|
|
|
} else {
|
2016-02-23 16:04:19 +00:00
|
|
|
if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_QXL_VGAMEM) &&
|
|
|
|
qemuMonitorUpdateVideoMemorySize(priv->mon, video,
|
|
|
|
"qxl") < 0)
|
|
|
|
goto error;
|
|
|
|
|
|
|
|
if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_QXL_VRAM64) &&
|
|
|
|
qemuMonitorUpdateVideoVram64Size(priv->mon, video,
|
|
|
|
"qxl") < 0)
|
2014-12-10 14:31:23 +00:00
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case VIR_DOMAIN_VIDEO_TYPE_VMVGA:
|
|
|
|
if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_VMWARE_SVGA_VGAMEM)) {
|
|
|
|
if (qemuMonitorUpdateVideoMemorySize(priv->mon, video,
|
|
|
|
"vmware-svga") < 0)
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case VIR_DOMAIN_VIDEO_TYPE_CIRRUS:
|
|
|
|
case VIR_DOMAIN_VIDEO_TYPE_XEN:
|
|
|
|
case VIR_DOMAIN_VIDEO_TYPE_VBOX:
|
|
|
|
case VIR_DOMAIN_VIDEO_TYPE_LAST:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
}
|
|
|
|
|
2021-11-24 12:09:32 +00:00
|
|
|
qemuDomainObjExitMonitor(driver, vm);
|
2014-12-10 14:31:23 +00:00
|
|
|
|
|
|
|
cfg = virQEMUDriverGetConfig(driver);
|
2019-11-27 12:53:10 +00:00
|
|
|
ret = virDomainObjSave(vm, driver->xmlopt, cfg->stateDir);
|
2014-12-10 14:31:23 +00:00
|
|
|
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
error:
|
2021-11-24 12:12:40 +00:00
|
|
|
qemuDomainObjExitMonitor(driver, vm);
|
2014-12-10 14:31:23 +00:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
struct qemuProcessHookData {
|
2021-03-11 07:16:13 +00:00
|
|
|
virDomainObj *vm;
|
|
|
|
virQEMUDriver *driver;
|
|
|
|
virQEMUDriverConfig *cfg;
|
2011-02-14 16:09:39 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
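/* Hook run in the child process between fork() and exec() of QEMU:
 * re-initialize the security driver, acquire the lock manager lease,
 * unshare the mount namespace and apply the NUMA memory policy.  Only
 * fork-safe operations may be used here. */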
static int qemuProcessHook(void *data)
|
|
|
|
{
|
|
|
|
struct qemuProcessHookData *h = data;
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainObjPrivate *priv = h->vm->privateData;
|
2010-10-26 14:04:46 +00:00
|
|
|
int ret = -1;
|
2011-06-24 14:14:41 +00:00
|
|
|
int fd;
|
2021-03-11 07:16:13 +00:00
|
|
|
virBitmap *nodeset = NULL;
|
2014-11-06 11:16:54 +00:00
|
|
|
virDomainNumatuneMemMode mode;
|
|
|
|
|
2013-02-11 16:08:42 +00:00
|
|
|
/* This method cannot use any mutexes, which are not
|
|
|
|
* protected across fork()
|
|
|
|
*/
|
|
|
|
|
2017-02-13 13:36:53 +00:00
|
|
|
qemuSecurityPostFork(h->driver->securityManager);
|
2010-10-26 14:04:46 +00:00
|
|
|
|
|
|
|
/* Some later calls want pid present */
|
|
|
|
h->vm->pid = getpid();
|
|
|
|
|
|
|
|
VIR_DEBUG("Obtaining domain lock");
|
2011-08-26 15:06:31 +00:00
|
|
|
/*
|
|
|
|
* Since we're going to leak the returned FD to QEMU,
|
|
|
|
* we need to make sure it gets a sensible label.
|
|
|
|
* This mildly sucks, because there could be other
|
|
|
|
* sockets the lock driver opens that we don't want
|
|
|
|
* labelled. So far we're ok though.
|
|
|
|
*/
|
2017-02-13 13:36:53 +00:00
|
|
|
if (qemuSecuritySetSocketLabel(h->driver->securityManager, h->vm->def) < 0)
|
2011-08-26 15:06:31 +00:00
|
|
|
goto cleanup;
|
2010-10-26 14:04:46 +00:00
|
|
|
if (virDomainLockProcessStart(h->driver->lockManager,
|
2013-02-11 16:08:42 +00:00
|
|
|
h->cfg->uri,
|
2010-10-26 14:04:46 +00:00
|
|
|
h->vm,
|
2012-10-11 16:31:20 +00:00
|
|
|
/* QEMU is always paused initially */
|
2011-06-24 14:14:41 +00:00
|
|
|
true,
|
|
|
|
&fd) < 0)
|
2010-10-26 14:04:46 +00:00
|
|
|
goto cleanup;
|
2017-02-13 13:36:53 +00:00
|
|
|
if (qemuSecurityClearSocketLabel(h->driver->securityManager, h->vm->def) < 0)
|
2011-08-26 15:06:31 +00:00
|
|
|
goto cleanup;
|
2011-02-14 16:09:39 +00:00
|
|
|
|
qemu_domain_namespace: Repurpose qemuDomainBuildNamespace()
Okay, here is the deal. Currently, the way we build namespace is
very fragile. It is done from pre-exec hook when starting a
domain, after we mass closed all FDs and before we drop
privileges and exec() QEMU. This fact poses some limitations onto
the namespace build code, e.g. it has to make sure not to keep
any FD opened (not even through a library call), because it would
be leaked to QEMU. Also, it has to call only async signal safe
functions. These requirements are hard to meet - in fact as of my
commit v6.2.0-rc1~235 we are leaking a FD into QEMU by calling
libdevmapper functions.
To solve this issue and avoid similar problems in the future, we
should change our paradigm. We already have functions which can
populate domain's namespace with nodes from the daemon context.
If we use them to populate the namespace and keep only the bare
minimum in the pre-exec hook, we've mitigated the risk.
Therefore, the old qemuDomainBuildNamespace() is renamed to
qemuDomainUnshareNamespace() and new qemuDomainBuildNamespace()
function is introduced. So far, the new function is basically a
NOP and domain's namespace is still populated from the pre-exec
hook - next patches will fix it.
Signed-off-by: Michal Privoznik <mprivozn@redhat.com>
Reviewed-by: Ján Tomko <jtomko@redhat.com>
2020-07-21 16:12:26 +00:00
|
|
|
if (qemuDomainUnshareNamespace(h->cfg, h->driver->securityManager, h->vm) < 0)
|
2016-11-15 10:30:18 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
2015-05-19 09:55:26 +00:00
|
|
|
if (virDomainNumatuneGetMode(h->vm->def->numa, -1, &mode) == 0) {
|
|
|
|
if (mode == VIR_DOMAIN_NUMATUNE_MEM_STRICT &&
|
|
|
|
h->cfg->cgroupControllers & (1 << VIR_CGROUP_CONTROLLER_CPUSET) &&
|
|
|
|
virCgroupControllerAvailable(VIR_CGROUP_CONTROLLER_CPUSET)) {
|
|
|
|
/* Use virNuma* API iff necessary. Once set and child is exec()-ed,
|
|
|
|
* there's no way for us to change it. Rely on cgroups (if available
|
|
|
|
* and enabled in the config) rather than virNuma*. */
|
|
|
|
VIR_DEBUG("Relying on CGroups for memory binding");
|
|
|
|
} else {
|
|
|
|
nodeset = virDomainNumatuneGetNodeset(h->vm->def->numa,
|
|
|
|
priv->autoNodeset, -1);
|
qemuProcessHook: Call virNuma*() only when needed
https://bugzilla.redhat.com/show_bug.cgi?id=1198645
Once upon a time, there was a little domain. And the domain was pinned
onto a NUMA node and hasn't fully allocated its memory:
<memory unit='KiB'>2355200</memory>
<currentMemory unit='KiB'>1048576</currentMemory>
<numatune>
<memory mode='strict' nodeset='0'/>
</numatune>
Oh little me, said the domain, what will I do with so little memory.
If I only had a few megabytes more. But the old admin noticed the
whimpering, barely audible to untrained human ear. And good admin he
was, he gave the domain yet more memory. But the old NUMA topology
witch forbade to allocate more memory on the node zero. So he
decided to allocate it on a different node:
virsh # numatune little_domain --nodeset 0-1
virsh # setmem little_domain 2355200
The little domain was happy. For a while. Until bad, sharp teeth
shaped creature came. Every process in the system was afraid of him.
The OOM Killer they called him. Oh no, he's after the little domain.
There's no escape.
Do you kids know why? Because when the little domain was born, her
father, Libvirt, called numa_set_membind(). So even if the admin
allowed her to allocate memory from other nodes in the cgroups, the
membind() forbid it.
So what's the lesson? Libvirt should rely on cgroups, whenever
possible and use numa_set_membind() as the last ditch effort.
Signed-off-by: Michal Privoznik <mprivozn@redhat.com>
2015-03-27 10:39:45 +00:00
|
|
|
|
2015-05-19 09:55:26 +00:00
|
|
|
if (virNumaSetupMemoryPolicy(mode, nodeset) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
}
|
2015-03-27 10:39:45 +00:00
|
|
|
}
|
2011-06-20 07:16:16 +00:00
|
|
|
|
2010-10-26 14:04:46 +00:00
|
|
|
ret = 0;
|
|
|
|
|
2014-03-25 06:49:44 +00:00
|
|
|
cleanup:
|
2013-02-11 16:08:42 +00:00
|
|
|
virObjectUnref(h->cfg);
|
2010-10-26 14:04:46 +00:00
|
|
|
VIR_DEBUG("Hook complete ret=%d", ret);
|
|
|
|
return ret;
|
2011-02-14 16:09:39 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessPrepareMonitorChr(virDomainChrSourceDef *monConfig,
|
2016-02-26 08:15:55 +00:00
|
|
|
const char *domainDir)
|
2011-02-14 16:09:39 +00:00
|
|
|
{
|
|
|
|
monConfig->type = VIR_DOMAIN_CHR_TYPE_UNIX;
|
|
|
|
monConfig->data.nix.listen = true;
|
|
|
|
|
2019-10-22 13:26:14 +00:00
|
|
|
monConfig->data.nix.path = g_strdup_printf("%s/monitor.sock", domainDir);
|
2013-07-18 10:13:46 +00:00
|
|
|
return 0;
|
2011-02-14 16:09:39 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-06-15 16:49:58 +00:00
|
|
|
/*
|
2013-02-06 18:17:20 +00:00
|
|
|
* Precondition: vm must be locked, and a job must be active.
|
|
|
|
* This method will call {Enter,Exit}Monitor
|
2011-06-15 16:49:58 +00:00
|
|
|
*/
|
2011-03-15 02:20:53 +00:00
|
|
|
int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessStartCPUs(virQEMUDriver *driver, virDomainObj *vm,
|
2018-02-09 15:40:51 +00:00
|
|
|
virDomainRunningReason reason,
|
2014-06-15 16:32:56 +00:00
|
|
|
qemuDomainAsyncJob asyncJob)
|
2011-02-14 16:09:39 +00:00
|
|
|
{
|
2013-01-10 21:03:14 +00:00
|
|
|
int ret = -1;
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
2019-12-20 21:16:30 +00:00
|
|
|
g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2014-09-16 20:50:53 +00:00
|
|
|
/* Bring up netdevs before starting CPUs */
|
2014-12-11 19:49:13 +00:00
|
|
|
if (qemuInterfaceStartDevices(vm->def) < 0)
|
2019-12-20 21:16:31 +00:00
|
|
|
return -1;
|
2014-09-16 20:50:53 +00:00
|
|
|
|
2010-10-26 14:04:46 +00:00
|
|
|
VIR_DEBUG("Using lock state '%s'", NULLSTR(priv->lockState));
|
2013-01-10 21:03:14 +00:00
|
|
|
if (virDomainLockProcessResume(driver->lockManager, cfg->uri,
|
2012-09-17 13:36:47 +00:00
|
|
|
vm, priv->lockState) < 0) {
|
2010-10-26 14:04:46 +00:00
|
|
|
/* Don't free priv->lockState on error, because we need
|
|
|
|
* to make sure we have state still present if the user
|
|
|
|
* tries to resume again
|
|
|
|
*/
|
2019-12-20 21:16:31 +00:00
|
|
|
return -1;
|
2010-10-26 14:04:46 +00:00
|
|
|
}
|
|
|
|
VIR_FREE(priv->lockState);
|
|
|
|
|
2018-09-10 17:41:53 +00:00
|
|
|
priv->runningReason = reason;
|
|
|
|
|
2014-05-14 11:22:34 +00:00
|
|
|
if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
|
|
|
|
goto release;
|
2011-05-04 09:07:01 +00:00
|
|
|
|
2018-02-09 15:40:51 +00:00
|
|
|
ret = qemuMonitorStartCPUs(priv->mon);
|
2021-11-24 12:09:32 +00:00
|
|
|
qemuDomainObjExitMonitor(driver, vm);
|
2014-05-14 11:22:34 +00:00
|
|
|
|
|
|
|
if (ret < 0)
|
|
|
|
goto release;
|
|
|
|
|
2018-09-12 12:34:33 +00:00
|
|
|
/* The RESUME event handler will change the domain state with the reason
|
|
|
|
* saved in priv->runningReason and it will also emit corresponding domain
|
|
|
|
* lifecycle event.
|
|
|
|
*/
|
2011-02-14 16:09:39 +00:00
|
|
|
|
|
|
|
return ret;
|
2014-05-14 11:22:34 +00:00
|
|
|
|
|
|
|
release:
|
2018-09-10 17:41:53 +00:00
|
|
|
priv->runningReason = VIR_DOMAIN_RUNNING_UNKNOWN;
|
2014-05-14 11:22:34 +00:00
|
|
|
if (virDomainLockProcessPause(driver->lockManager, vm, &priv->lockState) < 0)
|
|
|
|
VIR_WARN("Unable to release lease on %s", vm->def->name);
|
|
|
|
VIR_DEBUG("Preserving lock state '%s'", NULLSTR(priv->lockState));
|
2019-12-20 21:16:31 +00:00
|
|
|
return ret;
|
2011-02-14 16:09:39 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2021-03-11 07:16:13 +00:00
|
|
|
int qemuProcessStopCPUs(virQEMUDriver *driver,
|
|
|
|
virDomainObj *vm,
|
qemu: fix crash when mixing sync and async monitor jobs
Currently, we attempt to run sync job and async job at the same time. It
means that the monitor commands for two jobs can be run in any order.
In the function qemuDomainObjEnterMonitorInternal():
if (priv->job.active == QEMU_JOB_NONE && priv->job.asyncJob) {
if (qemuDomainObjBeginNestedJob(driver, obj) < 0)
We check whether the caller is an async job by priv->job.active and
priv->job.asynJob. But when an async job is running, and a sync job is
also running at the time of the check, then priv->job.active is not
QEMU_JOB_NONE. So we cannot check whether the caller is an async job
in the function qemuDomainObjEnterMonitorInternal(), and must instead
put the burden on the caller to tell us when an async command wants
to do a nested job.
Once the burden is on the caller, then only async monitor enters need
to worry about whether the VM is still running; for sync monitor enter,
the internal return is always 0, so lots of ignore_value can be dropped.
* src/qemu/THREADS.txt: Reflect new rules.
* src/qemu/qemu_domain.h (qemuDomainObjEnterMonitorAsync): New
prototype.
* src/qemu/qemu_process.h (qemuProcessStartCPUs)
(qemuProcessStopCPUs): Add parameter.
* src/qemu/qemu_migration.h (qemuMigrationToFile): Likewise.
(qemuMigrationWaitForCompletion): Make static.
* src/qemu/qemu_domain.c (qemuDomainObjEnterMonitorInternal): Add
parameter.
(qemuDomainObjEnterMonitorAsync): New function.
(qemuDomainObjEnterMonitor, qemuDomainObjEnterMonitorWithDriver):
Update callers.
* src/qemu/qemu_driver.c (qemuDomainSaveInternal)
(qemudDomainCoreDump, doCoreDump, processWatchdogEvent)
(qemudDomainSuspend, qemudDomainResume, qemuDomainSaveImageStartVM)
(qemuDomainSnapshotCreateActive, qemuDomainRevertToSnapshot):
Likewise.
* src/qemu/qemu_process.c (qemuProcessStopCPUs)
(qemuProcessFakeReboot, qemuProcessRecoverMigration)
(qemuProcessRecoverJob, qemuProcessStart): Likewise.
* src/qemu/qemu_migration.c (qemuMigrationToFile)
(qemuMigrationWaitForCompletion, qemuMigrationUpdateJobStatus)
(qemuMigrationJobStart, qemuDomainMigrateGraphicsRelocate)
(doNativeMigrate, doTunnelMigrate, qemuMigrationPerformJob)
(qemuMigrationPerformPhase, qemuMigrationFinish)
(qemuMigrationConfirm): Likewise.
* src/qemu/qemu_hotplug.c: Drop unneeded ignore_value.
2011-07-28 23:18:24 +00:00
|
|
|
virDomainPausedReason reason,
|
2014-06-15 16:32:56 +00:00
|
|
|
qemuDomainAsyncJob asyncJob)
|
2011-02-14 16:09:39 +00:00
|
|
|
{
|
2014-05-14 11:22:34 +00:00
|
|
|
int ret = -1;
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2010-10-26 14:04:46 +00:00
|
|
|
VIR_FREE(priv->lockState);
|
2011-05-04 09:07:01 +00:00
|
|
|
|
2018-10-09 13:41:51 +00:00
|
|
|
priv->pausedReason = reason;
|
|
|
|
|
2014-05-14 11:22:34 +00:00
|
|
|
if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
|
|
|
|
goto cleanup;
|
2011-05-04 09:07:01 +00:00
|
|
|
|
2014-05-14 11:22:34 +00:00
|
|
|
ret = qemuMonitorStopCPUs(priv->mon);
|
2021-11-24 12:09:32 +00:00
|
|
|
qemuDomainObjExitMonitor(driver, vm);
|
2014-05-14 11:22:34 +00:00
|
|
|
|
|
|
|
if (ret < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
2014-12-11 20:11:10 +00:00
|
|
|
/* de-activate netdevs after stopping CPUs */
|
|
|
|
ignore_value(qemuInterfaceStopDevices(vm->def));
|
|
|
|
|
2014-08-28 14:37:38 +00:00
|
|
|
if (priv->job.current)
|
|
|
|
ignore_value(virTimeMillisNow(&priv->job.current->stopped));
|
|
|
|
|
2019-02-08 07:36:56 +00:00
|
|
|
/* The STOP event handler will change the domain state with the reason
|
|
|
|
* saved in priv->pausedReason and it will also emit corresponding domain
|
|
|
|
* lifecycle event.
|
|
|
|
*/
|
|
|
|
|
2014-05-14 11:22:34 +00:00
|
|
|
if (virDomainLockProcessPause(driver->lockManager, vm, &priv->lockState) < 0)
|
|
|
|
VIR_WARN("Unable to release lease on %s", vm->def->name);
|
|
|
|
VIR_DEBUG("Preserving lock state '%s'", NULLSTR(priv->lockState));
|
2011-05-04 09:07:01 +00:00
|
|
|
|
2014-05-14 11:22:34 +00:00
|
|
|
cleanup:
|
2018-10-09 13:41:51 +00:00
|
|
|
if (ret < 0)
|
|
|
|
priv->pausedReason = VIR_DOMAIN_PAUSED_UNKNOWN;
|
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
2017-04-25 16:20:30 +00:00
|
|
|
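/* On reconnect, re-reserve the interface names of the domain's tap and
 * macvtap devices so they are not handed out to new guests, and notify
 * the network driver about the actual device in use. */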
static void
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessNotifyNets(virDomainDef *def)
|
2011-07-04 06:27:12 +00:00
|
|
|
{
|
2013-07-08 14:09:33 +00:00
|
|
|
size_t i;
|
2019-12-20 21:16:30 +00:00
|
|
|
g_autoptr(virConnect) conn = NULL;
|
2011-07-04 06:27:12 +00:00
|
|
|
|
2013-07-08 14:09:33 +00:00
|
|
|
for (i = 0; i < def->nnets; i++) {
|
2021-03-11 07:16:13 +00:00
|
|
|
virDomainNetDef *net = def->nets[i];
|
2016-01-19 19:20:54 +00:00
|
|
|
/* keep others from trying to use the macvtap device name, but
|
|
|
|
* don't return error if this happens, since that causes the
|
|
|
|
* domain to be unceremoniously killed, which would be *very*
|
|
|
|
* impolite.
|
|
|
|
*/
|
util: assign tap device names using a monotonically increasing integer
When creating a standard tap device, if provided with an ifname that
contains "%d", rather than taking that literally as the name to use
for the new device, the kernel will instead use that string as a
template, and search for the lowest number that could be put in place
of %d and produce an otherwise unused and unique name for the new
device. For example, if there is no tap device name given in the XML,
libvirt will always send "vnet%d" as the device name, and the kernel
will create new devices named "vnet0", "vnet1", etc. If one of those
devices is deleted, creating a "hole" in the name list, the kernel
will always attempt to reuse the name in the hole first before using a
name with a higher number (i.e. it finds the lowest possible unused
number).
The problem with this, as described in the previous patch dealing with
macvtap device naming, is that it makes "immediate reuse" of a newly
freed tap device name *much* more common, and in the aftermath of
deleting a tap device, there is some other necessary cleanup of things
which are named based on the device name (nwfilter rules, bandwidth
rules, OVS switch ports, to name a few) that could end up stomping
over the top of the setup of a new device of the same name for a
different guest.
Since the kernel "create a name based on a template" functionality for
tap devices doesn't exist for macvtap, this patch for standard tap
devices is a bit different from the previous patch for macvtap - in
particular there was no previous "bitmap ID reservation system" or
overly-complex retry loop that needed to be removed. We simply find
and unused name, and pass that name on to the kernel instead of
"vnet%d".
This counter is also wrapped when either it gets to INT_MAX or if the
full name would overflow IFNAMSIZ-1 characters. In the case of
"vnet%d" and a 32 bit int, we would reach INT_MAX first, but possibly
someday someone will change the name from vnet to something else.
(NB: It is still possible for a user to provide their own
parameterized template name (e.g. "mytap%d") in the XML, and libvirt
will just pass that through to the kernel as it always has.)
Signed-off-by: Laine Stump <laine@redhat.com>
Reviewed-by: Michal Privoznik <mprivozn@redhat.com>
2020-08-24 01:20:13 +00:00
|
|
|
switch (virDomainNetGetActualType(net)) {
|
|
|
|
case VIR_DOMAIN_NET_TYPE_DIRECT:
|
2020-12-14 01:50:34 +00:00
|
|
|
virNetDevReserveName(net->ifname);
|
2020-08-24 01:20:13 +00:00
|
|
|
break;
|
|
|
|
case VIR_DOMAIN_NET_TYPE_BRIDGE:
|
|
|
|
case VIR_DOMAIN_NET_TYPE_NETWORK:
|
|
|
|
case VIR_DOMAIN_NET_TYPE_ETHERNET:
|
2020-12-14 01:50:33 +00:00
|
|
|
virNetDevReserveName(net->ifname);
|
2020-08-24 01:20:13 +00:00
|
|
|
break;
|
|
|
|
case VIR_DOMAIN_NET_TYPE_USER:
|
|
|
|
case VIR_DOMAIN_NET_TYPE_VHOSTUSER:
|
|
|
|
case VIR_DOMAIN_NET_TYPE_SERVER:
|
|
|
|
case VIR_DOMAIN_NET_TYPE_CLIENT:
|
|
|
|
case VIR_DOMAIN_NET_TYPE_MCAST:
|
|
|
|
case VIR_DOMAIN_NET_TYPE_INTERNAL:
|
|
|
|
case VIR_DOMAIN_NET_TYPE_HOSTDEV:
|
|
|
|
case VIR_DOMAIN_NET_TYPE_UDP:
|
2020-10-14 17:08:25 +00:00
|
|
|
case VIR_DOMAIN_NET_TYPE_VDPA:
|
2020-08-24 01:20:13 +00:00
|
|
|
case VIR_DOMAIN_NET_TYPE_LAST:
|
|
|
|
break;
|
|
|
|
}
|
2016-01-19 19:20:54 +00:00
|
|
|
|
2021-01-08 05:36:31 +00:00
|
|
|
if (net->type == VIR_DOMAIN_NET_TYPE_NETWORK && !conn)
|
|
|
|
conn = virGetConnectNetwork();
|
|
|
|
|
|
|
|
virDomainNetNotifyActualDevice(conn, def, net);
|
2011-07-04 06:27:12 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-08-24 13:29:24 +00:00
|
|
|
/* Attempt to instantiate the filters. Ignore failures because it's
|
|
|
|
* possible that someone deleted a filter binding and the associated
|
|
|
|
* filter while the guest was running and we don't want that action
|
|
|
|
* to cause failure to keep the guest running during the reconnection
|
|
|
|
* processing. Nor do we necessarily want other failures to do the
|
|
|
|
* same. We just log the error conditions, with the exception of the
* ignoreExists case (hence the 'true' flag below), which is not an error. */
|
|
|
|
static void
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessFiltersInstantiate(virDomainDef *def)
|
2011-02-14 16:09:39 +00:00
|
|
|
{
|
2013-07-08 14:09:33 +00:00
|
|
|
size_t i;
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2013-05-21 07:21:20 +00:00
|
|
|
for (i = 0; i < def->nnets; i++) {
|
2021-03-11 07:16:13 +00:00
|
|
|
virDomainNetDef *net = def->nets[i];
|
2011-02-14 16:09:39 +00:00
|
|
|
if ((net->filter) && (net->ifname)) {
|
2018-08-24 13:29:24 +00:00
|
|
|
if (virDomainConfNWFilterInstantiate(def->name, def->uuid, net,
|
|
|
|
true) < 0) {
|
|
|
|
VIR_WARN("filter '%s' instantiation for '%s' failed '%s'",
|
|
|
|
net->filter, net->ifname, virGetLastErrorMessage());
|
|
|
|
virResetLastError();
|
|
|
|
}
|
2011-02-14 16:09:39 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-05-05 11:50:25 +00:00
|
|
|
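/* Query the current run state from the QEMU monitor and, if it differs
 * from what libvirt recorded (e.g. the guest was paused, resumed or shut
 * down while the monitor was disconnected), update the domain state
 * accordingly. */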
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessUpdateState(virQEMUDriver *driver, virDomainObj *vm)
|
2011-05-05 11:50:25 +00:00
|
|
|
{
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
2011-05-05 11:50:25 +00:00
|
|
|
virDomainState state;
|
2011-09-27 09:42:04 +00:00
|
|
|
virDomainPausedReason reason;
|
2011-11-30 14:31:45 +00:00
|
|
|
virDomainState newState = VIR_DOMAIN_NOSTATE;
|
2015-02-16 14:17:00 +00:00
|
|
|
int oldReason;
|
2011-11-30 14:31:45 +00:00
|
|
|
int newReason;
|
2011-05-05 11:50:25 +00:00
|
|
|
bool running;
|
2019-12-20 21:16:28 +00:00
|
|
|
g_autofree char *msg = NULL;
|
2011-05-05 11:50:25 +00:00
|
|
|
int ret;
|
|
|
|
|
2013-02-06 18:17:20 +00:00
|
|
|
qemuDomainObjEnterMonitor(driver, vm);
|
2011-09-27 09:42:04 +00:00
|
|
|
ret = qemuMonitorGetStatus(priv->mon, &running, &reason);
|
2021-11-24 12:09:32 +00:00
|
|
|
qemuDomainObjExitMonitor(driver, vm);
|
2011-05-05 11:50:25 +00:00
|
|
|
|
2014-12-16 09:40:58 +00:00
|
|
|
if (ret < 0)
|
2011-05-05 11:50:25 +00:00
|
|
|
return -1;
|
|
|
|
|
2015-02-16 14:17:00 +00:00
|
|
|
state = virDomainObjGetState(vm, &oldReason);
|
2011-05-05 11:50:25 +00:00
|
|
|
|
2015-02-16 14:17:00 +00:00
|
|
|
if (running &&
|
|
|
|
(state == VIR_DOMAIN_SHUTOFF ||
|
|
|
|
(state == VIR_DOMAIN_PAUSED &&
|
|
|
|
oldReason == VIR_DOMAIN_PAUSED_STARTING_UP))) {
|
|
|
|
newState = VIR_DOMAIN_RUNNING;
|
|
|
|
newReason = VIR_DOMAIN_RUNNING_BOOTED;
|
2019-10-18 11:27:03 +00:00
|
|
|
msg = g_strdup("finished booting");
|
2015-02-16 14:17:00 +00:00
|
|
|
} else if (state == VIR_DOMAIN_PAUSED && running) {
|
2011-11-30 14:31:45 +00:00
|
|
|
newState = VIR_DOMAIN_RUNNING;
|
|
|
|
newReason = VIR_DOMAIN_RUNNING_UNPAUSED;
|
2019-10-18 11:27:03 +00:00
|
|
|
msg = g_strdup("was unpaused");
|
2011-05-05 11:50:25 +00:00
|
|
|
} else if (state == VIR_DOMAIN_RUNNING && !running) {
|
2011-11-30 14:31:45 +00:00
|
|
|
if (reason == VIR_DOMAIN_PAUSED_SHUTTING_DOWN) {
|
|
|
|
newState = VIR_DOMAIN_SHUTDOWN;
|
|
|
|
newReason = VIR_DOMAIN_SHUTDOWN_UNKNOWN;
|
2019-10-18 11:27:03 +00:00
|
|
|
msg = g_strdup("shutdown");
|
2013-07-29 16:54:57 +00:00
|
|
|
} else if (reason == VIR_DOMAIN_PAUSED_CRASHED) {
|
2013-06-07 10:23:34 +00:00
|
|
|
newState = VIR_DOMAIN_CRASHED;
|
|
|
|
newReason = VIR_DOMAIN_CRASHED_PANICKED;
|
2019-10-18 11:27:03 +00:00
|
|
|
msg = g_strdup("crashed");
|
2011-11-30 14:31:45 +00:00
|
|
|
} else {
|
|
|
|
newState = VIR_DOMAIN_PAUSED;
|
|
|
|
newReason = reason;
|
2019-10-22 13:26:14 +00:00
|
|
|
msg = g_strdup_printf("was paused (%s)",
|
|
|
|
virDomainPausedReasonTypeToString(reason));
|
2011-11-30 14:31:45 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (newState != VIR_DOMAIN_NOSTATE) {
|
|
|
|
VIR_DEBUG("Domain %s %s while its monitor was disconnected;"
|
|
|
|
" changing state to %s (%s)",
|
|
|
|
vm->def->name,
|
2013-05-20 09:23:13 +00:00
|
|
|
NULLSTR(msg),
|
2011-11-30 14:31:45 +00:00
|
|
|
virDomainStateTypeToString(newState),
|
|
|
|
virDomainStateReasonToString(newState, newReason));
|
|
|
|
virDomainObjSetState(vm, newState, newReason);
|
2011-05-05 11:50:25 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2011-07-19 00:27:33 +00:00
|
|
|
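/* Recover an incoming migration interrupted by a libvirtd restart:
 * depending on the migration phase the domain is either resumed (the
 * source is already gone) or killed (the migration never finished). */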
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessRecoverMigrationIn(virQEMUDriver *driver,
|
|
|
|
virDomainObj *vm,
|
2018-04-03 11:37:24 +00:00
|
|
|
const qemuDomainJobObj *job,
|
2016-01-14 14:55:18 +00:00
|
|
|
virDomainState state,
|
2016-01-13 15:29:58 +00:00
|
|
|
int reason)
|
2016-01-14 14:55:18 +00:00
|
|
|
{
|
2020-07-16 11:48:34 +00:00
|
|
|
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainJobPrivate *jobPriv = job->privateData;
|
2016-01-13 15:29:58 +00:00
|
|
|
bool postcopy = (state == VIR_DOMAIN_PAUSED &&
|
|
|
|
reason == VIR_DOMAIN_PAUSED_POSTCOPY_FAILED) ||
|
|
|
|
(state == VIR_DOMAIN_RUNNING &&
|
|
|
|
reason == VIR_DOMAIN_RUNNING_POSTCOPY);
|
|
|
|
|
2018-04-03 11:37:24 +00:00
|
|
|
switch ((qemuMigrationJobPhase) job->phase) {
|
2016-01-14 14:55:18 +00:00
|
|
|
case QEMU_MIGRATION_PHASE_NONE:
|
|
|
|
case QEMU_MIGRATION_PHASE_PERFORM2:
|
|
|
|
case QEMU_MIGRATION_PHASE_BEGIN3:
|
|
|
|
case QEMU_MIGRATION_PHASE_PERFORM3:
|
|
|
|
case QEMU_MIGRATION_PHASE_PERFORM3_DONE:
|
|
|
|
case QEMU_MIGRATION_PHASE_CONFIRM3_CANCELLED:
|
|
|
|
case QEMU_MIGRATION_PHASE_CONFIRM3:
|
|
|
|
case QEMU_MIGRATION_PHASE_LAST:
|
|
|
|
/* N/A for incoming migration */
|
|
|
|
break;
|
|
|
|
|
|
|
|
case QEMU_MIGRATION_PHASE_PREPARE:
|
|
|
|
VIR_DEBUG("Killing unfinished incoming migration for domain %s",
|
|
|
|
vm->def->name);
|
|
|
|
return -1;
|
2011-07-19 00:27:33 +00:00
|
|
|
|
2016-01-14 14:55:18 +00:00
|
|
|
case QEMU_MIGRATION_PHASE_FINISH2:
|
|
|
|
/* source domain is already killed so let's just resume the domain
|
|
|
|
* and hope we are all set */
|
|
|
|
VIR_DEBUG("Incoming migration finished, resuming domain %s",
|
|
|
|
vm->def->name);
|
2018-02-09 15:40:51 +00:00
|
|
|
if (qemuProcessStartCPUs(driver, vm,
|
2018-09-11 17:26:07 +00:00
|
|
|
VIR_DOMAIN_RUNNING_MIGRATED,
|
2016-01-14 14:55:18 +00:00
|
|
|
QEMU_ASYNC_JOB_NONE) < 0) {
|
|
|
|
VIR_WARN("Could not resume domain %s", vm->def->name);
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
|
|
|
case QEMU_MIGRATION_PHASE_FINISH3:
|
|
|
|
/* migration finished, we started resuming the domain but didn't
|
|
|
|
* confirm success or failure yet; killing it seems safest unless
|
2016-01-13 15:29:58 +00:00
|
|
|
* we already started guest CPUs or we were in post-copy mode */
|
|
|
|
if (postcopy) {
|
2018-02-12 17:11:41 +00:00
|
|
|
qemuMigrationAnyPostcopyFailed(driver, vm);
|
2016-01-13 15:29:58 +00:00
|
|
|
} else if (state != VIR_DOMAIN_RUNNING) {
|
2016-01-14 14:55:18 +00:00
|
|
|
VIR_DEBUG("Killing migrated domain %s", vm->def->name);
|
2011-07-19 00:27:33 +00:00
|
|
|
return -1;
|
2016-01-14 14:55:18 +00:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
2011-07-19 00:27:33 +00:00
|
|
|
|
2018-03-21 13:57:44 +00:00
|
|
|
qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_NONE,
|
2020-07-16 11:48:34 +00:00
|
|
|
jobPriv->migParams, job->apiFlags);
|
2016-01-14 14:55:18 +00:00
|
|
|
return 0;
|
|
|
|
}
|
2011-07-19 00:27:33 +00:00
|
|
|
|
2016-01-14 14:55:18 +00:00
|
|
|
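/* Recover an outgoing migration interrupted by a libvirtd restart:
 * cancel the migration and resume the domain where possible, mark the
 * domain as broken if post-copy had already started, or kill it if the
 * migration had in fact completed. */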
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessRecoverMigrationOut(virQEMUDriver *driver,
|
|
|
|
virDomainObj *vm,
|
2018-04-03 11:37:24 +00:00
|
|
|
const qemuDomainJobObj *job,
|
2016-01-14 14:55:18 +00:00
|
|
|
virDomainState state,
|
2016-11-23 13:07:53 +00:00
|
|
|
int reason,
|
|
|
|
unsigned int *stopFlags)
|
2016-01-14 14:55:18 +00:00
|
|
|
{
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainJobPrivate *jobPriv = job->privateData;
|
2016-01-13 15:29:58 +00:00
|
|
|
bool postcopy = state == VIR_DOMAIN_PAUSED &&
|
|
|
|
(reason == VIR_DOMAIN_PAUSED_POSTCOPY ||
|
|
|
|
reason == VIR_DOMAIN_PAUSED_POSTCOPY_FAILED);
|
2017-04-04 18:54:57 +00:00
|
|
|
bool resume = false;
|
2016-01-13 15:29:58 +00:00
|
|
|
|
2018-04-03 11:37:24 +00:00
|
|
|
switch ((qemuMigrationJobPhase) job->phase) {
|
2016-01-14 14:55:18 +00:00
|
|
|
case QEMU_MIGRATION_PHASE_NONE:
|
|
|
|
case QEMU_MIGRATION_PHASE_PREPARE:
|
|
|
|
case QEMU_MIGRATION_PHASE_FINISH2:
|
|
|
|
case QEMU_MIGRATION_PHASE_FINISH3:
|
|
|
|
case QEMU_MIGRATION_PHASE_LAST:
|
|
|
|
/* N/A for outgoing migration */
|
|
|
|
break;
|
|
|
|
|
|
|
|
case QEMU_MIGRATION_PHASE_BEGIN3:
|
|
|
|
/* nothing happened so far, just forget we were about to migrate the
|
|
|
|
* domain */
|
|
|
|
break;
|
|
|
|
|
|
|
|
case QEMU_MIGRATION_PHASE_PERFORM2:
|
|
|
|
case QEMU_MIGRATION_PHASE_PERFORM3:
|
|
|
|
/* migration is still in progress, let's cancel it and resume the
|
2016-01-13 15:29:58 +00:00
|
|
|
* domain; however we can only do that before migration enters
|
|
|
|
* post-copy mode
|
|
|
|
*/
|
|
|
|
if (postcopy) {
|
2018-02-12 17:11:41 +00:00
|
|
|
qemuMigrationAnyPostcopyFailed(driver, vm);
|
2016-01-13 15:29:58 +00:00
|
|
|
} else {
|
|
|
|
VIR_DEBUG("Cancelling unfinished migration of domain %s",
|
|
|
|
vm->def->name);
|
2018-02-12 17:11:41 +00:00
|
|
|
if (qemuMigrationSrcCancel(driver, vm) < 0) {
|
2016-01-13 15:29:58 +00:00
|
|
|
VIR_WARN("Could not cancel ongoing migration of domain %s",
|
|
|
|
vm->def->name);
|
|
|
|
}
|
2017-04-04 18:54:57 +00:00
|
|
|
resume = true;
|
2011-07-19 00:27:33 +00:00
|
|
|
}
|
2016-01-13 15:29:58 +00:00
|
|
|
break;
|
2011-07-19 00:27:33 +00:00
|
|
|
|
2016-01-14 14:55:18 +00:00
|
|
|
case QEMU_MIGRATION_PHASE_PERFORM3_DONE:
|
|
|
|
/* migration finished but we didn't have a chance to get the result
|
2016-01-13 15:29:58 +00:00
|
|
|
* of Finish3 step; third party needs to check what to do next; in
|
|
|
|
* post-copy mode we can use PAUSED_POSTCOPY_FAILED state for this
|
2016-01-14 14:55:18 +00:00
|
|
|
*/
|
2016-01-13 15:29:58 +00:00
|
|
|
if (postcopy)
|
2018-02-12 17:11:41 +00:00
|
|
|
qemuMigrationAnyPostcopyFailed(driver, vm);
|
2016-01-14 14:55:18 +00:00
|
|
|
break;
|
2011-07-19 00:27:33 +00:00
|
|
|
|
2016-01-14 14:55:18 +00:00
|
|
|
case QEMU_MIGRATION_PHASE_CONFIRM3_CANCELLED:
|
2016-01-13 15:29:58 +00:00
|
|
|
/* Finish3 failed, we need to resume the domain, but once we enter
|
|
|
|
* post-copy mode there's no way back, so let's just mark the domain
|
|
|
|
* as broken in that case
|
|
|
|
*/
|
|
|
|
if (postcopy) {
|
2018-02-12 17:11:41 +00:00
|
|
|
qemuMigrationAnyPostcopyFailed(driver, vm);
|
2016-01-13 15:29:58 +00:00
|
|
|
} else {
|
|
|
|
VIR_DEBUG("Resuming domain %s after failed migration",
|
|
|
|
vm->def->name);
|
2017-04-04 18:54:57 +00:00
|
|
|
resume = true;
|
2016-01-13 15:29:58 +00:00
|
|
|
}
|
|
|
|
break;
|
2011-07-19 00:27:33 +00:00
|
|
|
|
2016-01-14 14:55:18 +00:00
|
|
|
case QEMU_MIGRATION_PHASE_CONFIRM3:
|
|
|
|
/* migration completed, we need to kill the domain here */
|
2016-11-23 13:07:53 +00:00
|
|
|
*stopFlags |= VIR_QEMU_PROCESS_STOP_MIGRATED;
|
2016-01-14 14:55:18 +00:00
|
|
|
return -1;
|
|
|
|
}
|
2011-07-19 00:27:33 +00:00
|
|
|
|
2017-04-04 18:54:57 +00:00
|
|
|
if (resume) {
|
|
|
|
/* resume the domain but only if it was paused as a result of
|
|
|
|
* migration
|
|
|
|
*/
|
|
|
|
if (state == VIR_DOMAIN_PAUSED &&
|
|
|
|
(reason == VIR_DOMAIN_PAUSED_MIGRATION ||
|
|
|
|
reason == VIR_DOMAIN_PAUSED_UNKNOWN)) {
|
2018-02-09 15:40:51 +00:00
|
|
|
if (qemuProcessStartCPUs(driver, vm,
|
2018-09-11 17:26:07 +00:00
|
|
|
VIR_DOMAIN_RUNNING_MIGRATION_CANCELED,
|
2017-04-04 18:54:57 +00:00
|
|
|
QEMU_ASYNC_JOB_NONE) < 0) {
|
|
|
|
VIR_WARN("Could not resume domain %s", vm->def->name);
|
|
|
|
}
|
2011-07-19 00:27:33 +00:00
|
|
|
}
|
|
|
|
}
|
2017-04-04 18:54:57 +00:00
|
|
|
|
2018-03-21 13:57:44 +00:00
|
|
|
qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_NONE,
|
2020-07-16 11:48:34 +00:00
|
|
|
jobPriv->migParams, job->apiFlags);
|
2011-07-19 00:27:33 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2011-07-04 21:33:39 +00:00
|
|
|
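/* Recover the job state recorded in the status XML after a libvirtd
 * restart: clean up or resume interrupted async jobs (migration, save,
 * dump, snapshot, backup) and decide whether the domain can keep
 * running. */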
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessRecoverJob(virQEMUDriver *driver,
|
|
|
|
virDomainObj *vm,
|
2018-03-29 14:48:34 +00:00
|
|
|
const qemuDomainJobObj *job,
|
2016-11-23 13:07:53 +00:00
|
|
|
unsigned int *stopFlags)
|
2011-07-04 21:33:39 +00:00
|
|
|
{
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
2011-07-04 21:33:39 +00:00
|
|
|
virDomainState state;
|
|
|
|
int reason;
|
2019-11-22 16:19:49 +00:00
|
|
|
unsigned long long now;
|
2011-07-04 21:33:39 +00:00
|
|
|
|
|
|
|
state = virDomainObjGetState(vm, &reason);
|
|
|
|
|
|
|
|
switch (job->asyncJob) {
|
|
|
|
case QEMU_ASYNC_JOB_MIGRATION_OUT:
|
2018-04-03 11:37:24 +00:00
|
|
|
if (qemuProcessRecoverMigrationOut(driver, vm, job,
|
2016-11-23 13:07:53 +00:00
|
|
|
state, reason, stopFlags) < 0)
|
2016-01-14 14:55:18 +00:00
|
|
|
return -1;
|
|
|
|
break;
|
|
|
|
|
2011-07-04 21:33:39 +00:00
|
|
|
case QEMU_ASYNC_JOB_MIGRATION_IN:
|
2018-04-03 11:37:24 +00:00
|
|
|
if (qemuProcessRecoverMigrationIn(driver, vm, job,
|
2016-01-14 14:55:18 +00:00
|
|
|
state, reason) < 0)
|
2011-07-19 00:27:33 +00:00
|
|
|
return -1;
|
2011-07-04 21:33:39 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case QEMU_ASYNC_JOB_SAVE:
|
|
|
|
case QEMU_ASYNC_JOB_DUMP:
|
2012-10-08 14:34:19 +00:00
|
|
|
case QEMU_ASYNC_JOB_SNAPSHOT:
|
2011-07-28 23:18:24 +00:00
|
|
|
qemuDomainObjEnterMonitor(driver, vm);
|
2011-07-19 00:27:39 +00:00
|
|
|
ignore_value(qemuMonitorMigrateCancel(priv->mon));
|
2021-11-24 12:09:32 +00:00
|
|
|
qemuDomainObjExitMonitor(driver, vm);
|
2011-07-04 21:33:39 +00:00
|
|
|
/* resume the domain but only if it was paused as a result of
|
2012-10-08 14:34:19 +00:00
|
|
|
* running a migration-to-file operation. Although we are
|
|
|
|
* recovering an async job, this function is run at startup
|
|
|
|
* and must resume things using sync monitor connections. */
|
|
|
|
if (state == VIR_DOMAIN_PAUSED &&
|
|
|
|
((job->asyncJob == QEMU_ASYNC_JOB_DUMP &&
|
|
|
|
reason == VIR_DOMAIN_PAUSED_DUMP) ||
|
|
|
|
(job->asyncJob == QEMU_ASYNC_JOB_SAVE &&
|
|
|
|
reason == VIR_DOMAIN_PAUSED_SAVE) ||
|
|
|
|
(job->asyncJob == QEMU_ASYNC_JOB_SNAPSHOT &&
|
2017-01-05 09:34:17 +00:00
|
|
|
(reason == VIR_DOMAIN_PAUSED_SNAPSHOT ||
|
|
|
|
reason == VIR_DOMAIN_PAUSED_MIGRATION)) ||
|
2012-10-08 14:34:19 +00:00
|
|
|
reason == VIR_DOMAIN_PAUSED_UNKNOWN)) {
|
2018-02-09 15:40:51 +00:00
|
|
|
if (qemuProcessStartCPUs(driver, vm,
|
2018-09-11 17:26:07 +00:00
|
|
|
VIR_DOMAIN_RUNNING_SAVE_CANCELED,
|
2012-10-08 14:34:19 +00:00
|
|
|
QEMU_ASYNC_JOB_NONE) < 0) {
|
|
|
|
VIR_WARN("Could not resume domain '%s' after migration to file",
|
|
|
|
vm->def->name);
|
2011-07-04 21:33:39 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
2015-10-21 08:55:43 +00:00
|
|
|
case QEMU_ASYNC_JOB_START:
|
|
|
|
/* Already handled in VIR_DOMAIN_PAUSED_STARTING_UP check. */
|
|
|
|
break;
|
|
|
|
|
2019-11-22 16:19:49 +00:00
|
|
|
case QEMU_ASYNC_JOB_BACKUP:
|
|
|
|
ignore_value(virTimeMillisNow(&now));
|
|
|
|
|
|
|
|
/* Restore the config of the async job which is not persisted */
|
2021-12-01 09:41:41 +00:00
|
|
|
priv->job.jobsQueued++;
|
2019-11-22 16:19:49 +00:00
|
|
|
priv->job.asyncJob = QEMU_ASYNC_JOB_BACKUP;
|
2021-02-24 18:10:21 +00:00
|
|
|
priv->job.asyncOwnerAPI = g_strdup(virThreadJobGet());
|
2019-11-22 16:19:49 +00:00
|
|
|
priv->job.asyncStarted = now;
|
|
|
|
|
|
|
|
qemuDomainObjSetAsyncJobMask(vm, (QEMU_JOB_DEFAULT_MASK |
|
|
|
|
JOB_MASK(QEMU_JOB_SUSPEND) |
|
|
|
|
JOB_MASK(QEMU_JOB_MODIFY)));
|
|
|
|
|
|
|
|
/* We reset the job parameters for backup so that the job will look
|
|
|
|
* active. This is possible because we are able to recover the state
|
|
|
|
* of blockjobs and also the backup job allows all sub-job types */
|
|
|
|
priv->job.current = g_new0(qemuDomainJobInfo, 1);
|
|
|
|
priv->job.current->operation = VIR_DOMAIN_JOB_OPERATION_BACKUP;
|
|
|
|
priv->job.current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP;
|
|
|
|
priv->job.current->status = QEMU_DOMAIN_JOB_STATUS_ACTIVE;
|
|
|
|
priv->job.current->started = now;
|
|
|
|
break;
|
|
|
|
|
2011-07-04 21:33:39 +00:00
|
|
|
case QEMU_ASYNC_JOB_NONE:
|
|
|
|
case QEMU_ASYNC_JOB_LAST:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!virDomainObjIsActive(vm))
|
|
|
|
return -1;
|
|
|
|
|
2012-04-06 17:42:34 +00:00
|
|
|
/* In case any special handling is added for job type that has been ignored
|
|
|
|
* before, QEMU_DOMAIN_TRACK_JOBS (from qemu_domain.h) needs to be updated
|
|
|
|
* for the job to be properly tracked in domain state XML.
|
|
|
|
*/
|
2011-07-04 21:33:39 +00:00
|
|
|
switch (job->active) {
|
|
|
|
case QEMU_JOB_QUERY:
|
|
|
|
/* harmless */
|
|
|
|
break;
|
|
|
|
|
|
|
|
case QEMU_JOB_DESTROY:
|
|
|
|
VIR_DEBUG("Domain %s should have already been destroyed",
|
|
|
|
vm->def->name);
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
case QEMU_JOB_SUSPEND:
|
|
|
|
/* mostly harmless */
|
|
|
|
break;
|
|
|
|
|
|
|
|
case QEMU_JOB_MODIFY:
|
|
|
|
/* XXX depending on the command we may be in an inconsistent state and
|
|
|
|
* we should probably fall back to "monitor error" state and refuse to
|
|
|
|
*/
|
|
|
|
break;
|
|
|
|
|
2011-07-19 00:27:36 +00:00
|
|
|
case QEMU_JOB_MIGRATION_OP:
|
2011-07-19 00:27:39 +00:00
|
|
|
case QEMU_JOB_ABORT:
|
2011-07-04 21:33:39 +00:00
|
|
|
case QEMU_JOB_ASYNC:
|
|
|
|
case QEMU_JOB_ASYNC_NESTED:
|
|
|
|
/* async job was already handled above */
|
|
|
|
case QEMU_JOB_NONE:
|
|
|
|
case QEMU_JOB_LAST:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2013-07-19 13:08:29 +00:00
|
|
|
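/* Refresh the list of devices from QEMU and remove from the domain
 * definition any device that disappeared while libvirtd was not
 * watching (e.g. a device unplug that completed in the meantime). */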
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessUpdateDevices(virQEMUDriver *driver,
|
|
|
|
virDomainObj *vm)
|
2013-07-19 13:08:29 +00:00
|
|
|
{
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
2013-07-19 13:08:29 +00:00
|
|
|
virDomainDeviceDef dev;
|
2021-02-03 17:39:01 +00:00
|
|
|
g_auto(GStrv) old = g_steal_pointer(&priv->qemuDevices);
|
|
|
|
GStrv tmp;
|
2013-07-19 13:08:29 +00:00
|
|
|
|
2014-08-12 02:54:42 +00:00
|
|
|
if (qemuDomainUpdateDeviceList(driver, vm, QEMU_ASYNC_JOB_NONE) < 0)
|
2021-02-03 17:39:01 +00:00
|
|
|
return -1;
|
2013-07-19 13:08:29 +00:00
|
|
|
|
2021-02-03 17:39:01 +00:00
|
|
|
if (!old)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
for (tmp = old; *tmp; tmp++) {
|
2021-02-03 17:39:47 +00:00
|
|
|
if (!g_strv_contains((const char **) priv->qemuDevices, *tmp) &&
|
2021-02-03 17:39:01 +00:00
|
|
|
virDomainDefFindDevice(vm->def, *tmp, &dev, false) == 0 &&
|
|
|
|
qemuDomainRemoveDevice(driver, vm, &dev))
|
|
|
|
return -1;
|
2013-07-19 13:08:29 +00:00
|
|
|
}
|
|
|
|
|
2021-02-03 17:39:01 +00:00
|
|
|
return 0;
|
2013-07-19 13:08:29 +00:00
|
|
|
}
|
|
|
|
|
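/*
 * qemuDomainPerfRestart:
 *
 * Re-creates the perf context for a running domain and re-enables every
 * perf event that is turned on in the domain definition.  Failure to
 * re-enable an individual event is not fatal; the event is simply flipped
 * back to 'no' in the definition.
 */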
2016-03-28 13:30:31 +00:00
|
|
|
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainPerfRestart(virDomainObj *vm)
|
2016-03-28 13:30:31 +00:00
|
|
|
{
|
|
|
|
size_t i;
|
2021-03-11 07:16:13 +00:00
|
|
|
virDomainDef *def = vm->def;
|
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
2016-03-28 13:30:31 +00:00
|
|
|
|
2016-04-27 13:22:33 +00:00
|
|
|
if (!(priv->perf = virPerfNew()))
|
2016-03-28 13:30:31 +00:00
|
|
|
return -1;
|
|
|
|
|
|
|
|
for (i = 0; i < VIR_PERF_EVENT_LAST; i++) {
|
2016-06-28 12:37:29 +00:00
|
|
|
if (def->perf.events[i] &&
|
|
|
|
def->perf.events[i] == VIR_TRISTATE_BOOL_YES) {
|
2016-04-27 12:58:32 +00:00
|
|
|
|
|
|
|
/* Failure to re-enable the perf event should not be fatal */
|
|
|
|
if (virPerfEventEnable(priv->perf, i, vm->pid) < 0)
|
2016-06-28 12:37:29 +00:00
|
|
|
def->perf.events[i] = VIR_TRISTATE_BOOL_NO;
|
2016-03-28 13:30:31 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
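/*
 * qemuProcessReconnectCheckMemAliasOrderMismatch:
 *
 * Checks whether the "dimmN" alias index of every memory device still
 * matches its DIMM slot number.  If any of them differ, the
 * memAliasOrderMismatch flag is recorded in the domain private data so
 * that later memory hotplug can take the mismatch into account.
 */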
2016-10-31 15:49:49 +00:00
|
|
|
|
|
|
|
static void
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessReconnectCheckMemAliasOrderMismatch(virDomainObj *vm)
|
2016-10-31 15:49:49 +00:00
|
|
|
{
|
|
|
|
size_t i;
|
|
|
|
int aliasidx;
|
2021-03-11 07:16:13 +00:00
|
|
|
virDomainDef *def = vm->def;
|
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
2016-10-31 15:49:49 +00:00
|
|
|
|
|
|
|
if (!virDomainDefHasMemoryHotplug(def) || def->nmems == 0)
|
|
|
|
return;
|
|
|
|
|
|
|
|
for (i = 0; i < def->nmems; i++) {
|
|
|
|
aliasidx = qemuDomainDeviceAliasIndex(&def->mems[i]->info, "dimm");
|
|
|
|
|
|
|
|
if (def->mems[i]->info.addr.dimm.slot != aliasidx) {
|
|
|
|
priv->memAliasOrderMismatch = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2021-02-05 12:34:35 +00:00
|
|
|
static bool
|
|
|
|
qemuProcessDomainMemoryDefNeedHugepagesPath(const virDomainMemoryDef *mem,
|
|
|
|
const long system_pagesize)
|
|
|
|
{
|
|
|
|
switch (mem->model) {
|
|
|
|
case VIR_DOMAIN_MEMORY_MODEL_DIMM:
|
2021-01-18 15:13:12 +00:00
|
|
|
case VIR_DOMAIN_MEMORY_MODEL_VIRTIO_MEM:
|
2021-02-05 12:34:35 +00:00
|
|
|
return mem->pagesize && mem->pagesize != system_pagesize;
|
|
|
|
|
|
|
|
case VIR_DOMAIN_MEMORY_MODEL_NONE:
|
|
|
|
case VIR_DOMAIN_MEMORY_MODEL_NVDIMM:
|
|
|
|
case VIR_DOMAIN_MEMORY_MODEL_VIRTIO_PMEM:
|
|
|
|
case VIR_DOMAIN_MEMORY_MODEL_LAST:
|
|
|
|
/* None of these can be backed by hugepages. */
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
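/*
 * qemuProcessNeedHugepagesPath:
 *
 * Returns true when the per-domain hugepages directory has to exist,
 * i.e. when the domain memory is file backed or configured with a page
 * size different from the system page size, or when any memory device
 * (including the optional extra @mem device) needs hugepages backing.
 */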
2017-06-07 11:38:14 +00:00
|
|
|
static bool
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessNeedHugepagesPath(virDomainDef *def,
|
|
|
|
virDomainMemoryDef *mem)
|
2017-06-07 11:38:14 +00:00
|
|
|
{
|
|
|
|
const long system_pagesize = virGetSystemPageSizeKB();
|
|
|
|
size_t i;
|
|
|
|
|
|
|
|
if (def->mem.source == VIR_DOMAIN_MEMORY_SOURCE_FILE)
|
|
|
|
return true;
|
|
|
|
|
|
|
|
for (i = 0; i < def->mem.nhugepages; i++) {
|
|
|
|
if (def->mem.hugepages[i].size != system_pagesize)
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < def->nmems; i++) {
|
2021-02-05 12:34:35 +00:00
|
|
|
if (qemuProcessDomainMemoryDefNeedHugepagesPath(def->mems[i], system_pagesize))
|
2017-06-07 11:38:14 +00:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2017-06-07 12:47:37 +00:00
|
|
|
if (mem &&
|
2021-02-05 12:34:35 +00:00
|
|
|
qemuProcessDomainMemoryDefNeedHugepagesPath(mem, system_pagesize))
|
2017-06-07 12:47:37 +00:00
|
|
|
return true;
|
|
|
|
|
2017-06-07 11:38:14 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
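/*
 * qemuProcessNeedMemoryBackingPath:
 *
 * Returns true when the domain needs the memory backing directory,
 * i.e. when guest memory is explicitly file backed, when a non-default
 * access mode is requested globally, per NUMA node or per memory device,
 * or when the optional @mem device being added requires it.
 */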
2017-11-07 14:20:12 +00:00
|
|
|
static bool
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessNeedMemoryBackingPath(virDomainDef *def,
|
|
|
|
virDomainMemoryDef *mem)
|
2017-11-07 14:20:12 +00:00
|
|
|
{
|
|
|
|
size_t i;
|
|
|
|
size_t numaNodes;
|
|
|
|
|
|
|
|
if (def->mem.source == VIR_DOMAIN_MEMORY_SOURCE_FILE ||
|
|
|
|
def->mem.access != VIR_DOMAIN_MEMORY_ACCESS_DEFAULT)
|
|
|
|
return true;
|
|
|
|
|
|
|
|
numaNodes = virDomainNumaGetNodeCount(def->numa);
|
|
|
|
for (i = 0; i < numaNodes; i++) {
|
|
|
|
if (virDomainNumaGetNodeMemoryAccessMode(def->numa, i)
|
|
|
|
!= VIR_DOMAIN_MEMORY_ACCESS_DEFAULT)
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2021-05-20 14:29:05 +00:00
|
|
|
for (i = 0; i < def->nmems; i++) {
|
|
|
|
if (def->mems[i]->access != VIR_DOMAIN_MEMORY_ACCESS_DEFAULT)
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2021-02-05 12:54:26 +00:00
|
|
|
if (mem) {
|
|
|
|
switch (mem->model) {
|
|
|
|
case VIR_DOMAIN_MEMORY_MODEL_DIMM:
|
2021-01-18 15:13:12 +00:00
|
|
|
case VIR_DOMAIN_MEMORY_MODEL_VIRTIO_MEM:
|
2021-02-05 12:54:26 +00:00
|
|
|
if (mem->access != VIR_DOMAIN_MEMORY_ACCESS_DEFAULT) {
|
|
|
|
/* No need to check for access mode on the target node,
|
|
|
|
* it was checked for in the previous loop. */
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
|
|
|
case VIR_DOMAIN_MEMORY_MODEL_NONE:
|
|
|
|
case VIR_DOMAIN_MEMORY_MODEL_NVDIMM:
|
|
|
|
case VIR_DOMAIN_MEMORY_MODEL_VIRTIO_PMEM:
|
|
|
|
case VIR_DOMAIN_MEMORY_MODEL_LAST:
|
|
|
|
/* Backed by user provided path. Not stored in memory
|
|
|
|
* backing dir anyway. */
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2017-11-07 14:20:12 +00:00
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2017-11-07 14:19:43 +00:00
|
|
|
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessBuildDestroyMemoryPathsImpl(virQEMUDriver *driver,
|
|
|
|
virDomainObj *vm,
|
2017-11-07 14:19:43 +00:00
|
|
|
const char *path,
|
|
|
|
bool build)
|
|
|
|
{
|
|
|
|
if (build) {
|
|
|
|
if (virFileExists(path))
|
|
|
|
return 0;
|
|
|
|
|
2021-02-26 08:17:30 +00:00
|
|
|
if (g_mkdir_with_parents(path, 0700) < 0) {
|
2017-11-07 14:19:43 +00:00
|
|
|
virReportSystemError(errno,
|
|
|
|
_("Unable to create %s"),
|
|
|
|
path);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2018-09-05 09:19:14 +00:00
|
|
|
if (qemuSecurityDomainSetPathLabel(driver, vm, path, true) < 0)
|
2017-11-07 14:19:43 +00:00
|
|
|
return -1;
|
|
|
|
} else {
|
2017-11-07 15:03:40 +00:00
|
|
|
if (virFileDeleteTree(path) < 0)
|
|
|
|
return -1;
|
2017-11-07 14:19:43 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
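/*
 * qemuProcessBuildDestroyMemoryPaths:
 *
 * When @build is true, creates (and security-labels) the per-domain
 * hugepages and memory backing directories if the domain configuration
 * (or the extra @mem device) requires them.  When @build is false, the
 * directories are removed unconditionally.
 */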
2017-06-07 12:47:37 +00:00
|
|
|
int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessBuildDestroyMemoryPaths(virQEMUDriver *driver,
|
|
|
|
virDomainObj *vm,
|
|
|
|
virDomainMemoryDef *mem,
|
2017-11-07 14:19:43 +00:00
|
|
|
bool build)
|
2016-11-22 12:21:51 +00:00
|
|
|
{
|
2019-12-20 21:16:30 +00:00
|
|
|
|
|
|
|
g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
|
2016-11-22 12:21:51 +00:00
|
|
|
size_t i;
|
2017-11-07 14:20:12 +00:00
|
|
|
bool shouldBuildHP = false;
|
|
|
|
bool shouldBuildMB = false;
|
2016-11-22 12:21:51 +00:00
|
|
|
|
2017-11-07 14:20:12 +00:00
|
|
|
if (build) {
|
|
|
|
shouldBuildHP = qemuProcessNeedHugepagesPath(vm->def, mem);
|
|
|
|
shouldBuildMB = qemuProcessNeedMemoryBackingPath(vm->def, mem);
|
|
|
|
}
|
2017-06-07 11:38:14 +00:00
|
|
|
|
2017-11-07 14:20:12 +00:00
|
|
|
if (!build || shouldBuildHP) {
|
2016-11-22 12:21:51 +00:00
|
|
|
for (i = 0; i < cfg->nhugetlbfs; i++) {
|
2019-12-20 21:16:28 +00:00
|
|
|
g_autofree char *path = NULL;
|
2020-03-20 18:27:26 +00:00
|
|
|
path = qemuGetDomainHugepagePath(driver, vm->def, &cfg->hugetlbfs[i]);
|
2016-11-22 12:21:51 +00:00
|
|
|
|
2017-11-07 14:20:12 +00:00
|
|
|
if (!path)
|
2019-12-20 21:16:31 +00:00
|
|
|
return -1;
|
2016-11-22 12:21:51 +00:00
|
|
|
|
2018-09-05 09:19:14 +00:00
|
|
|
if (qemuProcessBuildDestroyMemoryPathsImpl(driver, vm,
|
2017-11-07 14:20:12 +00:00
|
|
|
path, build) < 0)
|
2019-12-20 21:16:31 +00:00
|
|
|
return -1;
|
2016-11-22 12:21:51 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-11-07 14:20:12 +00:00
|
|
|
if (!build || shouldBuildMB) {
|
2019-12-20 21:16:28 +00:00
|
|
|
g_autofree char *path = NULL;
|
2020-03-23 12:33:32 +00:00
|
|
|
if (qemuGetMemoryBackingDomainPath(driver, vm->def, &path) < 0)
|
2019-12-20 21:16:31 +00:00
|
|
|
return -1;
|
2017-11-07 14:20:12 +00:00
|
|
|
|
2018-09-05 09:19:14 +00:00
|
|
|
if (qemuProcessBuildDestroyMemoryPathsImpl(driver, vm,
|
2017-11-07 14:20:12 +00:00
|
|
|
path, build) < 0)
|
2019-12-20 21:16:31 +00:00
|
|
|
return -1;
|
2017-11-07 14:20:12 +00:00
|
|
|
}
|
|
|
|
|
2019-12-20 21:16:31 +00:00
|
|
|
return 0;
|
2016-11-22 12:21:51 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
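/*
 * qemuProcessDestroyMemoryBackingPath:
 *
 * Removes the memory backing file of a single memory device; a missing
 * file (ENOENT) is not treated as an error.
 */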
2018-01-11 12:02:52 +00:00
|
|
|
int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessDestroyMemoryBackingPath(virQEMUDriver *driver,
|
|
|
|
virDomainObj *vm,
|
|
|
|
virDomainMemoryDef *mem)
|
2018-01-11 12:02:52 +00:00
|
|
|
{
|
2019-12-20 21:16:28 +00:00
|
|
|
g_autofree char *path = NULL;
|
2018-01-11 12:02:52 +00:00
|
|
|
|
2020-03-23 12:33:32 +00:00
|
|
|
if (qemuGetMemoryBackingPath(driver, vm->def, mem->info.alias, &path) < 0)
|
2019-12-20 21:16:31 +00:00
|
|
|
return -1;
|
2018-01-11 12:02:52 +00:00
|
|
|
|
|
|
|
if (unlink(path) < 0 &&
|
|
|
|
errno != ENOENT) {
|
|
|
|
virReportSystemError(errno, _("Unable to remove %s"), path);
|
2019-12-20 21:16:31 +00:00
|
|
|
return -1;
|
2018-01-11 12:02:52 +00:00
|
|
|
}
|
|
|
|
|
2019-12-20 21:16:31 +00:00
|
|
|
return 0;
|
2018-01-11 12:02:52 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
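/* Allocate or pre-fill the VNC port numbers.  With @allocate false only
 * the default port (5900) is filled in for autoport configs; otherwise
 * real ports are acquired from the port allocator, including a websocket
 * port when its auto-allocation was requested (websocket == -1). */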
2017-07-11 13:53:58 +00:00
|
|
|
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessVNCAllocatePorts(virQEMUDriver *driver,
|
|
|
|
virDomainGraphicsDef *graphics,
|
2017-07-11 13:53:58 +00:00
|
|
|
bool allocate)
|
2011-02-14 16:09:39 +00:00
|
|
|
{
|
2017-07-11 13:53:58 +00:00
|
|
|
unsigned short port;
|
2014-11-05 13:28:57 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
if (!allocate) {
|
|
|
|
if (graphics->data.vnc.autoport)
|
|
|
|
graphics->data.vnc.port = 5900;
|
2014-12-04 15:13:31 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
return 0;
|
|
|
|
}
|
2016-06-15 11:48:19 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
if (graphics->data.vnc.autoport) {
|
|
|
|
if (virPortAllocatorAcquire(driver->remotePorts, &port) < 0)
|
|
|
|
return -1;
|
|
|
|
graphics->data.vnc.port = port;
|
|
|
|
}
|
2016-02-12 08:58:22 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
if (graphics->data.vnc.websocket == -1) {
|
|
|
|
if (virPortAllocatorAcquire(driver->webSocketPorts, &port) < 0)
|
|
|
|
return -1;
|
|
|
|
graphics->data.vnc.websocket = port;
|
|
|
|
graphics->data.vnc.websocketGenerated = true;
|
|
|
|
}
|
2014-12-03 13:22:05 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
return 0;
|
|
|
|
}
|
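/* Allocate or pre-fill the SPICE plaintext and TLS port numbers depending
 * on the per-channel configuration and the default channel mode.  With
 * @allocate false only placeholder ports (5901/5902) are filled in; auto
 * allocation of the TLS port fails if spice_tls is disabled in qemu.conf. */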
2016-03-29 22:22:46 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessSPICEAllocatePorts(virQEMUDriver *driver,
|
|
|
|
virDomainGraphicsDef *graphics,
|
2017-07-11 13:53:58 +00:00
|
|
|
bool allocate)
|
|
|
|
{
|
2019-12-20 21:16:30 +00:00
|
|
|
g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
|
2017-07-11 13:53:58 +00:00
|
|
|
unsigned short port = 0;
|
|
|
|
unsigned short tlsPort;
|
|
|
|
size_t i;
|
|
|
|
int defaultMode = graphics->data.spice.defaultMode;
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
bool needTLSPort = false;
|
|
|
|
bool needPort = false;
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
if (graphics->data.spice.autoport) {
|
|
|
|
/* check if tlsPort or port need allocation */
|
|
|
|
for (i = 0; i < VIR_DOMAIN_GRAPHICS_SPICE_CHANNEL_LAST; i++) {
|
|
|
|
switch (graphics->data.spice.channels[i]) {
|
|
|
|
case VIR_DOMAIN_GRAPHICS_SPICE_CHANNEL_MODE_SECURE:
|
|
|
|
needTLSPort = true;
|
|
|
|
break;
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
case VIR_DOMAIN_GRAPHICS_SPICE_CHANNEL_MODE_INSECURE:
|
|
|
|
needPort = true;
|
|
|
|
break;
|
2013-05-03 18:07:29 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
case VIR_DOMAIN_GRAPHICS_SPICE_CHANNEL_MODE_ANY:
|
|
|
|
/* default mode will be used */
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
switch (defaultMode) {
|
|
|
|
case VIR_DOMAIN_GRAPHICS_SPICE_CHANNEL_MODE_SECURE:
|
|
|
|
needTLSPort = true;
|
|
|
|
break;
|
2013-03-21 14:40:29 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
case VIR_DOMAIN_GRAPHICS_SPICE_CHANNEL_MODE_INSECURE:
|
|
|
|
needPort = true;
|
|
|
|
break;
|
2016-03-28 13:30:31 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
case VIR_DOMAIN_GRAPHICS_SPICE_CHANNEL_MODE_ANY:
|
|
|
|
if (cfg->spiceTLS)
|
|
|
|
needTLSPort = true;
|
|
|
|
needPort = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2013-05-03 18:07:37 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
if (!allocate) {
|
|
|
|
if (needPort || graphics->data.spice.port == -1)
|
|
|
|
graphics->data.spice.port = 5901;
|
2013-05-03 18:07:37 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
if (needTLSPort || graphics->data.spice.tlsPort == -1)
|
|
|
|
graphics->data.spice.tlsPort = 5902;
|
2014-05-16 13:16:18 +00:00
|
|
|
|
2019-12-20 21:16:31 +00:00
|
|
|
return 0;
|
2013-02-19 12:27:43 +00:00
|
|
|
}
|
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
if (needPort || graphics->data.spice.port == -1) {
|
|
|
|
if (virPortAllocatorAcquire(driver->remotePorts, &port) < 0)
|
2019-12-20 21:16:31 +00:00
|
|
|
return -1;
|
2011-05-05 11:50:25 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
graphics->data.spice.port = port;
|
|
|
|
|
|
|
|
if (!graphics->data.spice.autoport)
|
|
|
|
graphics->data.spice.portReserved = true;
|
2011-07-12 09:45:16 +00:00
|
|
|
}
|
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
if (needTLSPort || graphics->data.spice.tlsPort == -1) {
|
|
|
|
if (!cfg->spiceTLS) {
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
|
|
|
|
_("Auto allocation of spice TLS port requested "
|
|
|
|
"but spice TLS is disabled in qemu.conf"));
|
2019-12-20 21:16:31 +00:00
|
|
|
return -1;
|
2017-07-11 13:53:58 +00:00
|
|
|
}
|
2011-05-04 11:55:38 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
if (virPortAllocatorAcquire(driver->remotePorts, &tlsPort) < 0)
|
2019-12-20 21:16:31 +00:00
|
|
|
return -1;
|
2011-09-27 12:56:17 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
graphics->data.spice.tlsPort = tlsPort;
|
2016-11-22 12:21:51 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
if (!graphics->data.spice.autoport)
|
|
|
|
graphics->data.spice.tlsPortReserved = true;
|
2016-11-03 20:33:32 +00:00
|
|
|
}
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2019-12-20 21:16:31 +00:00
|
|
|
return 0;
|
2017-07-11 13:53:58 +00:00
|
|
|
}
|
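/*
 * qemuProcessVerifyHypervFeatures:
 *
 * Checks that every Hyper-V enlightenment enabled in the domain definition
 * is actually present in the guest CPU data reported by QEMU.  A missing
 * 'relaxed' or 'vapic' feature only produces a warning; any other missing
 * feature is a fatal error.
 */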
2016-08-05 12:48:27 +00:00
|
|
|
|
2017-06-06 05:39:25 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessVerifyHypervFeatures(virDomainDef *def,
|
|
|
|
virCPUData *cpu)
|
2017-07-11 13:53:58 +00:00
|
|
|
{
|
|
|
|
size_t i;
|
|
|
|
int rc;
|
2016-10-31 15:49:49 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
for (i = 0; i < VIR_DOMAIN_HYPERV_LAST; i++) {
|
2019-12-20 21:16:28 +00:00
|
|
|
g_autofree char *cpuFeature = NULL;
|
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
/* always supported string property */
|
2019-07-30 10:33:42 +00:00
|
|
|
if (i == VIR_DOMAIN_HYPERV_VENDOR_ID ||
|
|
|
|
i == VIR_DOMAIN_HYPERV_SPINLOCKS)
|
2017-07-11 13:53:58 +00:00
|
|
|
continue;
|
2016-01-08 16:03:48 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
if (def->hyperv_features[i] != VIR_TRISTATE_SWITCH_ON)
|
|
|
|
continue;
|
2011-05-05 11:50:25 +00:00
|
|
|
|
2019-10-22 13:26:14 +00:00
|
|
|
cpuFeature = g_strdup_printf("hv-%s", virDomainHypervTypeToString(i));
|
2012-05-28 14:04:31 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
rc = virCPUDataCheckFeature(cpu, cpuFeature);
|
2012-05-28 14:04:31 +00:00
|
|
|
|
2019-08-09 14:31:40 +00:00
|
|
|
if (rc < 0) {
|
2017-07-11 13:53:58 +00:00
|
|
|
return -1;
|
2019-08-09 14:31:40 +00:00
|
|
|
} else if (rc == 1) {
|
|
|
|
if (i == VIR_DOMAIN_HYPERV_STIMER) {
|
|
|
|
if (def->hyperv_stimer_direct != VIR_TRISTATE_SWITCH_ON)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
rc = virCPUDataCheckFeature(cpu, VIR_CPU_x86_HV_STIMER_DIRECT);
|
|
|
|
if (rc < 0)
|
|
|
|
return -1;
|
|
|
|
else if (rc == 1)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
|
|
|
|
_("host doesn't support hyperv stimer '%s' feature"),
|
|
|
|
"direct");
|
|
|
|
return -1;
|
|
|
|
}
|
2017-07-11 13:53:58 +00:00
|
|
|
continue;
|
2019-08-09 14:31:40 +00:00
|
|
|
}
|
2012-05-28 14:04:31 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
switch ((virDomainHyperv) i) {
|
|
|
|
case VIR_DOMAIN_HYPERV_RELAXED:
|
|
|
|
case VIR_DOMAIN_HYPERV_VAPIC:
|
|
|
|
VIR_WARN("host doesn't support hyperv '%s' feature",
|
|
|
|
virDomainHypervTypeToString(i));
|
|
|
|
break;
|
2012-10-31 19:03:55 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
case VIR_DOMAIN_HYPERV_VPINDEX:
|
|
|
|
case VIR_DOMAIN_HYPERV_RUNTIME:
|
|
|
|
case VIR_DOMAIN_HYPERV_SYNIC:
|
|
|
|
case VIR_DOMAIN_HYPERV_STIMER:
|
|
|
|
case VIR_DOMAIN_HYPERV_RESET:
|
2018-08-09 13:14:19 +00:00
|
|
|
case VIR_DOMAIN_HYPERV_FREQUENCIES:
|
2018-08-09 13:14:20 +00:00
|
|
|
case VIR_DOMAIN_HYPERV_REENLIGHTENMENT:
|
2018-08-09 13:14:21 +00:00
|
|
|
case VIR_DOMAIN_HYPERV_TLBFLUSH:
|
2018-11-14 22:46:05 +00:00
|
|
|
case VIR_DOMAIN_HYPERV_IPI:
|
2018-11-14 22:46:07 +00:00
|
|
|
case VIR_DOMAIN_HYPERV_EVMCS:
|
2017-07-11 13:53:58 +00:00
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
|
|
|
|
_("host doesn't support hyperv '%s' feature"),
|
|
|
|
virDomainHypervTypeToString(i));
|
|
|
|
return -1;
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2019-07-30 10:33:42 +00:00
|
|
|
case VIR_DOMAIN_HYPERV_SPINLOCKS:
|
2017-07-11 13:53:58 +00:00
|
|
|
case VIR_DOMAIN_HYPERV_VENDOR_ID:
|
|
|
|
case VIR_DOMAIN_HYPERV_LAST:
|
|
|
|
break;
|
2011-09-21 19:02:44 +00:00
|
|
|
}
|
2011-02-14 16:09:39 +00:00
|
|
|
}
|
2017-07-11 13:53:58 +00:00
|
|
|
|
|
|
|
return 0;
|
2011-08-16 10:51:36 +00:00
|
|
|
}
|
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
|
2013-01-11 13:54:15 +00:00
|
|
|
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessVerifyKVMFeatures(virDomainDef *def,
|
|
|
|
virCPUData *cpu)
|
2011-08-16 10:51:36 +00:00
|
|
|
{
|
2017-07-11 13:53:58 +00:00
|
|
|
int rc = 0;
|
2011-08-16 10:51:36 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
if (def->features[VIR_DOMAIN_FEATURE_PVSPINLOCK] != VIR_TRISTATE_SWITCH_ON)
|
2013-10-28 11:49:18 +00:00
|
|
|
return 0;
|
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
rc = virCPUDataCheckFeature(cpu, VIR_CPU_x86_KVM_PV_UNHALT);
|
|
|
|
|
|
|
|
if (rc <= 0) {
|
|
|
|
if (rc == 0)
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
|
|
|
|
_("host doesn't support paravirtual spinlocks"));
|
2013-01-11 13:54:15 +00:00
|
|
|
return -1;
|
2017-07-11 13:53:58 +00:00
|
|
|
}
|
2011-08-16 10:51:36 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
return 0;
|
|
|
|
}
|
2011-08-16 10:51:36 +00:00
|
|
|
|
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessVerifyCPUFeatures(virDomainDef *def,
|
|
|
|
virCPUData *cpu)
|
2017-07-11 13:53:58 +00:00
|
|
|
{
|
|
|
|
int rc;
|
2011-09-16 13:44:43 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
rc = virCPUCheckFeature(def->os.arch, def->cpu, "invtsc");
|
2011-08-16 10:51:36 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
if (rc < 0) {
|
2014-12-03 13:22:05 +00:00
|
|
|
return -1;
|
2017-07-11 13:53:58 +00:00
|
|
|
} else if (rc == 1) {
|
|
|
|
rc = virCPUDataCheckFeature(cpu, "invtsc");
|
|
|
|
if (rc <= 0) {
|
|
|
|
if (rc == 0) {
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
|
|
|
|
_("host doesn't support invariant TSC"));
|
|
|
|
}
|
|
|
|
return -1;
|
|
|
|
}
|
2014-12-03 13:22:05 +00:00
|
|
|
}
|
2011-08-16 10:51:36 +00:00
|
|
|
|
2013-01-11 13:54:15 +00:00
|
|
|
return 0;
|
2011-02-14 16:09:39 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2019-06-17 21:38:46 +00:00
|
|
|
static const char *
|
|
|
|
qemuProcessTranslateCPUFeatures(const char *name,
|
|
|
|
void *opaque)
|
|
|
|
{
|
2021-03-11 07:16:13 +00:00
|
|
|
virQEMUCaps *qemuCaps = opaque;
|
2019-06-17 21:38:46 +00:00
|
|
|
|
|
|
|
return virQEMUCapsCPUFeatureFromQEMU(qemuCaps, name);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2022-02-07 10:45:18 +00:00
|
|
|
/* returns the QOM path to the first vcpu */
|
|
|
|
static const char *
|
2022-02-07 11:29:47 +00:00
|
|
|
qemuProcessGetVCPUQOMPath(virDomainObj *vm)
|
2022-02-07 10:45:18 +00:00
|
|
|
{
|
2022-02-07 11:29:47 +00:00
|
|
|
virDomainVcpuDef *vcpu = virDomainDefGetVcpu(vm->def, 0);
|
|
|
|
qemuDomainVcpuPrivate *vcpupriv;
|
|
|
|
|
|
|
|
if (vcpu &&
|
|
|
|
(vcpupriv = QEMU_DOMAIN_VCPU_PRIVATE(vcpu)) &&
|
|
|
|
vcpupriv->qomPath)
|
|
|
|
return vcpupriv->qomPath;
|
|
|
|
|
2022-02-07 10:45:18 +00:00
|
|
|
return "/machine/unattached/device[0]";
|
|
|
|
}
|
|
|
|
|
|
|
|
|
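/*
 * qemuProcessFetchGuestCPU:
 *
 * Queries QEMU via the monitor for the actually enabled and disabled
 * features of the guest CPU.  The generic query is used when QEMU reports
 * unavailable-features (QEMU_CAPS_CPU_UNAVAILABLE_FEATURES); otherwise the
 * x86-specific CPUID query is used and non-x86 guests are silently skipped.
 */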
2016-05-09 12:30:27 +00:00
|
|
|
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessFetchGuestCPU(virQEMUDriver *driver,
|
|
|
|
virDomainObj *vm,
|
2017-07-11 13:53:58 +00:00
|
|
|
qemuDomainAsyncJob asyncJob,
|
2021-03-11 07:16:13 +00:00
|
|
|
virCPUData **enabled,
|
|
|
|
virCPUData **disabled)
|
2013-04-30 14:26:43 +00:00
|
|
|
{
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
2020-05-22 19:56:19 +00:00
|
|
|
g_autoptr(virCPUData) dataEnabled = NULL;
|
|
|
|
g_autoptr(virCPUData) dataDisabled = NULL;
|
2022-02-07 11:29:47 +00:00
|
|
|
const char *cpuQOMPath = qemuProcessGetVCPUQOMPath(vm);
|
2019-06-17 21:38:46 +00:00
|
|
|
bool generic;
|
2017-07-11 13:53:58 +00:00
|
|
|
int rc;
|
2013-04-30 14:26:43 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
*enabled = NULL;
|
|
|
|
*disabled = NULL;
|
2016-04-25 13:24:48 +00:00
|
|
|
|
2019-06-17 21:38:46 +00:00
|
|
|
generic = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_CPU_UNAVAILABLE_FEATURES);
|
|
|
|
|
|
|
|
if (!generic && !ARCH_IS_X86(vm->def->os.arch))
|
2016-04-25 13:24:48 +00:00
|
|
|
return 0;
|
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
|
2020-05-22 19:56:19 +00:00
|
|
|
return -1;
|
2013-04-30 14:26:43 +00:00
|
|
|
|
2019-06-17 21:38:46 +00:00
|
|
|
if (generic) {
|
|
|
|
rc = qemuMonitorGetGuestCPU(priv->mon,
|
|
|
|
vm->def->os.arch,
|
2022-02-07 10:45:18 +00:00
|
|
|
cpuQOMPath,
|
2019-06-17 21:38:46 +00:00
|
|
|
qemuProcessTranslateCPUFeatures, priv->qemuCaps,
|
|
|
|
&dataEnabled, &dataDisabled);
|
|
|
|
} else {
|
2022-02-07 10:45:18 +00:00
|
|
|
rc = qemuMonitorGetGuestCPUx86(priv->mon, cpuQOMPath, &dataEnabled, &dataDisabled);
|
2019-06-17 21:38:46 +00:00
|
|
|
}
|
2013-04-30 14:26:43 +00:00
|
|
|
|
2021-11-24 12:09:32 +00:00
|
|
|
qemuDomainObjExitMonitor(driver, vm);
|
2017-07-11 13:53:58 +00:00
|
|
|
|
|
|
|
if (rc == -1)
|
2020-05-22 19:56:19 +00:00
|
|
|
return -1;
|
2017-07-11 13:53:58 +00:00
|
|
|
|
2020-05-22 19:56:19 +00:00
|
|
|
*enabled = g_steal_pointer(&dataEnabled);
|
|
|
|
*disabled = g_steal_pointer(&dataDisabled);
|
2013-04-30 14:26:43 +00:00
|
|
|
return 0;
|
|
|
|
}
|
2013-04-22 14:26:57 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
|
2016-05-09 12:30:27 +00:00
|
|
|
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessVerifyCPU(virDomainObj *vm,
|
|
|
|
virCPUData *cpu)
|
2013-04-22 14:26:57 +00:00
|
|
|
{
|
2021-03-11 07:16:13 +00:00
|
|
|
virDomainDef *def = vm->def;
|
2013-04-23 05:01:38 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
if (!cpu)
|
|
|
|
return 0;
|
2017-07-11 11:26:12 +00:00
|
|
|
|
|
|
|
if (qemuProcessVerifyKVMFeatures(def, cpu) < 0 ||
|
|
|
|
qemuProcessVerifyHypervFeatures(def, cpu) < 0)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
if (!def->cpu ||
|
|
|
|
(def->cpu->mode == VIR_CPU_MODE_CUSTOM &&
|
|
|
|
!def->cpu->model))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (qemuProcessVerifyCPUFeatures(def, cpu) < 0)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
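/*
 * qemuProcessUpdateLiveGuestCPU:
 *
 * Updates the live CPU definition according to the enabled/disabled
 * feature sets fetched from QEMU.  If QEMU modified the CPU and no
 * original CPU was provided by migration, restore or snapshot revert,
 * the pre-update definition is stored as priv->origCPU.
 */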
2017-07-11 11:51:17 +00:00
|
|
|
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessUpdateLiveGuestCPU(virDomainObj *vm,
|
|
|
|
virCPUData *enabled,
|
|
|
|
virCPUData *disabled)
|
2017-07-11 11:51:17 +00:00
|
|
|
{
|
2021-03-11 07:16:13 +00:00
|
|
|
virDomainDef *def = vm->def;
|
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
2020-05-22 19:56:19 +00:00
|
|
|
g_autoptr(virCPUDef) orig = NULL;
|
2017-07-11 11:51:17 +00:00
|
|
|
int rc;
|
|
|
|
|
|
|
|
if (!enabled)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (!def->cpu ||
|
|
|
|
(def->cpu->mode == VIR_CPU_MODE_CUSTOM &&
|
|
|
|
!def->cpu->model))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (!(orig = virCPUDefCopy(def->cpu)))
|
2020-05-22 19:56:19 +00:00
|
|
|
return -1;
|
2017-07-11 11:51:17 +00:00
|
|
|
|
|
|
|
if ((rc = virCPUUpdateLive(def->os.arch, def->cpu, enabled, disabled)) < 0) {
|
2020-05-22 19:56:19 +00:00
|
|
|
return -1;
|
2017-07-11 11:51:17 +00:00
|
|
|
} else if (rc == 0) {
|
|
|
|
/* Store the original CPU in priv if QEMU changed it and we didn't
|
|
|
|
* get the original CPU via migration, restore, or snapshot revert.
|
|
|
|
*/
|
|
|
|
if (!priv->origCPU && !virCPUDefIsEqual(def->cpu, orig, false))
|
2019-10-16 11:43:18 +00:00
|
|
|
priv->origCPU = g_steal_pointer(&orig);
|
2017-07-11 11:51:17 +00:00
|
|
|
|
|
|
|
def->cpu->check = VIR_CPU_CHECK_FULL;
|
|
|
|
}
|
|
|
|
|
2020-05-22 19:56:19 +00:00
|
|
|
return 0;
|
2017-07-11 11:51:17 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
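/* Fetch the guest CPU from QEMU, verify that the requested KVM, Hyper-V
 * and invtsc features are actually provided, and update the live CPU
 * definition accordingly. */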
2017-03-10 23:45:37 +00:00
|
|
|
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessUpdateAndVerifyCPU(virQEMUDriver *driver,
|
|
|
|
virDomainObj *vm,
|
2017-03-13 11:32:02 +00:00
|
|
|
qemuDomainAsyncJob asyncJob)
|
2013-10-14 09:35:00 +00:00
|
|
|
{
|
2022-02-07 09:57:11 +00:00
|
|
|
g_autoptr(virCPUData) cpu = NULL;
|
|
|
|
g_autoptr(virCPUData) disabled = NULL;
|
2013-10-14 09:35:00 +00:00
|
|
|
|
2017-07-11 11:18:45 +00:00
|
|
|
if (qemuProcessFetchGuestCPU(driver, vm, asyncJob, &cpu, &disabled) < 0)
|
2022-02-07 09:57:11 +00:00
|
|
|
return -1;
|
2013-10-14 09:35:00 +00:00
|
|
|
|
2017-07-11 11:26:12 +00:00
|
|
|
if (qemuProcessVerifyCPU(vm, cpu) < 0)
|
2022-02-07 09:57:11 +00:00
|
|
|
return -1;
|
2014-05-06 11:55:44 +00:00
|
|
|
|
2017-07-11 11:51:17 +00:00
|
|
|
if (qemuProcessUpdateLiveGuestCPU(vm, cpu, disabled) < 0)
|
2022-02-07 09:57:11 +00:00
|
|
|
return -1;
|
2013-10-14 09:35:00 +00:00
|
|
|
|
2022-02-07 09:57:11 +00:00
|
|
|
return 0;
|
2013-10-14 09:35:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2019-10-10 14:44:40 +00:00
|
|
|
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessFetchCPUDefinitions(virQEMUDriver *driver,
|
|
|
|
virDomainObj *vm,
|
2019-10-10 14:44:40 +00:00
|
|
|
qemuDomainAsyncJob asyncJob,
|
2021-03-11 07:16:13 +00:00
|
|
|
virDomainCapsCPUModels **cpuModels)
|
2017-12-07 10:23:50 +00:00
|
|
|
{
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
2019-12-20 21:16:30 +00:00
|
|
|
g_autoptr(virDomainCapsCPUModels) models = NULL;
|
2019-10-10 14:44:40 +00:00
|
|
|
int rc;
|
2017-12-07 10:23:50 +00:00
|
|
|
|
|
|
|
if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
|
2019-12-20 21:16:30 +00:00
|
|
|
return -1;
|
2017-12-07 10:23:50 +00:00
|
|
|
|
2019-10-09 08:14:59 +00:00
|
|
|
rc = virQEMUCapsFetchCPUModels(priv->mon, vm->def->os.arch, &models);
|
2017-12-07 10:23:50 +00:00
|
|
|
|
2021-11-24 12:11:52 +00:00
|
|
|
qemuDomainObjExitMonitor(driver, vm);
|
|
|
|
if (rc < 0)
|
2019-12-20 21:16:30 +00:00
|
|
|
return -1;
|
2017-12-07 10:23:50 +00:00
|
|
|
|
2019-12-20 21:16:30 +00:00
|
|
|
*cpuModels = g_steal_pointer(&models);
|
2019-10-10 14:44:40 +00:00
|
|
|
return 0;
|
2017-12-07 10:23:50 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2017-07-11 12:16:40 +00:00
|
|
|
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessUpdateCPU(virQEMUDriver *driver,
|
|
|
|
virDomainObj *vm,
|
2017-07-11 12:16:40 +00:00
|
|
|
qemuDomainAsyncJob asyncJob)
|
|
|
|
{
|
2020-05-22 19:56:19 +00:00
|
|
|
g_autoptr(virCPUData) cpu = NULL;
|
|
|
|
g_autoptr(virCPUData) disabled = NULL;
|
2019-12-20 21:16:30 +00:00
|
|
|
g_autoptr(virDomainCapsCPUModels) models = NULL;
|
2017-07-11 12:16:40 +00:00
|
|
|
|
2018-01-10 14:56:21 +00:00
|
|
|
/* The host CPU model comes from host caps rather than QEMU caps so
|
|
|
|
* fallback must be allowed no matter what the user specified in the XML.
|
|
|
|
*/
|
|
|
|
vm->def->cpu->fallback = VIR_CPU_FALLBACK_ALLOW;
|
|
|
|
|
2017-07-11 12:16:40 +00:00
|
|
|
if (qemuProcessFetchGuestCPU(driver, vm, asyncJob, &cpu, &disabled) < 0)
|
2020-05-22 19:56:19 +00:00
|
|
|
return -1;
|
2017-07-11 12:16:40 +00:00
|
|
|
|
|
|
|
if (qemuProcessUpdateLiveGuestCPU(vm, cpu, disabled) < 0)
|
2020-05-22 19:56:19 +00:00
|
|
|
return -1;
|
2017-07-11 12:16:40 +00:00
|
|
|
|
2019-10-10 14:44:40 +00:00
|
|
|
if (qemuProcessFetchCPUDefinitions(driver, vm, asyncJob, &models) < 0 ||
|
2017-12-07 10:23:50 +00:00
|
|
|
virCPUTranslate(vm->def->os.arch, vm->def->cpu, models) < 0)
|
2020-05-22 19:56:19 +00:00
|
|
|
return -1;
|
2017-07-11 12:16:40 +00:00
|
|
|
|
2020-05-22 19:56:19 +00:00
|
|
|
return 0;
|
2017-07-11 12:16:40 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2022-02-09 08:59:24 +00:00
|
|
|
struct qemuPrepareNVRAMHelperData {
|
|
|
|
int srcFD;
|
|
|
|
const char *srcPath;
|
|
|
|
};
|
|
|
|
|
|
|
|
static int
|
|
|
|
qemuPrepareNVRAMHelper(int dstFD,
|
|
|
|
const char *dstPath,
|
|
|
|
const void *opaque)
|
|
|
|
{
|
|
|
|
const struct qemuPrepareNVRAMHelperData *data = opaque;
|
|
|
|
ssize_t r;
|
|
|
|
|
|
|
|
do {
|
|
|
|
char buf[1024];
|
|
|
|
|
|
|
|
if ((r = saferead(data->srcFD, buf, sizeof(buf))) < 0) {
|
|
|
|
virReportSystemError(errno,
|
|
|
|
_("Unable to read from file '%s'"),
|
|
|
|
data->srcPath);
|
|
|
|
return -2;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (safewrite(dstFD, buf, r) < 0) {
|
|
|
|
virReportSystemError(errno,
|
|
|
|
_("Unable to write to file '%s'"),
|
|
|
|
dstPath);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
} while (r);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
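/*
 * qemuPrepareNVRAM:
 *
 * Creates the per-domain NVRAM file by copying the master variable store
 * template (either the one configured in the domain XML or the one
 * associated with the loader path in the driver configuration).  Nothing
 * is done if the NVRAM file already exists, unless @reset_nvram requests
 * overwriting it.
 */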
2014-08-07 14:59:21 +00:00
|
|
|
static int
|
2021-05-21 11:41:29 +00:00
|
|
|
qemuPrepareNVRAM(virQEMUDriver *driver,
|
2022-02-03 16:10:45 +00:00
|
|
|
virDomainObj *vm,
|
|
|
|
bool reset_nvram)
|
2014-08-07 14:59:21 +00:00
|
|
|
{
|
2021-05-21 11:41:29 +00:00
|
|
|
g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
|
2022-02-11 08:28:11 +00:00
|
|
|
VIR_AUTOCLOSE srcFD = -1;
|
2021-03-11 07:16:13 +00:00
|
|
|
virDomainLoaderDef *loader = vm->def->os.loader;
|
2016-03-09 15:10:54 +00:00
|
|
|
const char *master_nvram_path;
|
2022-02-09 08:59:24 +00:00
|
|
|
struct qemuPrepareNVRAMHelperData data;
|
2014-08-07 14:59:21 +00:00
|
|
|
|
2022-02-03 16:10:45 +00:00
|
|
|
if (!loader || !loader->nvram ||
|
|
|
|
(virFileExists(loader->nvram) && !reset_nvram))
|
2014-08-07 14:59:21 +00:00
|
|
|
return 0;
|
|
|
|
|
2016-03-09 15:10:54 +00:00
|
|
|
master_nvram_path = loader->templt;
|
|
|
|
if (!loader->templt) {
|
|
|
|
size_t i;
|
2016-05-17 22:45:27 +00:00
|
|
|
for (i = 0; i < cfg->nfirmwares; i++) {
|
|
|
|
if (STREQ(cfg->firmwares[i]->name, loader->path)) {
|
|
|
|
master_nvram_path = cfg->firmwares[i]->nvram;
|
2016-03-09 15:10:54 +00:00
|
|
|
break;
|
2014-08-07 14:59:21 +00:00
|
|
|
}
|
|
|
|
}
|
2016-03-09 15:10:54 +00:00
|
|
|
}
|
2014-08-07 14:59:21 +00:00
|
|
|
|
2016-03-09 15:10:54 +00:00
|
|
|
if (!master_nvram_path) {
|
|
|
|
virReportError(VIR_ERR_OPERATION_FAILED,
|
|
|
|
_("unable to find any master var store for "
|
|
|
|
"loader: %s"), loader->path);
|
2022-02-11 08:31:48 +00:00
|
|
|
return -1;
|
2016-03-09 15:10:54 +00:00
|
|
|
}
|
2014-08-07 14:59:21 +00:00
|
|
|
|
2016-03-09 15:10:54 +00:00
|
|
|
if ((srcFD = virFileOpenAs(master_nvram_path, O_RDONLY,
|
|
|
|
0, -1, -1, 0)) < 0) {
|
|
|
|
virReportSystemError(-srcFD,
|
|
|
|
_("Failed to open file '%s'"),
|
|
|
|
master_nvram_path);
|
2022-02-11 08:31:48 +00:00
|
|
|
return -1;
|
2016-03-09 15:10:54 +00:00
|
|
|
}
|
2021-05-21 11:41:29 +00:00
|
|
|
|
2022-02-09 08:59:24 +00:00
|
|
|
data.srcFD = srcFD;
|
|
|
|
data.srcPath = master_nvram_path;
|
2022-02-03 16:15:51 +00:00
|
|
|
|
2022-02-09 08:59:24 +00:00
|
|
|
if (virFileRewrite(loader->nvram,
|
|
|
|
S_IRUSR | S_IWUSR,
|
|
|
|
cfg->user, cfg->group,
|
|
|
|
qemuPrepareNVRAMHelper,
|
|
|
|
&data) < 0) {
|
2022-02-11 08:31:48 +00:00
|
|
|
return -1;
|
2014-08-07 14:59:21 +00:00
|
|
|
}
|
|
|
|
|
2022-02-11 08:31:48 +00:00
|
|
|
return 0;
|
2014-08-07 14:59:21 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2015-02-02 10:28:30 +00:00
|
|
|
static void
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuLogOperation(virDomainObj *vm,
|
2015-02-02 10:28:30 +00:00
|
|
|
const char *msg,
|
2021-03-11 07:16:13 +00:00
|
|
|
virCommand *cmd,
|
|
|
|
qemuDomainLogContext *logCtxt)
|
2015-02-02 10:28:30 +00:00
|
|
|
{
|
2019-12-20 21:16:28 +00:00
|
|
|
g_autofree char *timestamp = NULL;
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
2015-02-02 10:28:30 +00:00
|
|
|
int qemuVersion = virQEMUCapsGetVersion(priv->qemuCaps);
|
|
|
|
const char *package = virQEMUCapsGetPackage(priv->qemuCaps);
|
2019-12-20 21:16:28 +00:00
|
|
|
g_autofree char *hostname = virGetHostname();
|
2018-05-18 11:17:38 +00:00
|
|
|
struct utsname uts;
|
|
|
|
|
|
|
|
uname(&uts);
|
2015-02-02 10:28:30 +00:00
|
|
|
|
|
|
|
if ((timestamp = virTimeStringNow()) == NULL)
|
2019-12-20 21:16:28 +00:00
|
|
|
return;
|
2015-02-02 10:28:30 +00:00
|
|
|
|
2015-12-04 17:25:22 +00:00
|
|
|
if (qemuDomainLogContextWrite(logCtxt,
|
2018-05-18 11:17:38 +00:00
|
|
|
"%s: %s %s, qemu version: %d.%d.%d%s, kernel: %s, hostname: %s\n",
|
2015-11-12 13:02:46 +00:00
|
|
|
timestamp, msg, VIR_LOG_VERSION_STRING,
|
|
|
|
(qemuVersion / 1000000) % 1000,
|
|
|
|
(qemuVersion / 1000) % 1000,
|
|
|
|
qemuVersion % 1000,
|
2019-02-12 16:25:06 +00:00
|
|
|
NULLSTR_EMPTY(package),
|
2018-05-18 11:17:38 +00:00
|
|
|
uts.release,
|
2019-02-12 16:25:06 +00:00
|
|
|
NULLSTR_EMPTY(hostname)) < 0)
|
2019-12-20 21:16:28 +00:00
|
|
|
return;
|
2015-02-02 10:28:30 +00:00
|
|
|
|
2015-11-12 13:02:46 +00:00
|
|
|
if (cmd) {
|
2019-12-20 21:16:28 +00:00
|
|
|
g_autofree char *args = virCommandToString(cmd, true);
|
2015-11-12 13:02:46 +00:00
|
|
|
qemuDomainLogContextWrite(logCtxt, "%s\n", args);
|
|
|
|
}
|
2015-02-02 10:28:30 +00:00
|
|
|
}
|
|
|
|
|
2015-11-06 17:41:37 +00:00
|
|
|
|
|
|
|
void
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessIncomingDefFree(qemuProcessIncomingDef *inc)
|
2015-11-06 17:41:37 +00:00
|
|
|
{
|
|
|
|
if (!inc)
|
|
|
|
return;
|
|
|
|
|
2021-02-03 19:36:01 +00:00
|
|
|
g_free(inc->address);
|
|
|
|
g_free(inc->launchURI);
|
|
|
|
g_free(inc->deferredURI);
|
|
|
|
g_free(inc);
|
2015-11-06 17:41:37 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This function does not copy @path, the caller is responsible for keeping
|
|
|
|
* the @path pointer valid during the lifetime of the allocated
|
|
|
|
* qemuProcessIncomingDef structure.
|
2017-06-19 15:00:28 +00:00
|
|
|
*
|
|
|
|
* The caller is responsible for closing @fd, calling
|
|
|
|
* qemuProcessIncomingDefFree will NOT close it.
|
2015-11-06 17:41:37 +00:00
|
|
|
*/
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessIncomingDef *
|
|
|
|
qemuProcessIncomingDefNew(virQEMUCaps *qemuCaps,
|
2016-01-07 23:07:37 +00:00
|
|
|
const char *listenAddress,
|
2015-11-06 17:41:37 +00:00
|
|
|
const char *migrateFrom,
|
|
|
|
int fd,
|
|
|
|
const char *path)
|
|
|
|
{
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessIncomingDef *inc = NULL;
|
2015-11-06 17:41:37 +00:00
|
|
|
|
2018-02-12 17:11:41 +00:00
|
|
|
if (qemuMigrationDstCheckProtocol(qemuCaps, migrateFrom) < 0)
|
2015-11-06 17:41:37 +00:00
|
|
|
return NULL;
|
|
|
|
|
2020-10-05 10:27:13 +00:00
|
|
|
inc = g_new0(qemuProcessIncomingDef, 1);
|
2015-11-06 17:41:37 +00:00
|
|
|
|
2019-10-20 11:49:46 +00:00
|
|
|
inc->address = g_strdup(listenAddress);
|
2016-01-07 23:07:37 +00:00
|
|
|
|
2018-02-12 17:11:41 +00:00
|
|
|
inc->launchURI = qemuMigrationDstGetURI(migrateFrom, fd);
|
2015-11-06 17:41:37 +00:00
|
|
|
if (!inc->launchURI)
|
|
|
|
goto error;
|
|
|
|
|
2015-11-11 17:02:23 +00:00
|
|
|
if (virQEMUCapsGet(qemuCaps, QEMU_CAPS_INCOMING_DEFER)) {
|
|
|
|
inc->deferredURI = inc->launchURI;
|
2019-10-20 11:49:46 +00:00
|
|
|
inc->launchURI = g_strdup("defer");
|
2015-11-11 17:02:23 +00:00
|
|
|
}
|
|
|
|
|
2015-11-06 17:41:37 +00:00
|
|
|
inc->fd = fd;
|
|
|
|
inc->path = path;
|
|
|
|
|
|
|
|
return inc;
|
|
|
|
|
|
|
|
error:
|
|
|
|
qemuProcessIncomingDefFree(inc);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2015-10-21 08:55:43 +00:00
|
|
|
/*
|
|
|
|
* This function starts a new QEMU_ASYNC_JOB_START async job. The user is
|
|
|
|
* responsible for calling qemuProcessEndJob to stop this job and for passing
|
|
|
|
* QEMU_ASYNC_JOB_START as @asyncJob argument to any function requiring this
|
|
|
|
* parameter between qemuProcessBeginJob and qemuProcessEndJob.
|
|
|
|
*/
|
|
|
|
int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessBeginJob(virQEMUDriver *driver,
|
|
|
|
virDomainObj *vm,
|
2018-03-21 12:01:59 +00:00
|
|
|
virDomainJobOperation operation,
|
|
|
|
unsigned long apiFlags)
|
2015-10-21 08:55:43 +00:00
|
|
|
{
|
2017-04-26 10:00:09 +00:00
|
|
|
if (qemuDomainObjBeginAsyncJob(driver, vm, QEMU_ASYNC_JOB_START,
|
2018-03-21 12:01:59 +00:00
|
|
|
operation, apiFlags) < 0)
|
2015-10-21 08:55:43 +00:00
|
|
|
return -1;
|
|
|
|
|
|
|
|
qemuDomainObjSetAsyncJobMask(vm, QEMU_JOB_NONE);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
void
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessEndJob(virQEMUDriver *driver,
|
|
|
|
virDomainObj *vm)
|
2015-10-21 08:55:43 +00:00
|
|
|
{
|
|
|
|
qemuDomainObjEndAsyncJob(driver, vm);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
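/* Invoke the qemu hook script for the given operation/sub-operation,
 * passing it the domain XML; a no-op when no hook script is installed. */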
2015-10-30 16:59:43 +00:00
|
|
|
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessStartHook(virQEMUDriver *driver,
|
|
|
|
virDomainObj *vm,
|
2015-10-30 16:59:43 +00:00
|
|
|
virHookQemuOpType op,
|
|
|
|
virHookSubopType subop)
|
|
|
|
{
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
2019-12-20 21:16:28 +00:00
|
|
|
g_autofree char *xml = NULL;
|
2015-10-30 16:59:43 +00:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (!virHookPresent(VIR_HOOK_DRIVER_QEMU))
|
|
|
|
return 0;
|
|
|
|
|
2019-08-05 14:05:20 +00:00
|
|
|
if (!(xml = qemuDomainDefFormatXML(driver, priv->qemuCaps, vm->def, 0)))
|
2015-10-30 16:59:43 +00:00
|
|
|
return -1;
|
|
|
|
|
|
|
|
ret = virHookCall(VIR_HOOK_DRIVER_QEMU, vm->def->name, op, subop,
|
|
|
|
NULL, xml, NULL);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
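/* Mark statically configured (or, on reconnect, previously auto-allocated)
 * VNC/SPICE ports as in use in the port allocator so they are not handed
 * out to other domains. */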
2015-10-30 17:16:53 +00:00
|
|
|
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessGraphicsReservePorts(virDomainGraphicsDef *graphics,
|
2017-09-18 13:39:58 +00:00
|
|
|
bool reconnect)
|
2015-10-30 17:16:53 +00:00
|
|
|
{
|
2021-03-11 07:16:13 +00:00
|
|
|
virDomainGraphicsListenDef *glisten;
|
2016-08-13 19:03:15 +00:00
|
|
|
|
|
|
|
if (graphics->nListens <= 0)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
glisten = &graphics->listens[0];
|
|
|
|
|
|
|
|
if (glisten->type != VIR_DOMAIN_GRAPHICS_LISTEN_TYPE_ADDRESS &&
|
|
|
|
glisten->type != VIR_DOMAIN_GRAPHICS_LISTEN_TYPE_NETWORK)
|
|
|
|
return 0;
|
|
|
|
|
2016-11-22 11:09:31 +00:00
|
|
|
switch (graphics->type) {
|
|
|
|
case VIR_DOMAIN_GRAPHICS_TYPE_VNC:
|
2017-09-18 13:39:58 +00:00
|
|
|
if (!graphics->data.vnc.autoport ||
|
|
|
|
reconnect) {
|
2018-02-06 09:09:10 +00:00
|
|
|
if (virPortAllocatorSetUsed(graphics->data.vnc.port) < 0)
|
2016-11-22 11:09:31 +00:00
|
|
|
return -1;
|
|
|
|
graphics->data.vnc.portReserved = true;
|
|
|
|
}
|
2016-11-22 11:09:32 +00:00
|
|
|
if (graphics->data.vnc.websocket > 0 &&
|
2018-02-06 09:09:10 +00:00
|
|
|
virPortAllocatorSetUsed(graphics->data.vnc.websocket) < 0)
|
2016-11-22 11:09:32 +00:00
|
|
|
return -1;
|
2016-11-22 11:09:31 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case VIR_DOMAIN_GRAPHICS_TYPE_SPICE:
|
2017-09-18 13:39:58 +00:00
|
|
|
if (graphics->data.spice.autoport && !reconnect)
|
2016-11-22 11:09:31 +00:00
|
|
|
return 0;
|
2015-10-30 17:16:53 +00:00
|
|
|
|
2016-08-13 19:01:38 +00:00
|
|
|
if (graphics->data.spice.port > 0) {
|
2018-02-06 09:09:10 +00:00
|
|
|
if (virPortAllocatorSetUsed(graphics->data.spice.port) < 0)
|
2016-05-09 12:20:08 +00:00
|
|
|
return -1;
|
2016-08-13 19:01:38 +00:00
|
|
|
graphics->data.spice.portReserved = true;
|
|
|
|
}
|
2015-10-30 17:16:53 +00:00
|
|
|
|
2016-08-13 19:01:38 +00:00
|
|
|
if (graphics->data.spice.tlsPort > 0) {
|
2018-02-06 09:09:10 +00:00
|
|
|
if (virPortAllocatorSetUsed(graphics->data.spice.tlsPort) < 0)
|
2016-08-13 19:01:38 +00:00
|
|
|
return -1;
|
|
|
|
graphics->data.spice.tlsPortReserved = true;
|
2015-10-30 17:16:53 +00:00
|
|
|
}
|
2016-11-22 11:09:31 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case VIR_DOMAIN_GRAPHICS_TYPE_SDL:
|
|
|
|
case VIR_DOMAIN_GRAPHICS_TYPE_RDP:
|
|
|
|
case VIR_DOMAIN_GRAPHICS_TYPE_DESKTOP:
|
2018-06-30 14:23:01 +00:00
|
|
|
case VIR_DOMAIN_GRAPHICS_TYPE_EGL_HEADLESS:
|
2016-11-22 11:09:31 +00:00
|
|
|
case VIR_DOMAIN_GRAPHICS_TYPE_LAST:
|
|
|
|
break;
|
2015-10-30 17:16:53 +00:00
|
|
|
}
|
|
|
|
|
2016-05-09 12:20:08 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2016-08-13 18:58:55 +00:00
|
|
|
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessGraphicsAllocatePorts(virQEMUDriver *driver,
|
|
|
|
virDomainGraphicsDef *graphics,
|
2016-08-13 18:58:55 +00:00
|
|
|
bool allocate)
|
|
|
|
{
|
2021-03-11 07:16:13 +00:00
|
|
|
virDomainGraphicsListenDef *glisten;
|
2016-08-13 18:58:55 +00:00
|
|
|
|
|
|
|
if (graphics->nListens <= 0)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
glisten = &graphics->listens[0];
|
|
|
|
|
|
|
|
if (glisten->type != VIR_DOMAIN_GRAPHICS_LISTEN_TYPE_ADDRESS &&
|
|
|
|
glisten->type != VIR_DOMAIN_GRAPHICS_LISTEN_TYPE_NETWORK)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
switch (graphics->type) {
|
|
|
|
case VIR_DOMAIN_GRAPHICS_TYPE_VNC:
|
|
|
|
if (qemuProcessVNCAllocatePorts(driver, graphics, allocate) < 0)
|
|
|
|
return -1;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case VIR_DOMAIN_GRAPHICS_TYPE_SPICE:
|
|
|
|
if (qemuProcessSPICEAllocatePorts(driver, graphics, allocate) < 0)
|
|
|
|
return -1;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case VIR_DOMAIN_GRAPHICS_TYPE_SDL:
|
|
|
|
case VIR_DOMAIN_GRAPHICS_TYPE_RDP:
|
|
|
|
case VIR_DOMAIN_GRAPHICS_TYPE_DESKTOP:
|
2018-06-30 14:23:01 +00:00
|
|
|
case VIR_DOMAIN_GRAPHICS_TYPE_EGL_HEADLESS:
|
2016-08-13 18:58:55 +00:00
|
|
|
case VIR_DOMAIN_GRAPHICS_TYPE_LAST:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
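/* Resolve the listen address of the libvirt network @netname: for
 * isolated/NAT/routed/open networks the network's first IP address is
 * used, while for bridge and macvtap style networks the address is read
 * from the underlying host interface. */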
2018-01-25 09:35:49 +00:00
|
|
|
static int
|
2018-02-09 16:36:24 +00:00
|
|
|
qemuProcessGetNetworkAddress(const char *netname,
|
2018-01-25 09:35:49 +00:00
|
|
|
char **netaddr)
|
|
|
|
{
|
2019-12-20 21:16:30 +00:00
|
|
|
g_autoptr(virConnect) conn = NULL;
|
2018-01-25 09:35:49 +00:00
|
|
|
int ret = -1;
|
2019-12-20 21:16:30 +00:00
|
|
|
g_autoptr(virNetwork) net = NULL;
|
2021-03-11 07:16:13 +00:00
|
|
|
virNetworkDef *netdef = NULL;
|
|
|
|
virNetworkIPDef *ipdef;
|
2018-01-25 09:35:49 +00:00
|
|
|
virSocketAddr addr;
|
2021-03-11 07:16:13 +00:00
|
|
|
virSocketAddr *addrptr = NULL;
|
2018-01-25 09:35:49 +00:00
|
|
|
char *dev_name = NULL;
|
2019-12-20 21:16:28 +00:00
|
|
|
g_autofree char *xml = NULL;
|
2018-01-25 09:35:49 +00:00
|
|
|
|
|
|
|
*netaddr = NULL;
|
2018-02-09 16:36:24 +00:00
|
|
|
|
|
|
|
if (!(conn = virGetConnectNetwork()))
|
|
|
|
return -1;
|
|
|
|
|
2018-01-25 09:35:49 +00:00
|
|
|
net = virNetworkLookupByName(conn, netname);
|
|
|
|
if (!net)
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
xml = virNetworkGetXMLDesc(net, 0);
|
|
|
|
if (!xml)
|
|
|
|
goto cleanup;
|
|
|
|
|
2021-09-10 14:48:22 +00:00
|
|
|
netdef = virNetworkDefParseString(xml, NULL, false);
|
2018-01-25 09:35:49 +00:00
|
|
|
if (!netdef)
|
|
|
|
goto cleanup;
|
|
|
|
|
2018-07-24 03:49:48 +00:00
|
|
|
switch ((virNetworkForwardType) netdef->forward.type) {
|
2018-01-25 09:35:49 +00:00
|
|
|
case VIR_NETWORK_FORWARD_NONE:
|
|
|
|
case VIR_NETWORK_FORWARD_NAT:
|
|
|
|
case VIR_NETWORK_FORWARD_ROUTE:
|
|
|
|
case VIR_NETWORK_FORWARD_OPEN:
|
|
|
|
ipdef = virNetworkDefGetIPByIndex(netdef, AF_UNSPEC, 0);
|
|
|
|
if (!ipdef) {
|
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
_("network '%s' doesn't have an IP address"),
|
|
|
|
netdef->name);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
addrptr = &ipdef->address;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case VIR_NETWORK_FORWARD_BRIDGE:
|
|
|
|
if ((dev_name = netdef->bridge))
|
|
|
|
break;
|
|
|
|
/*
|
|
|
|
* fall through if netdef->bridge wasn't set, since that is
|
|
|
|
* macvtap bridge mode network.
|
|
|
|
*/
|
2019-10-15 11:38:21 +00:00
|
|
|
G_GNUC_FALLTHROUGH;
|
2018-01-25 09:35:49 +00:00
|
|
|
|
|
|
|
case VIR_NETWORK_FORWARD_PRIVATE:
|
|
|
|
case VIR_NETWORK_FORWARD_VEPA:
|
|
|
|
case VIR_NETWORK_FORWARD_PASSTHROUGH:
|
|
|
|
if ((netdef->forward.nifs > 0) && netdef->forward.ifs)
|
|
|
|
dev_name = netdef->forward.ifs[0].device.dev;
|
|
|
|
|
|
|
|
if (!dev_name) {
|
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
_("network '%s' has no associated interface or bridge"),
|
|
|
|
netdef->name);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
break;
|
2018-07-24 03:49:48 +00:00
|
|
|
|
|
|
|
case VIR_NETWORK_FORWARD_HOSTDEV:
|
|
|
|
break;
|
|
|
|
|
|
|
|
case VIR_NETWORK_FORWARD_LAST:
|
|
|
|
default:
|
|
|
|
virReportEnumRangeError(virNetworkForwardType, netdef->forward.type);
|
|
|
|
goto cleanup;
|
2018-01-25 09:35:49 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (dev_name) {
|
|
|
|
if (virNetDevIPAddrGet(dev_name, &addr) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
addrptr = &addr;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!(addrptr &&
|
|
|
|
(*netaddr = virSocketAddrFormat(addrptr)))) {
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = 0;
|
|
|
|
cleanup:
|
|
|
|
virNetworkDefFree(netdef);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2016-08-13 18:58:55 +00:00
|
|
|
|
2016-05-18 08:52:22 +00:00
|
|
|
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessGraphicsSetupNetworkAddress(virDomainGraphicsListenDef *glisten,
|
2016-05-18 08:52:22 +00:00
|
|
|
const char *listenAddr)
|
|
|
|
{
|
|
|
|
int rc;
|
|
|
|
|
|
|
|
/* TODO: reject configuration without network specified for network listen */
|
|
|
|
if (!glisten->network) {
|
2019-10-20 11:49:46 +00:00
|
|
|
glisten->address = g_strdup(listenAddr);
|
2016-05-18 08:52:22 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-02-09 16:36:24 +00:00
|
|
|
rc = qemuProcessGetNetworkAddress(glisten->network, &glisten->address);
|
2016-05-18 08:52:22 +00:00
|
|
|
if (rc <= -2) {
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
|
|
|
|
_("network-based listen isn't possible, "
|
|
|
|
"network driver isn't present"));
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
if (rc < 0)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
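/* Fill in defaults for the listen definitions of a graphics device from
 * qemu.conf: the configured listen address, or an auto-generated UNIX
 * socket when *_auto_unix_socket is enabled, and resolve 'network' type
 * listens to an actual address. */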
2016-05-18 11:27:28 +00:00
|
|
|
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessGraphicsSetupListen(virQEMUDriver *driver,
|
|
|
|
virDomainGraphicsDef *graphics,
|
|
|
|
virDomainObj *vm)
|
2016-05-18 11:27:28 +00:00
|
|
|
{
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
2019-12-20 21:16:30 +00:00
|
|
|
g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
|
2016-06-08 11:30:20 +00:00
|
|
|
const char *type = virDomainGraphicsTypeToString(graphics->type);
|
2016-05-18 11:27:28 +00:00
|
|
|
char *listenAddr = NULL;
|
2016-06-08 11:30:20 +00:00
|
|
|
bool useSocket = false;
|
2016-05-18 11:27:28 +00:00
|
|
|
size_t i;
|
|
|
|
|
|
|
|
switch (graphics->type) {
|
|
|
|
case VIR_DOMAIN_GRAPHICS_TYPE_VNC:
|
2016-06-08 11:30:20 +00:00
|
|
|
useSocket = cfg->vncAutoUnixSocket;
|
2016-05-18 11:27:28 +00:00
|
|
|
listenAddr = cfg->vncListen;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case VIR_DOMAIN_GRAPHICS_TYPE_SPICE:
|
2016-05-18 12:11:20 +00:00
|
|
|
useSocket = cfg->spiceAutoUnixSocket;
|
2016-05-18 11:27:28 +00:00
|
|
|
listenAddr = cfg->spiceListen;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case VIR_DOMAIN_GRAPHICS_TYPE_SDL:
|
|
|
|
case VIR_DOMAIN_GRAPHICS_TYPE_RDP:
|
|
|
|
case VIR_DOMAIN_GRAPHICS_TYPE_DESKTOP:
|
2018-06-30 14:23:01 +00:00
|
|
|
case VIR_DOMAIN_GRAPHICS_TYPE_EGL_HEADLESS:
|
2016-05-18 11:27:28 +00:00
|
|
|
case VIR_DOMAIN_GRAPHICS_TYPE_LAST:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < graphics->nListens; i++) {
|
2021-03-11 07:16:13 +00:00
|
|
|
virDomainGraphicsListenDef *glisten = &graphics->listens[i];
|
2016-05-18 11:27:28 +00:00
|
|
|
|
|
|
|
switch (glisten->type) {
|
|
|
|
case VIR_DOMAIN_GRAPHICS_LISTEN_TYPE_ADDRESS:
|
2016-06-08 11:30:20 +00:00
|
|
|
if (!glisten->address) {
|
|
|
|
/* If there is no address specified and qemu.conf has
|
|
|
|
* *_auto_unix_socket set we should use unix socket as
|
|
|
|
* default instead of tcp listen. */
|
|
|
|
if (useSocket) {
|
2016-06-08 13:18:25 +00:00
|
|
|
memset(glisten, 0, sizeof(virDomainGraphicsListenDef));
|
2019-10-22 13:26:14 +00:00
|
|
|
glisten->socket = g_strdup_printf("%s/%s.sock", priv->libDir,
|
|
|
|
type);
|
2016-06-08 13:18:25 +00:00
|
|
|
glisten->fromConfig = true;
|
|
|
|
glisten->type = VIR_DOMAIN_GRAPHICS_LISTEN_TYPE_SOCKET;
|
2016-06-08 11:30:20 +00:00
|
|
|
} else if (listenAddr) {
|
2019-10-20 11:49:46 +00:00
|
|
|
glisten->address = g_strdup(listenAddr);
|
2016-06-08 11:30:20 +00:00
|
|
|
glisten->fromConfig = true;
|
|
|
|
}
|
|
|
|
}
|
2016-05-18 11:27:28 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case VIR_DOMAIN_GRAPHICS_LISTEN_TYPE_NETWORK:
|
|
|
|
if (glisten->address || !listenAddr)
|
|
|
|
continue;
|
|
|
|
|
2018-02-09 16:36:24 +00:00
|
|
|
if (qemuProcessGraphicsSetupNetworkAddress(glisten,
|
2016-05-18 11:27:28 +00:00
|
|
|
listenAddr) < 0)
|
2019-12-20 21:16:31 +00:00
|
|
|
return -1;
|
2016-05-18 11:27:28 +00:00
|
|
|
break;
|
|
|
|
|
2016-06-08 08:35:37 +00:00
|
|
|
case VIR_DOMAIN_GRAPHICS_LISTEN_TYPE_SOCKET:
|
|
|
|
if (!glisten->socket) {
|
2019-10-22 13:26:14 +00:00
|
|
|
glisten->socket = g_strdup_printf("%s/%s.sock", priv->libDir,
|
|
|
|
type);
|
2016-06-08 08:35:37 +00:00
|
|
|
glisten->autoGenerated = true;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
2016-05-18 11:27:28 +00:00
|
|
|
case VIR_DOMAIN_GRAPHICS_LISTEN_TYPE_NONE:
|
|
|
|
case VIR_DOMAIN_GRAPHICS_LISTEN_TYPE_LAST:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-12-20 21:16:31 +00:00
|
|
|
return 0;
|
2016-05-18 11:27:28 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2018-11-14 15:48:27 +00:00
|
|
|
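/**
 * qemuProcessGraphicsSetupRenderNode:
 *
 * Pick a DRM render node for a SPICE or egl-headless device that requests an
 * automatic render node, provided the QEMU binary supports the corresponding
 * option.
 *
 * Returns 0 on success, -1 on error.
 */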
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessGraphicsSetupRenderNode(virDomainGraphicsDef *graphics,
|
|
|
|
virQEMUCaps *qemuCaps)
|
2018-11-14 15:48:27 +00:00
|
|
|
{
|
2018-11-27 07:23:13 +00:00
|
|
|
char **rendernode = NULL;
|
|
|
|
|
2018-11-14 15:48:27 +00:00
|
|
|
if (!virDomainGraphicsNeedsAutoRenderNode(graphics))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
/* Don't bother picking a DRM node if QEMU doesn't support it. */
|
2018-11-27 07:23:13 +00:00
|
|
|
if (graphics->type == VIR_DOMAIN_GRAPHICS_TYPE_SPICE) {
|
|
|
|
if (!virQEMUCapsGet(qemuCaps, QEMU_CAPS_SPICE_RENDERNODE))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
rendernode = &graphics->data.spice.rendernode;
|
|
|
|
} else {
|
|
|
|
if (!virQEMUCapsGet(qemuCaps, QEMU_CAPS_EGL_HEADLESS_RENDERNODE))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
rendernode = &graphics->data.egl_headless.rendernode;
|
|
|
|
}
|
2018-11-14 15:48:27 +00:00
|
|
|
|
2018-11-27 07:23:13 +00:00
|
|
|
if (!(*rendernode = virHostGetDRMRenderNode()))
|
2018-11-14 15:48:27 +00:00
|
|
|
return -1;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2016-05-09 12:20:08 +00:00
|
|
|
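/**
 * qemuProcessSetupGraphics:
 *
 * Prepare all graphics devices for startup: pick render nodes, set up the
 * listen definitions and, unless VIR_QEMU_PROCESS_START_PRETEND is set,
 * reserve and allocate the ports they need.
 *
 * Returns 0 on success, -1 on error.
 */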
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessSetupGraphics(virQEMUDriver *driver,
|
|
|
|
virDomainObj *vm,
|
|
|
|
virQEMUCaps *qemuCaps,
|
2016-05-09 12:30:27 +00:00
|
|
|
unsigned int flags)
|
2016-05-09 12:20:08 +00:00
|
|
|
{
|
2021-03-11 07:16:13 +00:00
|
|
|
virDomainGraphicsDef *graphics;
|
2016-05-09 12:30:27 +00:00
|
|
|
bool allocate = !(flags & VIR_QEMU_PROCESS_START_PRETEND);
|
2016-05-18 11:27:28 +00:00
|
|
|
size_t i;
|
2016-05-09 12:20:08 +00:00
|
|
|
|
2016-08-13 19:29:38 +00:00
|
|
|
for (i = 0; i < vm->def->ngraphics; i++) {
|
|
|
|
graphics = vm->def->graphics[i];
|
|
|
|
|
2018-11-14 15:48:27 +00:00
|
|
|
if (qemuProcessGraphicsSetupRenderNode(graphics, qemuCaps) < 0)
|
2019-11-12 20:46:27 +00:00
|
|
|
return -1;
|
2018-11-14 15:48:27 +00:00
|
|
|
|
2018-02-09 16:36:24 +00:00
|
|
|
if (qemuProcessGraphicsSetupListen(driver, graphics, vm) < 0)
|
2019-11-12 20:46:27 +00:00
|
|
|
return -1;
|
2016-08-13 19:29:38 +00:00
|
|
|
}
|
|
|
|
|
2016-08-13 19:01:38 +00:00
|
|
|
if (allocate) {
|
|
|
|
for (i = 0; i < vm->def->ngraphics; i++) {
|
|
|
|
graphics = vm->def->graphics[i];
|
|
|
|
|
2018-02-06 09:09:07 +00:00
|
|
|
if (qemuProcessGraphicsReservePorts(graphics, false) < 0)
|
2019-11-12 20:46:27 +00:00
|
|
|
return -1;
|
2016-08-13 19:01:38 +00:00
|
|
|
}
|
|
|
|
}
|
2016-05-09 12:20:08 +00:00
|
|
|
|
2015-10-30 17:16:53 +00:00
|
|
|
for (i = 0; i < vm->def->ngraphics; ++i) {
|
2016-08-13 19:01:38 +00:00
|
|
|
graphics = vm->def->graphics[i];
|
2016-04-25 13:24:48 +00:00
|
|
|
|
2016-08-13 18:58:55 +00:00
|
|
|
if (qemuProcessGraphicsAllocatePorts(driver, graphics, allocate) < 0)
|
2019-11-12 20:46:27 +00:00
|
|
|
return -1;
|
2015-10-30 17:16:53 +00:00
|
|
|
}
|
|
|
|
|
2019-11-12 20:46:27 +00:00
|
|
|
return 0;
|
2015-10-30 17:16:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2015-11-02 09:35:58 +00:00
|
|
|
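/**
 * qemuProcessSetupRawIO:
 *
 * Allow CAP_SYS_RAWIO for the QEMU process if any disk or SCSI hostdev has
 * rawio enabled; on platforms without CAP_SYS_RAWIO an error is reported
 * instead.
 *
 * Returns 0 on success, -1 on error.
 */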
static int
|
2022-01-13 17:26:14 +00:00
|
|
|
qemuProcessSetupRawIO(virDomainObj *vm,
|
2021-03-11 07:16:13 +00:00
|
|
|
virCommand *cmd G_GNUC_UNUSED)
|
2015-11-02 09:35:58 +00:00
|
|
|
{
|
|
|
|
bool rawio = false;
|
|
|
|
size_t i;
|
|
|
|
int ret = -1;
|
|
|
|
|
|
|
|
/* in case a certain disk needs CAP_SYS_RAWIO, add this capability */
|
|
|
|
for (i = 0; i < vm->def->ndisks; i++) {
|
2021-03-11 07:16:13 +00:00
|
|
|
virDomainDiskDef *disk = vm->def->disks[i];
|
2015-11-02 09:35:58 +00:00
|
|
|
|
|
|
|
if (disk->rawio == VIR_TRISTATE_BOOL_YES) {
|
|
|
|
rawio = true;
|
|
|
|
#ifndef CAP_SYS_RAWIO
|
|
|
|
break;
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* If rawio not already set, check hostdevs as well */
|
|
|
|
if (!rawio) {
|
|
|
|
for (i = 0; i < vm->def->nhostdevs; i++) {
|
2021-03-11 07:16:13 +00:00
|
|
|
virDomainHostdevSubsysSCSI *scsisrc;
|
2020-07-28 17:50:28 +00:00
|
|
|
|
2016-11-15 18:25:41 +00:00
|
|
|
if (!virHostdevIsSCSIDevice(vm->def->hostdevs[i]))
|
2016-11-15 18:25:39 +00:00
|
|
|
continue;
|
|
|
|
|
2020-07-28 17:50:28 +00:00
|
|
|
scsisrc = &vm->def->hostdevs[i]->source.subsys.u.scsi;
|
2015-11-02 09:35:58 +00:00
|
|
|
if (scsisrc->rawio == VIR_TRISTATE_BOOL_YES) {
|
|
|
|
rawio = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = 0;
|
|
|
|
|
|
|
|
if (rawio) {
|
|
|
|
#ifdef CAP_SYS_RAWIO
|
2022-01-13 17:26:14 +00:00
|
|
|
virCommandAllowCap(cmd, CAP_SYS_RAWIO);
|
2015-11-02 09:35:58 +00:00
|
|
|
#else
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
|
|
|
|
_("Raw I/O is not supported on this platform"));
|
|
|
|
ret = -1;
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2015-11-02 10:00:49 +00:00
|
|
|
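/**
 * qemuProcessSetupBalloon:
 *
 * Set the initial balloon size and, if configured, the memory stats polling
 * period via the monitor. Does nothing when the domain has no memballoon
 * device.
 *
 * Returns 0 on success, -1 on error.
 */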
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessSetupBalloon(virQEMUDriver *driver,
|
|
|
|
virDomainObj *vm,
|
2015-11-02 10:00:49 +00:00
|
|
|
qemuDomainAsyncJob asyncJob)
|
|
|
|
{
|
|
|
|
unsigned long long balloon = vm->def->mem.cur_balloon;
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
2015-11-02 10:00:49 +00:00
|
|
|
int ret = -1;
|
|
|
|
|
2016-04-06 13:02:31 +00:00
|
|
|
if (!virDomainDefHasMemballoon(vm->def))
|
2015-11-02 10:00:49 +00:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
|
2017-07-19 06:40:24 +00:00
|
|
|
return -1;
|
2015-11-02 10:00:49 +00:00
|
|
|
|
2016-07-25 15:07:38 +00:00
|
|
|
if (vm->def->memballoon->period)
|
|
|
|
qemuMonitorSetMemoryStatsPeriod(priv->mon, vm->def->memballoon,
|
|
|
|
vm->def->memballoon->period);
|
2015-11-02 10:00:49 +00:00
|
|
|
if (qemuMonitorSetBalloon(priv->mon, balloon) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
ret = 0;
|
|
|
|
|
|
|
|
cleanup:
|
2021-11-24 12:09:32 +00:00
|
|
|
qemuDomainObjExitMonitor(driver, vm);
|
2015-11-02 10:00:49 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2015-11-02 12:39:28 +00:00
|
|
|
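/**
 * qemuProcessMakeDir:
 *
 * Create @path (including any missing parent directories) with mode 0750 and
 * apply the domain's security label to it.
 *
 * Returns 0 on success, -1 on error.
 */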
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessMakeDir(virQEMUDriver *driver,
|
|
|
|
virDomainObj *vm,
|
2016-02-26 08:15:55 +00:00
|
|
|
const char *path)
|
2015-11-02 12:39:28 +00:00
|
|
|
{
|
2021-02-26 08:17:30 +00:00
|
|
|
if (g_mkdir_with_parents(path, 0750) < 0) {
|
2015-11-02 12:39:28 +00:00
|
|
|
virReportSystemError(errno, _("Cannot create directory '%s'"), path);
|
2019-11-12 20:46:27 +00:00
|
|
|
return -1;
|
2015-11-02 12:39:28 +00:00
|
|
|
}
|
|
|
|
|
2018-09-05 09:19:14 +00:00
|
|
|
if (qemuSecurityDomainSetPathLabel(driver, vm, path, true) < 0)
|
2019-11-12 20:46:27 +00:00
|
|
|
return -1;
|
2015-11-02 12:39:28 +00:00
|
|
|
|
2019-11-12 20:46:27 +00:00
|
|
|
return 0;
|
2015-11-02 12:39:28 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2016-04-20 20:44:25 +00:00
|
|
|
static void
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessStartWarnShmem(virDomainObj *vm)
|
2016-04-20 20:44:25 +00:00
|
|
|
{
|
|
|
|
size_t i;
|
|
|
|
bool check_shmem = false;
|
|
|
|
bool shmem = vm->def->nshmems;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* For vhost-user to work, the domain has to have some type of
|
|
|
|
* shared memory configured. We're not the proper ones to judge
|
|
|
|
* whether shared hugepages or shm are enough and will be in the
|
|
|
|
* future, so we'll just warn in case neither is configured.
|
|
|
|
* Moreover failing would give the false illusion that libvirt is
|
|
|
|
* really checking that everything works before running the domain
|
|
|
|
* and not only are we unable to do that, but it's also not our
|
|
|
|
* aim to do so.
|
|
|
|
*/
|
|
|
|
for (i = 0; i < vm->def->nnets; i++) {
|
|
|
|
if (virDomainNetGetActualType(vm->def->nets[i]) ==
|
|
|
|
VIR_DOMAIN_NET_TYPE_VHOSTUSER) {
|
|
|
|
check_shmem = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!check_shmem)
|
|
|
|
return;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This check is by no means complete. We merely check
|
|
|
|
* whether there are *some* hugepages enabled and *some* NUMA
|
|
|
|
* nodes with shared memory access.
|
|
|
|
*/
|
|
|
|
if (!shmem && vm->def->mem.nhugepages) {
|
|
|
|
for (i = 0; i < virDomainNumaGetNodeCount(vm->def->numa); i++) {
|
|
|
|
if (virDomainNumaGetNodeMemoryAccessMode(vm->def->numa, i) ==
|
2017-02-02 13:27:30 +00:00
|
|
|
VIR_DOMAIN_MEMORY_ACCESS_SHARED) {
|
2016-04-20 20:44:25 +00:00
|
|
|
shmem = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!shmem) {
|
|
|
|
VIR_WARN("Detected vhost-user interface without any shared memory, "
|
|
|
|
"the interface might not be operational");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-09-20 11:49:21 +00:00
|
|
|
|
|
|
|
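/**
 * qemuProcessStartValidateGraphics:
 *
 * Reject configurations where a single VNC or SPICE device defines more than
 * one listen, which QEMU cannot honour.
 *
 * Returns 0 on success, -1 on error.
 */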
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessStartValidateGraphics(virDomainObj *vm)
|
2016-09-20 11:49:21 +00:00
|
|
|
{
|
|
|
|
size_t i;
|
|
|
|
|
|
|
|
for (i = 0; i < vm->def->ngraphics; i++) {
|
2021-03-11 07:16:13 +00:00
|
|
|
virDomainGraphicsDef *graphics = vm->def->graphics[i];
|
2016-09-20 11:49:21 +00:00
|
|
|
|
|
|
|
switch (graphics->type) {
|
|
|
|
case VIR_DOMAIN_GRAPHICS_TYPE_VNC:
|
|
|
|
case VIR_DOMAIN_GRAPHICS_TYPE_SPICE:
|
|
|
|
if (graphics->nListens > 1) {
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
|
|
|
|
_("QEMU does not support multiple listens for "
|
|
|
|
"one graphics device."));
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
|
|
|
case VIR_DOMAIN_GRAPHICS_TYPE_SDL:
|
|
|
|
case VIR_DOMAIN_GRAPHICS_TYPE_RDP:
|
|
|
|
case VIR_DOMAIN_GRAPHICS_TYPE_DESKTOP:
|
2018-06-30 14:23:01 +00:00
|
|
|
case VIR_DOMAIN_GRAPHICS_TYPE_EGL_HEADLESS:
|
2016-09-20 11:49:21 +00:00
|
|
|
case VIR_DOMAIN_GRAPHICS_TYPE_LAST:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2017-02-12 03:14:32 +00:00
|
|
|
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessStartValidateIOThreads(virDomainObj *vm,
|
|
|
|
virQEMUCaps *qemuCaps)
|
2017-02-12 03:14:32 +00:00
|
|
|
{
|
|
|
|
size_t i;
|
|
|
|
|
|
|
|
if (vm->def->niothreadids > 0 &&
|
|
|
|
!virQEMUCapsGet(qemuCaps, QEMU_CAPS_OBJECT_IOTHREAD)) {
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
|
|
|
|
_("IOThreads not supported for this QEMU"));
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < vm->def->ncontrollers; i++) {
|
2021-03-11 07:16:13 +00:00
|
|
|
virDomainControllerDef *cont = vm->def->controllers[i];
|
2017-02-12 03:14:32 +00:00
|
|
|
|
|
|
|
if (cont->type == VIR_DOMAIN_CONTROLLER_TYPE_SCSI &&
|
|
|
|
cont->model == VIR_DOMAIN_CONTROLLER_MODEL_SCSI_VIRTIO_SCSI &&
|
|
|
|
cont->iothread > 0 &&
|
|
|
|
!virQEMUCapsGet(qemuCaps, QEMU_CAPS_VIRTIO_SCSI_IOTHREAD)) {
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
|
|
|
|
_("IOThreads for virtio-scsi not supported for "
|
|
|
|
"this QEMU"));
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2017-02-21 13:17:10 +00:00
|
|
|
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessStartValidateShmem(virDomainObj *vm)
|
2017-02-01 16:14:00 +00:00
|
|
|
{
|
|
|
|
size_t i;
|
|
|
|
|
|
|
|
for (i = 0; i < vm->def->nshmems; i++) {
|
2021-03-11 07:16:13 +00:00
|
|
|
virDomainShmemDef *shmem = vm->def->shmems[i];
|
2017-02-01 16:14:00 +00:00
|
|
|
|
|
|
|
if (strchr(shmem->name, '/')) {
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
|
|
|
|
_("shmem name '%s' must not contain '/'"),
|
|
|
|
shmem->name);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2017-08-30 13:46:53 +00:00
|
|
|
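/**
 * qemuProcessStartValidateDisks:
 *
 * Check disk sources against the capabilities of the QEMU binary: VxHS and
 * NVMe backed disks need explicit support, and pseries machines cannot have
 * floppy devices.
 *
 * Returns 0 on success, -1 on error.
 */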
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessStartValidateDisks(virDomainObj *vm,
|
|
|
|
virQEMUCaps *qemuCaps)
|
2017-08-30 13:46:53 +00:00
|
|
|
{
|
|
|
|
size_t i;
|
|
|
|
|
|
|
|
for (i = 0; i < vm->def->ndisks; i++) {
|
2021-03-11 07:16:13 +00:00
|
|
|
virDomainDiskDef *disk = vm->def->disks[i];
|
|
|
|
virStorageSource *src = disk->src;
|
2017-08-30 13:46:53 +00:00
|
|
|
|
|
|
|
/* This is a best effort check as we can only check if the command
|
|
|
|
* option exists, but we cannot determine whether the running QEMU
|
|
|
|
* was built with '--enable-vxhs'. */
|
|
|
|
if (src->type == VIR_STORAGE_TYPE_NETWORK &&
|
|
|
|
src->protocol == VIR_STORAGE_NET_PROTOCOL_VXHS &&
|
|
|
|
!virQEMUCapsGet(qemuCaps, QEMU_CAPS_VXHS)) {
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
|
|
|
|
_("VxHS protocol is not supported with this "
|
|
|
|
"QEMU binary"));
|
|
|
|
return -1;
|
|
|
|
}
|
2017-10-03 12:06:15 +00:00
|
|
|
|
|
|
|
/* PowerPC pseries based VMs do not support floppy device */
|
|
|
|
if (disk->device == VIR_DOMAIN_DISK_DEVICE_FLOPPY &&
|
|
|
|
qemuDomainIsPSeries(vm->def)) {
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
|
|
|
|
_("PowerPC pseries machines do not support floppy device"));
|
|
|
|
return -1;
|
|
|
|
}
|
2019-06-21 13:07:22 +00:00
|
|
|
|
|
|
|
if (src->type == VIR_STORAGE_TYPE_NVME &&
|
|
|
|
!virQEMUCapsGet(qemuCaps, QEMU_CAPS_DRIVE_NVME)) {
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
|
|
|
|
_("NVMe disks are not supported with this QEMU binary"));
|
|
|
|
return -1;
|
|
|
|
}
|
2017-08-30 13:46:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2020-05-25 09:35:12 +00:00
|
|
|
/* 250 parts per million (ppm) is half of the NTP threshold */
|
|
|
|
#define TSC_TOLERANCE 250
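/* Example of the tolerance computed in qemuProcessStartValidateTSC: for a
 * host TSC of 3 GHz the tolerance is 3000000000 * 250 / 1000000 = 750000 Hz,
 * so requested frequencies within [2999250000, 3000750000] Hz are accepted
 * without relying on TSC scaling. */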
|
|
|
|
|
2019-05-30 22:03:59 +00:00
|
|
|
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessStartValidateTSC(virQEMUDriver *driver,
|
|
|
|
virDomainObj *vm)
|
2019-05-30 22:03:59 +00:00
|
|
|
{
|
|
|
|
size_t i;
|
|
|
|
unsigned long long freq = 0;
|
2020-05-25 09:35:12 +00:00
|
|
|
unsigned long long tolerance;
|
|
|
|
unsigned long long minFreq;
|
|
|
|
unsigned long long maxFreq;
|
2021-03-11 07:16:13 +00:00
|
|
|
virHostCPUTscInfo *tsc;
|
2019-11-29 10:40:39 +00:00
|
|
|
g_autoptr(virCPUDef) cpu = NULL;
|
2019-05-30 22:03:59 +00:00
|
|
|
|
|
|
|
for (i = 0; i < vm->def->clock.ntimers; i++) {
|
2021-03-11 07:16:13 +00:00
|
|
|
virDomainTimerDef *timer = vm->def->clock.timers[i];
|
2019-05-30 22:03:59 +00:00
|
|
|
|
|
|
|
if (timer->name == VIR_DOMAIN_TIMER_NAME_TSC &&
|
|
|
|
timer->frequency > 0) {
|
|
|
|
freq = timer->frequency;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (freq == 0)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
VIR_DEBUG("Requested TSC frequency %llu Hz", freq);
|
|
|
|
|
2019-11-29 10:40:39 +00:00
|
|
|
cpu = virQEMUDriverGetHostCPU(driver);
|
|
|
|
if (!cpu || !cpu->tsc) {
|
2019-05-30 22:03:59 +00:00
|
|
|
VIR_DEBUG("Host TSC frequency could not be probed");
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2019-11-29 10:40:39 +00:00
|
|
|
tsc = cpu->tsc;
|
2020-05-25 09:35:12 +00:00
|
|
|
tolerance = tsc->frequency * TSC_TOLERANCE / 1000000;
|
|
|
|
minFreq = tsc->frequency - tolerance;
|
|
|
|
maxFreq = tsc->frequency + tolerance;
|
|
|
|
|
|
|
|
VIR_DEBUG("Host TSC frequency %llu Hz, scaling %s, tolerance +/- %llu Hz",
|
|
|
|
tsc->frequency, virTristateBoolTypeToString(tsc->scaling),
|
|
|
|
tolerance);
|
|
|
|
|
2021-01-05 22:53:25 +00:00
|
|
|
if (freq >= minFreq && freq <= maxFreq) {
|
2020-05-25 09:35:12 +00:00
|
|
|
VIR_DEBUG("Requested TSC frequency is within tolerance interval");
|
|
|
|
return 0;
|
|
|
|
}
|
2019-05-30 22:03:59 +00:00
|
|
|
|
2020-05-25 09:35:12 +00:00
|
|
|
if (tsc->scaling == VIR_TRISTATE_BOOL_YES)
|
2019-05-30 22:03:59 +00:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (tsc->scaling == VIR_TRISTATE_BOOL_ABSENT) {
|
2020-05-25 09:35:12 +00:00
|
|
|
VIR_DEBUG("Requested TSC frequency falls outside tolerance range and "
|
|
|
|
"scaling support is unknown, QEMU will try and possibly "
|
|
|
|
"fail later");
|
2019-05-30 22:03:59 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
|
2020-05-25 09:35:12 +00:00
|
|
|
_("Requested TSC frequency %llu Hz is outside tolerance "
|
|
|
|
"range ([%llu, %llu] Hz) around host frequency %llu Hz "
|
|
|
|
"and TSC scaling is not supported by the host CPU"),
|
|
|
|
freq, minFreq, maxFreq, tsc->frequency);
|
2019-05-30 22:03:59 +00:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2016-02-04 14:25:29 +00:00
|
|
|
/**
|
|
|
|
* qemuProcessStartValidate:
|
|
|
|
* @vm: domain object
|
|
|
|
* @qemuCaps: emulator capabilities
|
|
|
|
* @migration: restoration of existing state
|
|
|
|
*
|
2016-03-17 12:51:20 +00:00
|
|
|
* This function aggregates checks done prior to start of a VM.
|
|
|
|
*
|
|
|
|
* Flag VIR_QEMU_PROCESS_START_PRETEND indicates that we don't want to actually
|
|
|
|
* start the domain but create a valid qemu command. If some code shouldn't be
|
|
|
|
* executed in this case, make sure to check this flag.
|
2016-02-04 14:25:29 +00:00
|
|
|
*/
|
2016-05-27 11:42:24 +00:00
|
|
|
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessStartValidate(virQEMUDriver *driver,
|
|
|
|
virDomainObj *vm,
|
|
|
|
virQEMUCaps *qemuCaps,
|
2016-03-17 12:51:20 +00:00
|
|
|
unsigned int flags)
|
2016-02-04 14:25:29 +00:00
|
|
|
{
|
2016-03-17 12:51:20 +00:00
|
|
|
if (!(flags & VIR_QEMU_PROCESS_START_PRETEND)) {
|
|
|
|
if (vm->def->virtType == VIR_DOMAIN_VIRT_KVM) {
|
|
|
|
VIR_DEBUG("Checking for KVM availability");
|
|
|
|
if (!virFileExists("/dev/kvm")) {
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
|
|
|
|
_("Domain requires KVM, but it is not available. "
|
|
|
|
"Check that virtualization is enabled in the "
|
|
|
|
"host BIOS, and host configuration is setup to "
|
|
|
|
"load the kvm modules."));
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
VIR_DEBUG("Checking domain and device security labels");
|
2017-02-13 13:36:53 +00:00
|
|
|
if (qemuSecurityCheckAllLabel(driver->securityManager, vm->def) < 0)
|
2016-03-17 12:51:20 +00:00
|
|
|
return -1;
|
|
|
|
|
|
|
|
}
|
|
|
|
|
2022-01-31 11:55:47 +00:00
|
|
|
if (virDomainDefValidate(vm->def, 0, driver->xmlopt, qemuCaps) < 0)
|
2016-02-04 14:25:29 +00:00
|
|
|
return -1;
|
|
|
|
|
2016-10-10 17:55:17 +00:00
|
|
|
if (qemuProcessStartValidateGraphics(vm) < 0)
|
|
|
|
return -1;
|
|
|
|
|
2017-02-12 03:14:32 +00:00
|
|
|
if (qemuProcessStartValidateIOThreads(vm, qemuCaps) < 0)
|
|
|
|
return -1;
|
|
|
|
|
2017-02-01 16:14:00 +00:00
|
|
|
if (qemuProcessStartValidateShmem(vm) < 0)
|
|
|
|
return -1;
|
|
|
|
|
2019-06-19 19:59:49 +00:00
|
|
|
if (vm->def->cpu) {
|
|
|
|
if (virCPUValidateFeatures(vm->def->os.arch, vm->def->cpu) < 0)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
if (ARCH_IS_X86(vm->def->os.arch) &&
|
|
|
|
!virQEMUCapsGet(qemuCaps, QEMU_CAPS_CPU_UNAVAILABLE_FEATURES)) {
|
2020-12-01 08:21:32 +00:00
|
|
|
g_auto(GStrv) features = NULL;
|
2019-06-19 19:59:49 +00:00
|
|
|
int n;
|
|
|
|
|
|
|
|
if ((n = virCPUDefCheckFeatures(vm->def->cpu,
|
|
|
|
virCPUx86FeatureFilterSelectMSR,
|
|
|
|
NULL,
|
|
|
|
&features)) < 0)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
if (n > 0) {
|
2019-10-15 13:16:31 +00:00
|
|
|
g_autofree char *str = NULL;
|
2019-06-19 19:59:49 +00:00
|
|
|
|
2021-02-05 20:33:47 +00:00
|
|
|
str = g_strjoinv(", ", features);
|
2019-06-19 19:59:49 +00:00
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
|
|
|
|
_("Some features cannot be reliably used "
|
|
|
|
"with this QEMU: %s"), str);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2017-09-14 14:07:59 +00:00
|
|
|
|
2017-08-30 13:46:53 +00:00
|
|
|
if (qemuProcessStartValidateDisks(vm, qemuCaps) < 0)
|
|
|
|
return -1;
|
|
|
|
|
2019-11-29 10:40:39 +00:00
|
|
|
if (qemuProcessStartValidateTSC(driver, vm) < 0)
|
2019-05-30 22:03:59 +00:00
|
|
|
return -1;
|
|
|
|
|
2016-03-17 12:51:20 +00:00
|
|
|
VIR_DEBUG("Checking for any possible (non-fatal) issues");
|
|
|
|
|
2016-04-20 20:44:25 +00:00
|
|
|
qemuProcessStartWarnShmem(vm);
|
2016-03-17 12:51:20 +00:00
|
|
|
|
2016-10-10 17:55:17 +00:00
|
|
|
return 0;
|
2016-02-04 14:25:29 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2019-06-17 15:55:52 +00:00
|
|
|
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessStartUpdateCustomCaps(virDomainObj *vm)
|
2019-06-17 15:55:52 +00:00
|
|
|
{
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
2019-10-15 12:47:50 +00:00
|
|
|
g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(priv->driver);
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainXmlNsDef *nsdef = vm->def->namespaceData;
|
2019-06-18 08:04:32 +00:00
|
|
|
char **next;
|
2019-06-17 15:55:52 +00:00
|
|
|
int tmp;
|
|
|
|
|
2019-06-18 08:04:32 +00:00
|
|
|
if (cfg->capabilityfilters) {
|
|
|
|
for (next = cfg->capabilityfilters; *next; next++) {
|
|
|
|
if ((tmp = virQEMUCapsTypeFromString(*next)) < 0) {
|
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
_("invalid capability_filters capability '%s'"),
|
|
|
|
*next);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
virQEMUCapsClear(priv->qemuCaps, tmp);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-06-17 15:55:52 +00:00
|
|
|
if (nsdef) {
|
2021-06-21 12:54:24 +00:00
|
|
|
for (next = nsdef->capsadd; next && *next; next++) {
|
|
|
|
if ((tmp = virQEMUCapsTypeFromString(*next)) < 0) {
|
2019-06-17 15:55:52 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
_("invalid qemu namespace capability '%s'"),
|
2021-06-21 12:54:24 +00:00
|
|
|
*next);
|
2019-06-17 15:55:52 +00:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
virQEMUCapsSet(priv->qemuCaps, tmp);
|
|
|
|
}
|
|
|
|
|
2021-06-21 12:54:24 +00:00
|
|
|
for (next = nsdef->capsdel; next && *next; next++) {
|
|
|
|
if ((tmp = virQEMUCapsTypeFromString(*next)) < 0) {
|
2019-06-17 15:55:52 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
_("invalid qemu namespace capability '%s'"),
|
2021-06-21 12:54:24 +00:00
|
|
|
*next);
|
2019-06-17 15:55:52 +00:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
virQEMUCapsClear(priv->qemuCaps, tmp);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2019-11-24 08:16:11 +00:00
|
|
|
/**
|
|
|
|
* qemuProcessPrepareQEMUCaps:
|
|
|
|
* @vm: domain object
|
|
|
|
* @qemuCapsCache: cache of QEMU capabilities
|
|
|
|
*
|
|
|
|
* Prepare the capabilities of a QEMU process for startup. This includes
|
|
|
|
* copying the caps to a static cache and potential post-processing depending
|
|
|
|
* on the configuration of the VM and startup process.
|
|
|
|
*
|
|
|
|
* Returns 0 on success, -1 on error.
|
|
|
|
*/
|
|
|
|
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessPrepareQEMUCaps(virDomainObj *vm,
|
2022-01-04 14:13:45 +00:00
|
|
|
virFileCache *qemuCapsCache)
|
2019-11-24 08:16:11 +00:00
|
|
|
{
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
2019-11-24 08:16:11 +00:00
|
|
|
|
|
|
|
virObjectUnref(priv->qemuCaps);
|
|
|
|
if (!(priv->qemuCaps = virQEMUCapsCacheLookupCopy(qemuCapsCache,
|
|
|
|
vm->def->virtType,
|
|
|
|
vm->def->emulator,
|
|
|
|
vm->def->os.machine)))
|
|
|
|
return -1;
|
|
|
|
|
2019-11-24 08:48:30 +00:00
|
|
|
/* Update qemu capabilities according to lists passed in via namespace */
|
|
|
|
if (qemuProcessStartUpdateCustomCaps(vm) < 0)
|
|
|
|
return -1;
|
|
|
|
|
2019-11-25 12:01:13 +00:00
|
|
|
/* re-process capability lockouts since we might have removed capabilities */
|
|
|
|
virQEMUCapsInitProcessCapsInterlock(priv->qemuCaps);
|
|
|
|
|
2019-11-24 08:16:11 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2015-11-10 15:58:12 +00:00
|
|
|
/**
|
|
|
|
* qemuProcessInit:
|
|
|
|
*
|
|
|
|
* Prepares the domain up to the point when priv->qemuCaps is initialized. The
|
|
|
|
* function calls qemuProcessStop when needed.
|
|
|
|
*
|
2016-03-22 12:17:27 +00:00
|
|
|
* Flag VIR_QEMU_PROCESS_START_PRETEND indicates that we don't want to actually
|
|
|
|
* start the domain but create a valid qemu command. If some code shouldn't be
|
|
|
|
* executed in this case, make sure to check this flag.
|
|
|
|
*
|
2015-11-10 15:58:12 +00:00
|
|
|
* Returns 0 on success, -1 on error.
|
|
|
|
*/
|
|
|
|
int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessInit(virQEMUDriver *driver,
|
|
|
|
virDomainObj *vm,
|
|
|
|
virCPUDef *updatedCPU,
|
2016-02-11 10:20:28 +00:00
|
|
|
qemuDomainAsyncJob asyncJob,
|
2016-02-04 14:25:29 +00:00
|
|
|
bool migration,
|
2016-03-17 12:51:20 +00:00
|
|
|
unsigned int flags)
|
2015-11-10 15:58:12 +00:00
|
|
|
{
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
2015-11-10 15:58:12 +00:00
|
|
|
int stopFlags;
|
2021-03-11 07:16:13 +00:00
|
|
|
virCPUDef *origCPU = NULL;
|
2015-11-10 15:58:12 +00:00
|
|
|
int ret = -1;
|
|
|
|
|
|
|
|
VIR_DEBUG("vm=%p name=%s id=%d migration=%d",
|
|
|
|
vm, vm->def->name, vm->def->id, migration);
|
|
|
|
|
|
|
|
VIR_DEBUG("Beginning VM startup process");
|
|
|
|
|
|
|
|
if (virDomainObjIsActive(vm)) {
|
|
|
|
virReportError(VIR_ERR_OPERATION_INVALID, "%s",
|
|
|
|
_("VM is already active"));
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2017-08-16 14:00:25 +00:00
|
|
|
/* if the post parse callback failed we need to re-run it on the
|
|
|
|
* old config before we start the VM */
|
|
|
|
if (vm->def->postParseFailed) {
|
|
|
|
VIR_DEBUG("re-running the post parse callback");
|
|
|
|
|
2019-11-23 08:43:35 +00:00
|
|
|
/* we don't have the private copy of qemuCaps at this point */
|
2019-11-27 12:08:10 +00:00
|
|
|
if (virDomainDefPostParse(vm->def, 0, driver->xmlopt, NULL) < 0)
|
2017-08-16 14:00:25 +00:00
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2016-02-04 13:48:57 +00:00
|
|
|
VIR_DEBUG("Determining emulator version");
|
2022-01-04 14:13:45 +00:00
|
|
|
if (qemuProcessPrepareQEMUCaps(vm, driver->qemuCapsCache) < 0)
|
2016-02-04 13:48:57 +00:00
|
|
|
goto cleanup;
|
2015-11-10 15:58:12 +00:00
|
|
|
|
2017-05-31 10:34:10 +00:00
|
|
|
if (qemuDomainUpdateCPU(vm, updatedCPU, &origCPU) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
2019-11-29 10:40:39 +00:00
|
|
|
if (qemuProcessStartValidate(driver, vm, priv->qemuCaps, flags) < 0)
|
2016-02-04 14:25:29 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
2015-11-10 15:58:12 +00:00
|
|
|
/* Do this upfront, so any part of the startup process can add
|
|
|
|
* runtime state to vm->def that won't be persisted. This lets us
|
|
|
|
* report implicit runtime defaults in the XML, like vnc listen/socket
|
|
|
|
*/
|
|
|
|
VIR_DEBUG("Setting current domain def as transient");
|
2019-11-27 12:41:59 +00:00
|
|
|
if (virDomainObjSetDefTransient(driver->xmlopt, vm, priv->qemuCaps) < 0)
|
2017-02-23 09:44:08 +00:00
|
|
|
goto cleanup;
|
2015-11-10 15:58:12 +00:00
|
|
|
|
2017-02-23 09:44:08 +00:00
|
|
|
if (flags & VIR_QEMU_PROCESS_START_PRETEND) {
|
2017-02-23 16:10:55 +00:00
|
|
|
if (qemuDomainSetPrivatePaths(driver, vm) < 0) {
|
|
|
|
virDomainObjRemoveTransientDef(vm);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
2017-02-23 09:44:08 +00:00
|
|
|
} else {
|
2016-03-22 12:17:27 +00:00
|
|
|
vm->def->id = qemuDriverAllocateID(driver);
|
2021-12-14 15:36:15 +00:00
|
|
|
qemuDomainSetFakeReboot(vm, false);
|
2016-03-22 12:17:27 +00:00
|
|
|
virDomainObjSetState(vm, VIR_DOMAIN_PAUSED, VIR_DOMAIN_PAUSED_STARTING_UP);
|
2015-11-10 15:58:12 +00:00
|
|
|
|
2020-01-31 16:04:24 +00:00
|
|
|
if (g_atomic_int_add(&driver->nactive, 1) == 0 && driver->inhibitCallback)
|
2016-03-22 12:17:27 +00:00
|
|
|
driver->inhibitCallback(true, driver->inhibitOpaque);
|
2015-11-10 15:58:12 +00:00
|
|
|
|
2016-03-22 12:17:27 +00:00
|
|
|
/* Run an early hook to set-up missing devices */
|
|
|
|
if (qemuProcessStartHook(driver, vm,
|
|
|
|
VIR_HOOK_QEMU_OP_PREPARE,
|
|
|
|
VIR_HOOK_SUBOP_BEGIN) < 0)
|
|
|
|
goto stop;
|
2015-11-10 15:58:12 +00:00
|
|
|
|
2017-02-23 09:44:08 +00:00
|
|
|
if (qemuDomainSetPrivatePaths(driver, vm) < 0)
|
|
|
|
goto stop;
|
2017-05-31 10:34:10 +00:00
|
|
|
|
2019-10-16 11:43:18 +00:00
|
|
|
priv->origCPU = g_steal_pointer(&origCPU);
|
2017-02-23 09:44:08 +00:00
|
|
|
}
|
2016-03-14 09:31:28 +00:00
|
|
|
|
2015-11-10 15:58:12 +00:00
|
|
|
ret = 0;
|
|
|
|
|
|
|
|
cleanup:
|
2017-05-31 10:34:10 +00:00
|
|
|
virCPUDefFree(origCPU);
|
2015-11-10 15:58:12 +00:00
|
|
|
return ret;
|
|
|
|
|
|
|
|
stop:
|
|
|
|
stopFlags = VIR_QEMU_PROCESS_STOP_NO_RELABEL;
|
|
|
|
if (migration)
|
|
|
|
stopFlags |= VIR_QEMU_PROCESS_STOP_MIGRATED;
|
2016-02-11 10:20:28 +00:00
|
|
|
qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED, asyncJob, stopFlags);
|
2015-11-10 15:58:12 +00:00
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2016-02-15 17:01:21 +00:00
|
|
|
/**
|
|
|
|
* qemuProcessNetworkPrepareDevices:
*
* Resolve the actual backing of each network interface before launch:
* allocate devices from the configured networks, insert hostdev entries for
* interfaces that resolve to type='hostdev' at runtime and prepare slirp
* helpers for user-type interfaces where supported.
|
|
|
|
*/
|
|
|
|
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessNetworkPrepareDevices(virQEMUDriver *driver,
|
|
|
|
virDomainObj *vm)
|
2016-02-15 17:01:21 +00:00
|
|
|
{
|
2021-03-11 07:16:13 +00:00
|
|
|
virDomainDef *def = vm->def;
|
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
2016-02-15 17:01:21 +00:00
|
|
|
size_t i;
|
2019-12-20 21:16:30 +00:00
|
|
|
g_autoptr(virConnect) conn = NULL;
|
2016-02-15 17:01:21 +00:00
|
|
|
|
|
|
|
for (i = 0; i < def->nnets; i++) {
|
2021-03-11 07:16:13 +00:00
|
|
|
virDomainNetDef *net = def->nets[i];
|
2016-09-23 15:04:53 +00:00
|
|
|
virDomainNetType actualType;
|
2016-02-15 17:01:21 +00:00
|
|
|
|
|
|
|
/* If appropriate, grab a physical device from the configured
|
|
|
|
* network's pool of devices, or resolve bridge device name
|
|
|
|
* to the one defined in the network definition.
|
|
|
|
*/
|
2018-07-26 14:32:04 +00:00
|
|
|
if (net->type == VIR_DOMAIN_NET_TYPE_NETWORK) {
|
|
|
|
if (!conn && !(conn = virGetConnectNetwork()))
|
2019-12-20 21:16:31 +00:00
|
|
|
return -1;
|
2018-07-26 14:32:04 +00:00
|
|
|
if (virDomainNetAllocateActualDevice(conn, def, net) < 0)
|
2019-12-20 21:16:31 +00:00
|
|
|
return -1;
|
2018-07-26 14:32:04 +00:00
|
|
|
}
|
2016-02-15 17:01:21 +00:00
|
|
|
|
|
|
|
actualType = virDomainNetGetActualType(net);
|
|
|
|
if (actualType == VIR_DOMAIN_NET_TYPE_HOSTDEV &&
|
|
|
|
net->type == VIR_DOMAIN_NET_TYPE_NETWORK) {
|
|
|
|
/* Each type='hostdev' network device must also have a
|
|
|
|
* corresponding entry in the hostdevs array. For netdevs
|
|
|
|
* that are hardcoded as type='hostdev', this is already
|
|
|
|
* done by the parser, but for those allocated from a
|
|
|
|
* network / determined at runtime, we need to do it
|
|
|
|
* separately.
|
|
|
|
*/
|
2021-03-11 07:16:13 +00:00
|
|
|
virDomainHostdevDef *hostdev = virDomainNetGetActualHostdev(net);
|
|
|
|
virDomainHostdevSubsysPCI *pcisrc = &hostdev->source.subsys.u.pci;
|
2016-02-15 17:01:21 +00:00
|
|
|
|
|
|
|
if (virDomainHostdevFind(def, hostdev, NULL) >= 0) {
|
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
_("PCI device %04x:%02x:%02x.%x "
|
|
|
|
"allocated from network %s is already "
|
|
|
|
"in use by domain %s"),
|
|
|
|
pcisrc->addr.domain, pcisrc->addr.bus,
|
|
|
|
pcisrc->addr.slot, pcisrc->addr.function,
|
|
|
|
net->data.network.name, def->name);
|
2019-12-20 21:16:31 +00:00
|
|
|
return -1;
|
2016-02-15 17:01:21 +00:00
|
|
|
}
|
|
|
|
if (virDomainHostdevInsert(def, hostdev) < 0)
|
2019-12-20 21:16:31 +00:00
|
|
|
return -1;
|
2019-08-08 14:55:12 +00:00
|
|
|
} else if (actualType == VIR_DOMAIN_NET_TYPE_USER &&
|
|
|
|
!priv->disableSlirp &&
|
2019-09-10 08:12:24 +00:00
|
|
|
virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_DBUS_VMSTATE)) {
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuSlirp *slirp = NULL;
|
2020-10-15 11:12:15 +00:00
|
|
|
int rv = qemuInterfacePrepareSlirp(driver, net, &slirp);
|
2019-08-08 14:55:12 +00:00
|
|
|
|
2020-10-15 11:12:15 +00:00
|
|
|
if (rv == -1)
|
|
|
|
return -1;
|
|
|
|
if (rv == 1)
|
|
|
|
QEMU_DOMAIN_NETWORK_PRIVATE(net)->slirp = slirp;
|
2019-08-08 14:55:12 +00:00
|
|
|
}
|
|
|
|
|
2016-02-15 17:01:21 +00:00
|
|
|
}
|
2019-12-20 21:16:31 +00:00
|
|
|
return 0;
|
2016-02-15 17:01:21 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2016-01-13 15:36:52 +00:00
|
|
|
/**
|
|
|
|
* qemuProcessSetupVcpu:
|
|
|
|
* @vm: domain object
|
|
|
|
* @vcpuid: id of VCPU to set defaults
|
|
|
|
*
|
|
|
|
* This function sets resource properties (cgroups, affinity, scheduler) for a
|
|
|
|
* vCPU. This function expects that the vCPU is online and the vCPU pids were
|
|
|
|
* correctly detected at the point when it's called.
|
|
|
|
*
|
|
|
|
* Returns 0 on success, -1 on error.
|
|
|
|
*/
|
|
|
|
int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessSetupVcpu(virDomainObj *vm,
|
2016-01-13 15:36:52 +00:00
|
|
|
unsigned int vcpuid)
|
|
|
|
{
|
|
|
|
pid_t vcpupid = qemuDomainGetVcpuPid(vm, vcpuid);
|
2021-03-11 07:16:13 +00:00
|
|
|
virDomainVcpuDef *vcpu = virDomainDefGetVcpu(vm->def, vcpuid);
|
|
|
|
virDomainResctrlMonDef *mon = NULL;
|
2017-11-10 12:21:51 +00:00
|
|
|
size_t i = 0;
|
|
|
|
|
|
|
|
if (qemuProcessSetupPid(vm, vcpupid, VIR_CGROUP_THREAD_VCPU,
|
|
|
|
vcpuid, vcpu->cpumask,
|
|
|
|
vm->def->cputune.period,
|
|
|
|
vm->def->cputune.quota,
|
|
|
|
&vcpu->sched) < 0)
|
|
|
|
return -1;
|
|
|
|
|
2018-07-30 03:12:35 +00:00
|
|
|
for (i = 0; i < vm->def->nresctrls; i++) {
|
2018-11-12 13:31:45 +00:00
|
|
|
size_t j = 0;
|
2021-03-11 07:16:13 +00:00
|
|
|
virDomainResctrlDef *ct = vm->def->resctrls[i];
|
2017-11-10 12:21:51 +00:00
|
|
|
|
|
|
|
if (virBitmapIsBitSet(ct->vcpus, vcpuid)) {
|
|
|
|
if (virResctrlAllocAddPID(ct->alloc, vcpupid) < 0)
|
|
|
|
return -1;
|
2018-11-12 13:31:45 +00:00
|
|
|
|
|
|
|
for (j = 0; j < ct->nmonitors; j++) {
|
|
|
|
mon = ct->monitors[j];
|
|
|
|
|
2019-06-11 03:31:06 +00:00
|
|
|
if (virBitmapEqual(ct->vcpus, mon->vcpus) &&
|
|
|
|
!virResctrlAllocIsEmpty(ct->alloc))
|
2018-11-12 13:31:45 +00:00
|
|
|
continue;
|
|
|
|
|
|
|
|
if (virBitmapIsBitSet(mon->vcpus, vcpuid)) {
|
|
|
|
if (virResctrlMonitorAddPID(mon->instance, vcpupid) < 0)
|
|
|
|
return -1;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-11-10 12:21:51 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2016-01-13 15:36:52 +00:00
|
|
|
|
2017-11-10 12:21:51 +00:00
|
|
|
return 0;
|
2016-01-13 15:36:52 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
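/**
 * qemuProcessSetupVcpus:
 *
 * Apply per-vCPU resource settings (cgroups, affinity, scheduler) to every
 * online vCPU. When per-vCPU PIDs are not available, only validate that no
 * vCPU requests an affinity different from the domain default.
 *
 * Returns 0 on success, -1 on error.
 */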
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessSetupVcpus(virDomainObj *vm)
|
2016-01-13 15:36:52 +00:00
|
|
|
{
|
2021-03-11 07:16:13 +00:00
|
|
|
virDomainVcpuDef *vcpu;
|
2016-01-13 15:36:52 +00:00
|
|
|
unsigned int maxvcpus = virDomainDefGetVcpusMax(vm->def);
|
|
|
|
size_t i;
|
|
|
|
|
|
|
|
if ((vm->def->cputune.period || vm->def->cputune.quota) &&
|
2021-03-11 07:16:13 +00:00
|
|
|
!virCgroupHasController(((qemuDomainObjPrivate *) vm->privateData)->cgroup,
|
2016-01-13 15:36:52 +00:00
|
|
|
VIR_CGROUP_CONTROLLER_CPU)) {
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
|
|
|
|
_("cgroup cpu is required for scheduler tuning"));
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!qemuDomainHasVcpuPids(vm)) {
|
|
|
|
/* If any CPU has custom affinity that differs from the
|
|
|
|
* VM default affinity, we must reject it */
|
|
|
|
for (i = 0; i < maxvcpus; i++) {
|
|
|
|
vcpu = virDomainDefGetVcpu(vm->def, i);
|
|
|
|
|
|
|
|
if (!vcpu->online)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (vcpu->cpumask &&
|
|
|
|
!virBitmapEqual(vm->def->cpumask, vcpu->cpumask)) {
|
|
|
|
virReportError(VIR_ERR_OPERATION_INVALID, "%s",
|
|
|
|
_("cpu affinity is not supported"));
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < maxvcpus; i++) {
|
|
|
|
vcpu = virDomainDefGetVcpu(vm->def, i);
|
|
|
|
|
|
|
|
if (!vcpu->online)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (qemuProcessSetupVcpu(vm, i) < 0)
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2016-01-14 09:38:02 +00:00
|
|
|
int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessSetupIOThread(virDomainObj *vm,
|
|
|
|
virDomainIOThreadIDDef *iothread)
|
2016-01-14 09:38:02 +00:00
|
|
|
{
|
2016-06-21 15:33:06 +00:00
|
|
|
return qemuProcessSetupPid(vm, iothread->thread_id,
|
|
|
|
VIR_CGROUP_THREAD_IOTHREAD,
|
2016-01-14 09:38:02 +00:00
|
|
|
iothread->iothread_id,
|
2016-06-21 15:33:06 +00:00
|
|
|
iothread->cpumask,
|
2016-07-25 11:07:43 +00:00
|
|
|
vm->def->cputune.iothread_period,
|
|
|
|
vm->def->cputune.iothread_quota,
|
2016-06-21 15:33:06 +00:00
|
|
|
&iothread->sched);
|
2016-01-14 09:38:02 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
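/**
 * qemuProcessSetupIOThreads:
 *
 * Apply resource settings (cgroups, affinity, scheduler) to every IOThread
 * of the domain.
 *
 * Returns 0 on success, -1 on error.
 */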
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessSetupIOThreads(virDomainObj *vm)
|
2016-01-14 09:38:02 +00:00
|
|
|
{
|
|
|
|
size_t i;
|
|
|
|
|
|
|
|
for (i = 0; i < vm->def->niothreadids; i++) {
|
2021-03-11 07:16:13 +00:00
|
|
|
virDomainIOThreadIDDef *info = vm->def->iothreadids[i];
|
2016-01-14 09:38:02 +00:00
|
|
|
|
|
|
|
if (qemuProcessSetupIOThread(vm, info) < 0)
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2016-08-04 12:36:24 +00:00
|
|
|
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessValidateHotpluggableVcpus(virDomainDef *def)
|
2016-08-04 12:36:24 +00:00
|
|
|
{
|
2021-03-11 07:16:13 +00:00
|
|
|
virDomainVcpuDef *vcpu;
|
|
|
|
virDomainVcpuDef *subvcpu;
|
|
|
|
qemuDomainVcpuPrivate *vcpupriv;
|
2016-08-04 12:36:24 +00:00
|
|
|
unsigned int maxvcpus = virDomainDefGetVcpusMax(def);
|
|
|
|
size_t i = 0;
|
|
|
|
size_t j;
|
2021-12-07 16:22:26 +00:00
|
|
|
g_autoptr(virBitmap) ordermap = virBitmapNew(maxvcpus + 1);
|
2016-08-04 12:36:24 +00:00
|
|
|
|
|
|
|
/* validate:
|
|
|
|
* - all hotpluggable entities to be hotplugged have the correct data
|
|
|
|
* - vcpus belonging to a hotpluggable entity share configuration
|
|
|
|
* - order of the hotpluggable entities is unique
|
|
|
|
*/
|
|
|
|
for (i = 0; i < maxvcpus; i++) {
|
|
|
|
vcpu = virDomainDefGetVcpu(def, i);
|
|
|
|
vcpupriv = QEMU_DOMAIN_VCPU_PRIVATE(vcpu);
|
|
|
|
|
|
|
|
/* skip over hotpluggable entities */
|
|
|
|
if (vcpupriv->vcpus == 0)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (vcpu->order != 0) {
|
2016-09-14 04:50:00 +00:00
|
|
|
if (virBitmapIsBitSet(ordermap, vcpu->order)) {
|
2016-08-04 12:36:24 +00:00
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
|
2016-09-14 03:58:33 +00:00
|
|
|
_("duplicate vcpu order '%u'"), vcpu->order);
|
2021-12-07 16:22:26 +00:00
|
|
|
return -1;
|
2016-08-04 12:36:24 +00:00
|
|
|
}
|
|
|
|
|
2016-09-16 13:51:14 +00:00
|
|
|
if (virBitmapSetBit(ordermap, vcpu->order)) {
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
|
|
|
|
_("vcpu order '%u' exceeds vcpu count"),
|
|
|
|
vcpu->order);
|
2021-12-07 16:22:26 +00:00
|
|
|
return -1;
|
2016-09-16 13:51:14 +00:00
|
|
|
}
|
2016-08-04 12:36:24 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
for (j = i + 1; j < (i + vcpupriv->vcpus); j++) {
|
|
|
|
subvcpu = virDomainDefGetVcpu(def, j);
|
|
|
|
if (subvcpu->hotpluggable != vcpu->hotpluggable ||
|
|
|
|
subvcpu->online != vcpu->online ||
|
|
|
|
subvcpu->order != vcpu->order) {
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
|
|
|
|
_("vcpus '%zu' and '%zu' are in the same hotplug "
|
|
|
|
"group but differ in configuration"), i, j);
|
2021-12-07 16:22:26 +00:00
|
|
|
return -1;
|
2016-08-04 12:36:24 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (vcpu->online && vcpu->hotpluggable == VIR_TRISTATE_BOOL_YES) {
|
|
|
|
if ((vcpupriv->socket_id == -1 && vcpupriv->core_id == -1 &&
|
2017-06-27 14:04:38 +00:00
|
|
|
vcpupriv->thread_id == -1 && vcpupriv->node_id == -1) ||
|
2016-08-04 12:36:24 +00:00
|
|
|
!vcpupriv->type) {
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
|
|
|
|
_("vcpu '%zu' is missing hotplug data"), i);
|
2021-12-07 16:22:26 +00:00
|
|
|
return -1;
|
2016-08-04 12:36:24 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-12-07 16:22:26 +00:00
|
|
|
return 0;
|
2016-08-04 12:36:24 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static bool
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainHasHotpluggableStartupVcpus(virDomainDef *def)
|
2016-08-04 12:36:24 +00:00
|
|
|
{
|
|
|
|
size_t maxvcpus = virDomainDefGetVcpusMax(def);
|
2021-03-11 07:16:13 +00:00
|
|
|
virDomainVcpuDef *vcpu;
|
2016-08-04 12:36:24 +00:00
|
|
|
size_t i;
|
|
|
|
|
|
|
|
for (i = 0; i < maxvcpus; i++) {
|
|
|
|
vcpu = virDomainDefGetVcpu(def, i);
|
|
|
|
|
|
|
|
if (vcpu->online && vcpu->hotpluggable == VIR_TRISTATE_BOOL_YES)
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
qemuProcessVcpusSortOrder(const void *a,
|
|
|
|
const void *b)
|
|
|
|
{
|
2021-03-11 07:16:13 +00:00
|
|
|
virDomainVcpuDef *vcpua = *((virDomainVcpuDef **)a);
|
|
|
|
virDomainVcpuDef *vcpub = *((virDomainVcpuDef **)b);
|
2016-08-04 12:36:24 +00:00
|
|
|
|
|
|
|
return vcpua->order - vcpub->order;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
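/**
 * qemuProcessSetupHotpluggableVcpus:
 *
 * Hotplug all vCPUs that are marked both online and hotpluggable, in the
 * order given by their 'order' attribute, via the monitor while the emulator
 * is temporarily allowed to run on all NUMA nodes.
 *
 * Returns 0 on success, -1 on error.
 */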
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessSetupHotpluggableVcpus(virQEMUDriver *driver,
|
|
|
|
virDomainObj *vm,
|
2016-08-04 12:36:24 +00:00
|
|
|
qemuDomainAsyncJob asyncJob)
|
|
|
|
{
|
|
|
|
unsigned int maxvcpus = virDomainDefGetVcpusMax(vm->def);
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
2022-01-25 16:19:53 +00:00
|
|
|
virCgroupEmulatorAllNodesData *emulatorCgroup = NULL;
|
2021-03-11 07:16:13 +00:00
|
|
|
virDomainVcpuDef *vcpu;
|
|
|
|
qemuDomainVcpuPrivate *vcpupriv;
|
2016-08-04 12:36:24 +00:00
|
|
|
size_t i;
|
|
|
|
int ret = -1;
|
|
|
|
int rc;
|
|
|
|
|
2021-08-04 09:33:35 +00:00
|
|
|
g_autofree virDomainVcpuDef **bootHotplug = NULL;
|
2016-08-04 12:36:24 +00:00
|
|
|
size_t nbootHotplug = 0;
|
|
|
|
|
|
|
|
for (i = 0; i < maxvcpus; i++) {
|
|
|
|
vcpu = virDomainDefGetVcpu(vm->def, i);
|
|
|
|
vcpupriv = QEMU_DOMAIN_VCPU_PRIVATE(vcpu);
|
|
|
|
|
|
|
|
if (vcpu->hotpluggable == VIR_TRISTATE_BOOL_YES && vcpu->online &&
|
|
|
|
vcpupriv->vcpus != 0) {
|
2019-10-22 13:26:14 +00:00
|
|
|
vcpupriv->alias = g_strdup_printf("vcpu%zu", i);
|
2016-08-04 12:36:24 +00:00
|
|
|
|
2021-08-03 12:14:20 +00:00
|
|
|
VIR_APPEND_ELEMENT(bootHotplug, nbootHotplug, vcpu);
|
2016-08-04 12:36:24 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-08-04 09:33:35 +00:00
|
|
|
if (nbootHotplug == 0)
|
|
|
|
return 0;
|
2016-08-04 12:36:24 +00:00
|
|
|
|
|
|
|
qsort(bootHotplug, nbootHotplug, sizeof(*bootHotplug),
|
|
|
|
qemuProcessVcpusSortOrder);
|
|
|
|
|
2022-01-25 16:19:53 +00:00
|
|
|
if (virDomainCgroupEmulatorAllNodesAllow(priv->cgroup, &emulatorCgroup) < 0)
|
2016-09-07 11:20:00 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
2016-08-04 12:36:24 +00:00
|
|
|
for (i = 0; i < nbootHotplug; i++) {
|
2021-08-04 09:33:35 +00:00
|
|
|
g_autoptr(virJSONValue) vcpuprops = NULL;
|
2016-08-04 12:36:24 +00:00
|
|
|
vcpu = bootHotplug[i];
|
|
|
|
|
|
|
|
if (!(vcpuprops = qemuBuildHotpluggableCPUProps(vcpu)))
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
2021-03-24 10:07:56 +00:00
|
|
|
rc = qemuMonitorAddDeviceProps(qemuDomainGetMonitor(vm), &vcpuprops);
|
2016-08-04 12:36:24 +00:00
|
|
|
|
2021-11-24 12:09:32 +00:00
|
|
|
qemuDomainObjExitMonitor(driver, vm);
|
2016-08-04 12:36:24 +00:00
|
|
|
|
|
|
|
if (rc < 0)
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = 0;
|
|
|
|
|
|
|
|
cleanup:
|
2022-01-25 16:19:53 +00:00
|
|
|
virDomainCgroupEmulatorAllNodesRestore(emulatorCgroup);
|
2016-08-04 12:36:24 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2019-10-18 12:33:19 +00:00
|
|
|
static bool
|
|
|
|
qemuProcessDropUnknownCPUFeatures(const char *name,
|
|
|
|
virCPUFeaturePolicy policy,
|
|
|
|
void *opaque)
|
|
|
|
{
|
|
|
|
const char **features = opaque;
|
|
|
|
|
|
|
|
if (policy != VIR_CPU_FEATURE_DISABLE &&
|
|
|
|
policy != VIR_CPU_FEATURE_FORBID)
|
|
|
|
return true;
|
|
|
|
|
2021-02-03 17:39:47 +00:00
|
|
|
if (g_strv_contains(features, name))
|
2019-10-18 12:33:19 +00:00
|
|
|
return true;
|
|
|
|
|
|
|
|
/* Features unknown to QEMU are implicitly disabled, we can just drop them
|
|
|
|
* from the definition. */
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2016-06-22 13:53:48 +00:00
|
|
|
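/**
 * qemuProcessUpdateGuestCPU:
 *
 * Validate and update the guest CPU definition for the selected QEMU binary:
 * check that the CPU mode is supported, compare against the host CPU where
 * requested, translate the model to one known to QEMU and drop features that
 * QEMU does not know about.
 *
 * Returns 0 on success, -1 on error.
 */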
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessUpdateGuestCPU(virDomainDef *def,
|
|
|
|
virQEMUCaps *qemuCaps,
|
2019-11-26 17:51:22 +00:00
|
|
|
virArch hostarch,
|
2016-06-22 13:53:48 +00:00
|
|
|
unsigned int flags)
|
|
|
|
{
|
|
|
|
if (!def->cpu)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
/* nothing to do if only topology part of CPU def is used */
|
|
|
|
if (def->cpu->mode == VIR_CPU_MODE_CUSTOM && !def->cpu->model)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
/* Old libvirt added host CPU model to host-model CPUs for migrations,
|
|
|
|
* while new libvirt just turns host-model into custom mode. We need
|
|
|
|
* to fix the mode to maintain backward compatibility and to avoid
|
|
|
|
* the CPU model being replaced in virCPUUpdate.
|
|
|
|
*/
|
|
|
|
if (!(flags & VIR_QEMU_PROCESS_START_NEW) &&
|
|
|
|
ARCH_IS_X86(def->os.arch) &&
|
|
|
|
def->cpu->mode == VIR_CPU_MODE_HOST_MODEL &&
|
|
|
|
def->cpu->model) {
|
|
|
|
def->cpu->mode = VIR_CPU_MODE_CUSTOM;
|
|
|
|
}
|
|
|
|
|
2019-11-26 17:51:22 +00:00
|
|
|
if (!virQEMUCapsIsCPUModeSupported(qemuCaps, hostarch, def->virtType,
|
2020-02-05 14:51:09 +00:00
|
|
|
def->cpu->mode, def->os.machine)) {
|
2016-06-22 13:53:48 +00:00
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
|
|
|
|
_("CPU mode '%s' for %s %s domain on %s host is not "
|
|
|
|
"supported by hypervisor"),
|
|
|
|
virCPUModeTypeToString(def->cpu->mode),
|
|
|
|
virArchToString(def->os.arch),
|
|
|
|
virDomainVirtTypeToString(def->virtType),
|
2019-11-26 17:51:22 +00:00
|
|
|
virArchToString(hostarch));
|
2016-06-22 13:53:48 +00:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2019-11-26 17:51:22 +00:00
|
|
|
if (virCPUConvertLegacy(hostarch, def->cpu) < 0)
|
2016-11-09 16:09:48 +00:00
|
|
|
return -1;
|
|
|
|
|
2021-03-23 10:01:55 +00:00
|
|
|
if (def->cpu->check != VIR_CPU_CHECK_NONE) {
|
2021-03-11 07:16:13 +00:00
|
|
|
virCPUDef *host;
|
2021-03-23 10:01:55 +00:00
|
|
|
|
|
|
|
host = virQEMUCapsGetHostModel(qemuCaps, def->virtType,
|
|
|
|
VIR_QEMU_CAPS_HOST_CPU_FULL);
|
|
|
|
|
|
|
|
if (host && virCPUCheckForbiddenFeatures(def->cpu, host) < 0)
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2021-02-05 18:35:10 +00:00
|
|
|
/* nothing to update for host-passthrough / maximum */
|
|
|
|
if (def->cpu->mode != VIR_CPU_MODE_HOST_PASSTHROUGH &&
|
|
|
|
def->cpu->mode != VIR_CPU_MODE_MAXIMUM) {
|
2019-09-20 20:41:34 +00:00
|
|
|
g_autoptr(virDomainCapsCPUModels) cpuModels = NULL;
|
|
|
|
|
2019-06-06 12:51:14 +00:00
|
|
|
if (def->cpu->check == VIR_CPU_CHECK_PARTIAL &&
|
2019-11-26 17:51:22 +00:00
|
|
|
virCPUCompare(hostarch,
|
2019-06-06 12:51:14 +00:00
|
|
|
virQEMUCapsGetHostModel(qemuCaps, def->virtType,
|
|
|
|
VIR_QEMU_CAPS_HOST_CPU_FULL),
|
|
|
|
def->cpu, true) < 0)
|
|
|
|
return -1;
|
2016-06-22 13:53:48 +00:00
|
|
|
|
2019-06-06 12:51:14 +00:00
|
|
|
if (virCPUUpdate(def->os.arch, def->cpu,
|
|
|
|
virQEMUCapsGetHostModel(qemuCaps, def->virtType,
|
|
|
|
VIR_QEMU_CAPS_HOST_CPU_MIGRATABLE)) < 0)
|
|
|
|
return -1;
|
2016-06-22 13:53:48 +00:00
|
|
|
|
2019-10-09 08:14:59 +00:00
|
|
|
cpuModels = virQEMUCapsGetCPUModels(qemuCaps, def->virtType, NULL, NULL);
|
2019-09-20 20:41:34 +00:00
|
|
|
|
|
|
|
if (virCPUTranslate(def->os.arch, def->cpu, cpuModels) < 0)
|
2019-06-06 12:51:14 +00:00
|
|
|
return -1;
|
|
|
|
|
|
|
|
def->cpu->fallback = VIR_CPU_FALLBACK_FORBID;
|
|
|
|
}
|
2016-06-22 13:53:48 +00:00
|
|
|
|
2019-06-06 12:51:14 +00:00
|
|
|
if (virCPUDefFilterFeatures(def->cpu, virQEMUCapsCPUFilterFeatures,
|
|
|
|
&def->os.arch) < 0)
|
2019-06-06 10:33:43 +00:00
|
|
|
return -1;
|
2016-06-22 13:53:48 +00:00
|
|
|
|
2019-10-18 12:33:19 +00:00
|
|
|
if (ARCH_IS_X86(def->os.arch)) {
|
2020-12-01 08:21:32 +00:00
|
|
|
g_auto(GStrv) features = NULL;
|
2019-10-18 12:33:19 +00:00
|
|
|
|
|
|
|
if (virQEMUCapsGetCPUFeatures(qemuCaps, def->virtType, false, &features) < 0)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
if (features &&
|
|
|
|
virCPUDefFilterFeatures(def->cpu, qemuProcessDropUnknownCPUFeatures,
|
|
|
|
features) < 0)
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2019-06-06 10:33:43 +00:00
|
|
|
return 0;
|
2016-06-22 13:53:48 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2017-07-12 07:06:42 +00:00
|
|
|
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessPrepareDomainNUMAPlacement(virDomainObj *vm)
|
2017-07-12 07:06:42 +00:00
|
|
|
{
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
2019-11-29 10:40:39 +00:00
|
|
|
g_autofree char *nodeset = NULL;
|
|
|
|
g_autoptr(virBitmap) numadNodeset = NULL;
|
|
|
|
g_autoptr(virBitmap) hostMemoryNodeset = NULL;
|
|
|
|
g_autoptr(virCapsHostNUMA) caps = NULL;
|
2017-07-12 07:06:42 +00:00
|
|
|
|
|
|
|
/* Get the advisory nodeset from numad if 'placement' of
|
|
|
|
* either <vcpu> or <numatune> is 'auto'.
|
|
|
|
*/
|
|
|
|
if (!virDomainDefNeedsPlacementAdvice(vm->def))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
nodeset = virNumaGetAutoPlacementAdvice(virDomainDefGetVcpus(vm->def),
|
|
|
|
virDomainDefGetMemoryTotal(vm->def));
|
|
|
|
|
|
|
|
if (!nodeset)
|
2019-11-29 10:40:39 +00:00
|
|
|
return -1;
|
2017-07-12 07:06:42 +00:00
|
|
|
|
2017-07-12 11:59:35 +00:00
|
|
|
if (!(hostMemoryNodeset = virNumaGetHostMemoryNodeset()))
|
2019-11-29 10:40:39 +00:00
|
|
|
return -1;
|
2017-07-12 11:59:35 +00:00
|
|
|
|
2017-07-12 07:06:42 +00:00
|
|
|
VIR_DEBUG("Nodeset returned from numad: %s", nodeset);
|
|
|
|
|
2017-07-12 11:59:35 +00:00
|
|
|
if (virBitmapParse(nodeset, &numadNodeset, VIR_DOMAIN_CPUMASK_LEN) < 0)
|
2019-11-29 10:40:39 +00:00
|
|
|
return -1;
|
|
|
|
|
2020-12-02 08:26:30 +00:00
|
|
|
if (!(caps = virCapabilitiesHostNUMANewHost()))
|
2019-11-29 10:40:39 +00:00
|
|
|
return -1;
|
2017-07-12 07:06:42 +00:00
|
|
|
|
2017-07-12 11:59:35 +00:00
|
|
|
/* numad may return a nodeset that only contains cpus but cgroups don't play
|
|
|
|
* well with that. Set the autoCpuset from all cpus from that nodeset, but
|
|
|
|
* assign autoNodeset only with nodes containing memory. */
|
2019-11-29 10:40:39 +00:00
|
|
|
if (!(priv->autoCpuset = virCapabilitiesHostNUMAGetCpus(caps, numadNodeset)))
|
|
|
|
return -1;
|
2017-07-12 07:06:42 +00:00
|
|
|
|
2017-07-12 11:59:35 +00:00
|
|
|
virBitmapIntersect(numadNodeset, hostMemoryNodeset);
|
|
|
|
|
2019-10-16 11:43:18 +00:00
|
|
|
priv->autoNodeset = g_steal_pointer(&numadNodeset);
|
2017-07-12 11:59:35 +00:00
|
|
|
|
2019-11-29 10:40:39 +00:00
|
|
|
return 0;
|
2017-07-12 07:06:42 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2021-04-30 06:58:45 +00:00
|
|
|
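/**
 * qemuProcessPrepareDeviceBootorder:
 *
 * Translate the legacy <boot dev='...'/> order into per-device effective
 * boot indexes on the first matching cdrom, floppy, disk and network device.
 */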
static void
|
2021-10-01 13:22:17 +00:00
|
|
|
qemuProcessPrepareDeviceBootorder(virDomainDef *def)
|
2021-04-30 06:58:45 +00:00
|
|
|
{
|
|
|
|
size_t i;
|
|
|
|
unsigned int bootCD = 0;
|
|
|
|
unsigned int bootFloppy = 0;
|
|
|
|
unsigned int bootDisk = 0;
|
2021-10-01 14:18:13 +00:00
|
|
|
unsigned int bootNetwork = 0;
|
2021-04-30 06:58:45 +00:00
|
|
|
|
2021-10-01 13:16:57 +00:00
|
|
|
if (def->os.nBootDevs == 0)
|
|
|
|
return;
|
|
|
|
|
2021-04-30 06:58:45 +00:00
|
|
|
for (i = 0; i < def->os.nBootDevs; i++) {
|
2022-01-20 20:57:49 +00:00
|
|
|
switch (def->os.bootDevs[i]) {
|
2021-04-30 06:58:45 +00:00
|
|
|
case VIR_DOMAIN_BOOT_CDROM:
|
|
|
|
bootCD = i + 1;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case VIR_DOMAIN_BOOT_FLOPPY:
|
|
|
|
bootFloppy = i + 1;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case VIR_DOMAIN_BOOT_DISK:
|
|
|
|
bootDisk = i + 1;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case VIR_DOMAIN_BOOT_NET:
|
2021-10-01 14:18:13 +00:00
|
|
|
bootNetwork = i + 1;
|
|
|
|
break;
|
|
|
|
|
2021-04-30 06:58:45 +00:00
|
|
|
case VIR_DOMAIN_BOOT_LAST:
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < def->ndisks; i++) {
|
|
|
|
virDomainDiskDef *disk = def->disks[i];
|
|
|
|
|
|
|
|
switch (disk->device) {
|
|
|
|
case VIR_DOMAIN_DISK_DEVICE_CDROM:
|
2021-10-01 13:16:57 +00:00
|
|
|
disk->info.effectiveBootIndex = bootCD;
|
2021-04-30 06:58:45 +00:00
|
|
|
bootCD = 0;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case VIR_DOMAIN_DISK_DEVICE_DISK:
|
|
|
|
case VIR_DOMAIN_DISK_DEVICE_LUN:
|
2021-10-01 13:16:57 +00:00
|
|
|
disk->info.effectiveBootIndex = bootDisk;
|
2021-04-30 06:58:45 +00:00
|
|
|
bootDisk = 0;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case VIR_DOMAIN_DISK_DEVICE_FLOPPY:
|
2021-10-01 13:16:57 +00:00
|
|
|
disk->info.effectiveBootIndex = bootFloppy;
|
2021-04-30 06:58:45 +00:00
|
|
|
bootFloppy = 0;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case VIR_DOMAIN_DISK_DEVICE_LAST:
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2021-10-01 14:18:13 +00:00
|
|
|
|
|
|
|
if (def->nnets > 0 && bootNetwork > 0) {
|
|
|
|
/* If network boot is enabled, the first network device gets the boot index. If
|
|
|
|
* that one is backed by a host device, then we need to find the first
|
|
|
|
* corresponding host device */
|
|
|
|
if (virDomainNetGetActualType(def->nets[0]) == VIR_DOMAIN_NET_TYPE_HOSTDEV) {
|
|
|
|
for (i = 0; i < def->nhostdevs; i++) {
|
|
|
|
virDomainHostdevDef *hostdev = def->hostdevs[i];
|
|
|
|
virDomainHostdevSubsys *subsys = &hostdev->source.subsys;
|
|
|
|
|
|
|
|
if (hostdev->mode == VIR_DOMAIN_HOSTDEV_MODE_SUBSYS &&
|
|
|
|
subsys->type == VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_PCI &&
|
|
|
|
hostdev->info->type != VIR_DOMAIN_DEVICE_ADDRESS_TYPE_UNASSIGNED &&
|
|
|
|
hostdev->parentnet) {
|
|
|
|
hostdev->info->effectiveBootIndex = bootNetwork;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
def->nets[0]->info.effectiveBootIndex = bootNetwork;
|
|
|
|
}
|
|
|
|
}
|
2021-04-30 06:58:45 +00:00
|
|
|
}
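/*
* A hypothetical example of the assignment above: with <boot dev='network'/>
* followed by <boot dev='cdrom'/> in the OS definition, bootNetwork becomes 1
* and bootCD becomes 2, so the first network interface (or its backing
* hostdev) gets effectiveBootIndex = 1 and the first CD-ROM disk gets
* effectiveBootIndex = 2; later devices of the same class keep the default of
* 0, i.e. no explicit boot index.
*/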
|
|
|
|
|
|
|
|
|
2017-10-03 10:38:23 +00:00
|
|
|
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessPrepareDomainStorage(virQEMUDriver *driver,
|
|
|
|
virDomainObj *vm,
|
|
|
|
qemuDomainObjPrivate *priv,
|
|
|
|
virQEMUDriverConfig *cfg,
|
2017-10-03 10:38:23 +00:00
|
|
|
unsigned int flags)
|
|
|
|
{
|
|
|
|
size_t i;
|
|
|
|
bool cold_boot = flags & VIR_QEMU_PROCESS_START_COLD;
|
|
|
|
|
|
|
|
for (i = vm->def->ndisks; i > 0; i--) {
|
|
|
|
size_t idx = i - 1;
|
2021-03-11 07:16:13 +00:00
|
|
|
virDomainDiskDef *disk = vm->def->disks[idx];
|
2017-10-03 10:38:23 +00:00
|
|
|
|
2018-02-09 16:06:43 +00:00
|
|
|
if (virDomainDiskTranslateSourcePool(disk) < 0) {
|
2017-10-03 10:38:23 +00:00
|
|
|
if (qemuDomainCheckDiskStartupPolicy(driver, vm, idx, cold_boot) < 0)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
/* disk source was dropped */
|
|
|
|
continue;
|
|
|
|
}
|
2017-10-03 11:16:05 +00:00
|
|
|
|
2018-02-09 16:14:41 +00:00
|
|
|
if (qemuDomainPrepareDiskSource(disk, priv, cfg) < 0)
|
2017-11-08 20:11:47 +00:00
|
|
|
return -1;
|
2017-10-03 10:38:23 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
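/*
* Note that the loop above walks the disks backwards (from ndisks down to 1),
* presumably so that qemuDomainCheckDiskStartupPolicy() can drop an optional
* disk whose source went missing without disturbing the indexes of the
* entries that are still left to process.
*/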
|
|
|
|
|
|
|
|
|
2020-09-10 10:32:04 +00:00
|
|
|
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessPrepareDomainHostdevs(virDomainObj *vm,
|
|
|
|
qemuDomainObjPrivate *priv)
|
2020-09-10 10:32:04 +00:00
|
|
|
{
|
|
|
|
size_t i;
|
|
|
|
|
|
|
|
for (i = 0; i < vm->def->nhostdevs; i++) {
|
2021-03-11 07:16:13 +00:00
|
|
|
virDomainHostdevDef *hostdev = vm->def->hostdevs[i];
|
2020-09-10 10:32:04 +00:00
|
|
|
|
|
|
|
if (qemuDomainPrepareHostdev(hostdev, priv) < 0)
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2020-10-15 13:06:01 +00:00
|
|
|
int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessPrepareHostHostdev(virDomainHostdevDef *hostdev)
|
2020-10-15 13:06:01 +00:00
|
|
|
{
|
|
|
|
if (virHostdevIsSCSIDevice(hostdev)) {
|
2021-03-11 07:16:13 +00:00
|
|
|
virDomainHostdevSubsysSCSI *scsisrc = &hostdev->source.subsys.u.scsi;
|
2020-10-15 13:06:01 +00:00
|
|
|
|
|
|
|
switch ((virDomainHostdevSCSIProtocolType) scsisrc->protocol) {
|
|
|
|
case VIR_DOMAIN_HOSTDEV_SCSI_PROTOCOL_TYPE_NONE: {
|
2021-03-11 07:16:13 +00:00
|
|
|
virDomainHostdevSubsysSCSIHost *scsihostsrc = &scsisrc->u.host;
|
|
|
|
virStorageSource *src = scsisrc->u.host.src;
|
2020-10-15 13:06:01 +00:00
|
|
|
g_autofree char *devstr = NULL;
|
|
|
|
|
|
|
|
if (!(devstr = virSCSIDeviceGetSgName(NULL,
|
|
|
|
scsihostsrc->adapter,
|
|
|
|
scsihostsrc->bus,
|
|
|
|
scsihostsrc->target,
|
|
|
|
scsihostsrc->unit)))
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
src->path = g_strdup_printf("/dev/%s", devstr);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
case VIR_DOMAIN_HOSTDEV_SCSI_PROTOCOL_TYPE_ISCSI:
|
|
|
|
break;
|
|
|
|
|
|
|
|
case VIR_DOMAIN_HOSTDEV_SCSI_PROTOCOL_TYPE_LAST:
|
|
|
|
default:
|
|
|
|
virReportEnumRangeError(virDomainHostdevSCSIProtocolType, scsisrc->protocol);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
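/*
* As an illustration of the SCSI branch above: for a host SCSI device
* addressed by adapter/bus/target/unit, virSCSIDeviceGetSgName() resolves the
* matching SCSI generic node (e.g. "sg2", whichever name the kernel assigned)
* and the source path is recorded as "/dev/sg2" for later labelling and use.
*/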
|
|
|
|
|
|
|
|
|
|
|
|
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessPrepareHostHostdevs(virDomainObj *vm)
|
2020-10-15 13:06:01 +00:00
|
|
|
{
|
|
|
|
size_t i;
|
|
|
|
|
|
|
|
for (i = 0; i < vm->def->nhostdevs; i++) {
|
2021-03-11 07:16:13 +00:00
|
|
|
virDomainHostdevDef *hostdev = vm->def->hostdevs[i];
|
2020-10-15 13:06:01 +00:00
|
|
|
|
|
|
|
if (qemuProcessPrepareHostHostdev(hostdev) < 0)
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2021-08-30 04:30:42 +00:00
|
|
|
/**
|
|
|
|
* qemuProcessRebootAllowed:
|
|
|
|
* @def: domain definition
|
|
|
|
*
|
|
|
|
* This function encapsulates the logic which dictated whether '-no-reboot' was
|
|
|
|
* used instead of '-no-shutdown', which is used for QEMU versions which don't
|
|
|
|
* support the 'set-action' QMP command.
|
|
|
|
*/
|
|
|
|
bool
|
|
|
|
qemuProcessRebootAllowed(const virDomainDef *def)
|
|
|
|
{
|
|
|
|
return def->onReboot != VIR_DOMAIN_LIFECYCLE_ACTION_DESTROY ||
|
|
|
|
def->onPoweroff != VIR_DOMAIN_LIFECYCLE_ACTION_DESTROY ||
|
|
|
|
(def->onCrash != VIR_DOMAIN_LIFECYCLE_ACTION_DESTROY &&
|
|
|
|
def->onCrash != VIR_DOMAIN_LIFECYCLE_ACTION_COREDUMP_DESTROY);
|
|
|
|
}
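/*
* For illustration: reboot is only disallowed when every lifecycle action
* destroys the guest, e.g. a definition carrying
* <on_poweroff>destroy</on_poweroff>, <on_reboot>destroy</on_reboot> and
* <on_crash>destroy</on_crash> (or coredump-destroy); any other combination
* makes this function return true and QEMU is allowed to handle the reboot.
*/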
|
|
|
|
|
|
|
|
|
2017-10-11 13:57:16 +00:00
|
|
|
static void
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessPrepareAllowReboot(virDomainObj *vm)
|
2017-10-11 13:57:16 +00:00
|
|
|
{
|
2021-03-11 07:16:13 +00:00
|
|
|
virDomainDef *def = vm->def;
|
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
2017-10-11 13:57:16 +00:00
|
|
|
|
2021-08-23 14:43:26 +00:00
|
|
|
/* with the 'set-action' QMP command we don't need to keep this around as
|
|
|
|
* we always update qemu with the proper state */
|
|
|
|
if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_SET_ACTION))
|
|
|
|
return;
|
|
|
|
|
2017-10-11 13:57:16 +00:00
|
|
|
if (priv->allowReboot != VIR_TRISTATE_BOOL_ABSENT)
|
|
|
|
return;
|
|
|
|
|
2021-08-30 04:30:42 +00:00
|
|
|
priv->allowReboot = virTristateBoolFromBool(qemuProcessRebootAllowed(def));
|
2017-10-11 13:57:16 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2020-10-08 12:11:45 +00:00
|
|
|
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessUpdateSEVInfo(virDomainObj *vm)
|
2020-10-08 12:11:45 +00:00
|
|
|
{
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
|
|
|
virQEMUCaps *qemuCaps = priv->qemuCaps;
|
2021-07-21 11:07:51 +00:00
|
|
|
virDomainSEVDef *sev = &vm->def->sec->data.sev;
|
2021-03-11 07:16:13 +00:00
|
|
|
virSEVCapability *sevCaps = NULL;
|
2020-10-08 12:11:45 +00:00
|
|
|
|
|
|
|
/* if platform-specific values like 'cbitpos' and 'reducedPhysBits' have
|
|
|
|
* not been supplied, we need to autofill them from caps now as both are
|
|
|
|
* mandatory on QEMU cmdline
|
|
|
|
*/
|
|
|
|
sevCaps = virQEMUCapsGetSEVCapabilities(qemuCaps);
|
|
|
|
if (!sev->haveCbitpos) {
|
|
|
|
sev->cbitpos = sevCaps->cbitpos;
|
|
|
|
sev->haveCbitpos = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!sev->haveReducedPhysBits) {
|
|
|
|
sev->reduced_phys_bits = sevCaps->reduced_phys_bits;
|
|
|
|
sev->haveReducedPhysBits = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2021-11-18 14:16:20 +00:00
|
|
|
/* qemuProcessPrepareChardevSource:
|
|
|
|
* @def: live domain definition
|
|
|
|
* @cfg: driver configuration
|
|
|
|
*
|
|
|
|
* Iterate through all devices that use virDomainChrSourceDef as backend.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
qemuProcessPrepareChardevSource(virDomainDef *def,
|
|
|
|
virQEMUDriverConfig *cfg)
|
|
|
|
{
|
|
|
|
struct qemuDomainPrepareChardevSourceData data = { .cfg = cfg };
|
|
|
|
|
|
|
|
return qemuDomainDeviceBackendChardevForeach(def,
|
|
|
|
qemuDomainPrepareChardevSourceOne,
|
|
|
|
&data);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2016-03-15 12:00:59 +00:00
|
|
|
/**
|
2017-10-03 07:51:27 +00:00
|
|
|
* qemuProcessPrepareDomain:
|
|
|
|
* @driver: qemu driver
|
|
|
|
* @vm: domain object
|
|
|
|
* @flags: qemuProcessStartFlags
|
2016-03-15 12:00:59 +00:00
|
|
|
*
|
|
|
|
* This function groups all code that modifies only the live XML of a domain which
|
|
|
|
* is about to start and it's the only place to do those modifications.
|
|
|
|
*
|
|
|
|
* The flag VIR_QEMU_PROCESS_START_PRETEND indicates that we don't want to actually
|
|
|
|
* start the domain, but only build a valid qemu command line. If some code shouldn't be
|
|
|
|
* executed in this case, make sure to check this flag.
|
|
|
|
*
|
|
|
|
* TODO: move all XML modification from qemuBuildCommandLine into this function
|
|
|
|
*/
|
|
|
|
int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessPrepareDomain(virQEMUDriver *driver,
|
|
|
|
virDomainObj *vm,
|
2016-03-15 12:00:59 +00:00
|
|
|
unsigned int flags)
|
|
|
|
{
|
|
|
|
size_t i;
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
2019-12-20 21:16:30 +00:00
|
|
|
g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
|
2016-03-15 12:00:59 +00:00
|
|
|
|
2017-07-21 13:51:03 +00:00
|
|
|
priv->machineName = qemuDomainGetMachineName(vm);
|
|
|
|
if (!priv->machineName)
|
2019-12-20 21:16:31 +00:00
|
|
|
return -1;
|
2017-07-21 13:51:03 +00:00
|
|
|
|
2016-03-15 12:00:59 +00:00
|
|
|
if (!(flags & VIR_QEMU_PROCESS_START_PRETEND)) {
|
|
|
|
/* If you are using a SecurityDriver with dynamic labelling,
|
|
|
|
then generate a security label for isolation */
|
|
|
|
VIR_DEBUG("Generating domain security label (if required)");
|
2017-02-13 13:36:53 +00:00
|
|
|
if (qemuSecurityGenLabel(driver->securityManager, vm->def) < 0) {
|
2016-03-15 12:00:59 +00:00
|
|
|
virDomainAuditSecurityLabel(vm, false);
|
2019-12-20 21:16:31 +00:00
|
|
|
return -1;
|
2016-03-15 12:00:59 +00:00
|
|
|
}
|
|
|
|
virDomainAuditSecurityLabel(vm, true);
|
|
|
|
|
2020-12-02 08:26:30 +00:00
|
|
|
if (qemuProcessPrepareDomainNUMAPlacement(vm) < 0)
|
2019-12-20 21:16:31 +00:00
|
|
|
return -1;
|
2016-03-15 12:00:59 +00:00
|
|
|
}
|
|
|
|
|
2017-06-15 06:34:55 +00:00
|
|
|
/* Whether we should use virtlogd as stdio handler for character
|
|
|
|
* device source backends. */
|
|
|
|
if (cfg->stdioLogD &&
|
|
|
|
virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_CHARDEV_FILE_APPEND)) {
|
|
|
|
priv->chardevStdioLogd = true;
|
|
|
|
}
|
|
|
|
|
2018-11-13 11:50:41 +00:00
|
|
|
/* Track if this domain remembers original owner */
|
|
|
|
priv->rememberOwner = cfg->rememberOwner;
|
|
|
|
|
2017-10-11 13:57:16 +00:00
|
|
|
qemuProcessPrepareAllowReboot(vm);
|
|
|
|
|
2016-04-13 07:38:29 +00:00
|
|
|
/*
|
|
|
|
* Normally PCI addresses are assigned in the virDomainCreate
|
|
|
|
* or virDomainDefine methods. We might still need to assign
|
|
|
|
* some here to cope with the question of upgrades. Regardless
|
|
|
|
* we also need to populate the PCI address set cache for later
|
|
|
|
* use in hotplug
|
|
|
|
*/
|
2016-05-20 11:22:26 +00:00
|
|
|
VIR_DEBUG("Assigning domain PCI addresses");
|
2016-11-03 20:33:32 +00:00
|
|
|
if ((qemuDomainAssignAddresses(vm->def, priv->qemuCaps, driver, vm,
|
|
|
|
!!(flags & VIR_QEMU_PROCESS_START_NEW))) < 0) {
|
2019-12-20 21:16:31 +00:00
|
|
|
return -1;
|
2016-11-03 20:33:32 +00:00
|
|
|
}
|
2016-04-13 07:38:29 +00:00
|
|
|
|
2016-03-15 12:00:59 +00:00
|
|
|
if (qemuAssignDeviceAliases(vm->def, priv->qemuCaps) < 0)
|
2019-12-20 21:16:31 +00:00
|
|
|
return -1;
|
2016-03-15 12:00:59 +00:00
|
|
|
|
2021-10-01 13:22:17 +00:00
|
|
|
qemuProcessPrepareDeviceBootorder(vm->def);
|
|
|
|
|
2016-05-09 12:52:52 +00:00
|
|
|
VIR_DEBUG("Setting graphics devices");
|
2018-11-14 15:48:27 +00:00
|
|
|
if (qemuProcessSetupGraphics(driver, vm, priv->qemuCaps, flags) < 0)
|
2019-12-20 21:16:31 +00:00
|
|
|
return -1;
|
2016-05-09 12:30:27 +00:00
|
|
|
|
2016-05-02 17:11:24 +00:00
|
|
|
VIR_DEBUG("Create domain masterKey");
|
|
|
|
if (qemuDomainMasterKeyCreate(vm) < 0)
|
2019-12-20 21:16:31 +00:00
|
|
|
return -1;
|
2016-05-02 17:11:24 +00:00
|
|
|
|
2017-11-08 20:11:47 +00:00
|
|
|
VIR_DEBUG("Setting up storage");
|
2018-02-09 16:14:41 +00:00
|
|
|
if (qemuProcessPrepareDomainStorage(driver, vm, priv, cfg, flags) < 0)
|
2019-12-20 21:16:31 +00:00
|
|
|
return -1;
|
2017-11-08 20:11:47 +00:00
|
|
|
|
2020-09-10 10:32:04 +00:00
|
|
|
VIR_DEBUG("Setting up host devices");
|
|
|
|
if (qemuProcessPrepareDomainHostdevs(vm, priv) < 0)
|
|
|
|
return -1;
|
|
|
|
|
2021-11-18 14:16:20 +00:00
|
|
|
VIR_DEBUG("Prepare chardev source backends");
|
|
|
|
if (qemuProcessPrepareChardevSource(vm->def, cfg) < 0)
|
|
|
|
return -1;
|
2016-10-21 23:02:35 +00:00
|
|
|
|
2019-01-14 13:24:39 +00:00
|
|
|
VIR_DEBUG("Prepare device secrets");
|
2018-02-09 16:14:41 +00:00
|
|
|
if (qemuDomainSecretPrepare(driver, vm) < 0)
|
2019-12-20 21:16:31 +00:00
|
|
|
return -1;
|
2016-05-02 17:20:55 +00:00
|
|
|
|
2019-02-25 13:13:46 +00:00
|
|
|
VIR_DEBUG("Prepare bios/uefi paths");
|
2019-12-13 14:41:16 +00:00
|
|
|
if (qemuFirmwareFillDomain(driver, vm->def, flags) < 0)
|
2019-12-20 21:16:31 +00:00
|
|
|
return -1;
|
2019-11-15 11:27:42 +00:00
|
|
|
if (qemuDomainInitializePflashStorageSource(vm) < 0)
|
2019-12-20 21:16:31 +00:00
|
|
|
return -1;
|
2019-02-25 13:13:46 +00:00
|
|
|
|
2019-09-23 10:44:40 +00:00
|
|
|
VIR_DEBUG("Preparing external devices");
|
|
|
|
if (qemuExtDevicesPrepareDomain(driver, vm) < 0)
|
2019-12-20 21:16:31 +00:00
|
|
|
return -1;
|
2019-09-23 10:44:40 +00:00
|
|
|
|
2020-11-18 18:12:51 +00:00
|
|
|
if (flags & VIR_QEMU_PROCESS_START_NEW) {
|
|
|
|
VIR_DEBUG("Aligning guest memory");
|
|
|
|
if (qemuDomainAlignMemorySizes(vm->def) < 0)
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2016-03-30 14:34:17 +00:00
|
|
|
for (i = 0; i < vm->def->nchannels; i++) {
|
|
|
|
if (qemuDomainPrepareChannel(vm->def->channels[i],
|
|
|
|
priv->channelTargetDir) < 0)
|
2019-12-20 21:16:31 +00:00
|
|
|
return -1;
|
2016-03-30 14:34:17 +00:00
|
|
|
}
|
|
|
|
|
2018-10-03 16:51:16 +00:00
|
|
|
if (!(priv->monConfig = virDomainChrSourceDefNew(driver->xmlopt)))
|
2019-12-20 21:16:31 +00:00
|
|
|
return -1;
|
2016-03-15 12:00:59 +00:00
|
|
|
|
|
|
|
VIR_DEBUG("Preparing monitor state");
|
|
|
|
if (qemuProcessPrepareMonitorChr(priv->monConfig, priv->libDir) < 0)
|
2019-12-20 21:16:31 +00:00
|
|
|
return -1;
|
2016-03-15 12:00:59 +00:00
|
|
|
|
|
|
|
priv->monError = false;
|
|
|
|
priv->monStart = 0;
|
2018-09-10 17:41:53 +00:00
|
|
|
priv->runningReason = VIR_DOMAIN_RUNNING_UNKNOWN;
|
2018-10-09 13:41:51 +00:00
|
|
|
priv->pausedReason = VIR_DOMAIN_PAUSED_UNKNOWN;
|
2016-03-15 12:00:59 +00:00
|
|
|
|
2016-06-22 13:53:48 +00:00
|
|
|
VIR_DEBUG("Updating guest CPU definition");
|
2019-11-26 17:51:22 +00:00
|
|
|
if (qemuProcessUpdateGuestCPU(vm->def, priv->qemuCaps, driver->hostarch, flags) < 0)
|
2019-12-20 21:16:31 +00:00
|
|
|
return -1;
|
2016-06-22 13:53:48 +00:00
|
|
|
|
2019-10-22 13:26:14 +00:00
|
|
|
for (i = 0; i < vm->def->nshmems; i++)
|
|
|
|
qemuDomainPrepareShmemChardev(vm->def->shmems[i]);
|
2017-07-21 11:47:05 +00:00
|
|
|
|
2021-07-21 11:07:51 +00:00
|
|
|
if (vm->def->sec &&
|
|
|
|
vm->def->sec->sectype == VIR_DOMAIN_LAUNCH_SECURITY_SEV) {
|
2020-10-08 12:11:45 +00:00
|
|
|
VIR_DEBUG("Updating SEV platform info");
|
|
|
|
if (qemuProcessUpdateSEVInfo(vm) < 0)
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2019-12-20 21:16:31 +00:00
|
|
|
return 0;
|
2016-03-15 12:00:59 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2018-06-08 14:40:58 +00:00
|
|
|
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessSEVCreateFile(virDomainObj *vm,
|
2018-06-12 11:39:21 +00:00
|
|
|
const char *name,
|
|
|
|
const char *data)
|
2018-06-08 14:40:58 +00:00
|
|
|
{
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
|
|
|
virQEMUDriver *driver = priv->driver;
|
2019-12-20 21:16:28 +00:00
|
|
|
g_autofree char *configFile = NULL;
|
2018-06-08 14:40:58 +00:00
|
|
|
|
2018-12-06 13:50:19 +00:00
|
|
|
if (!(configFile = virFileBuildPath(priv->libDir, name, ".base64")))
|
2018-06-08 14:40:58 +00:00
|
|
|
return -1;
|
|
|
|
|
|
|
|
if (virFileRewriteStr(configFile, S_IRUSR | S_IWUSR, data) < 0) {
|
|
|
|
virReportSystemError(errno, _("failed to write data to config '%s'"),
|
|
|
|
configFile);
|
2019-12-20 21:16:31 +00:00
|
|
|
return -1;
|
2018-06-08 14:40:58 +00:00
|
|
|
}
|
|
|
|
|
2018-12-06 13:59:15 +00:00
|
|
|
if (qemuSecurityDomainSetPathLabel(driver, vm, configFile, true) < 0)
|
2019-12-20 21:16:31 +00:00
|
|
|
return -1;
|
2018-12-06 13:59:15 +00:00
|
|
|
|
2019-12-20 21:16:31 +00:00
|
|
|
return 0;
|
2018-06-08 14:40:58 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessPrepareSEVGuestInput(virDomainObj *vm)
|
2018-06-08 14:40:58 +00:00
|
|
|
{
|
2021-07-21 11:07:51 +00:00
|
|
|
virDomainSEVDef *sev = &vm->def->sec->data.sev;
|
2018-06-08 14:40:58 +00:00
|
|
|
|
2018-06-20 09:05:03 +00:00
|
|
|
VIR_DEBUG("Preparing SEV guest");
|
2018-06-08 14:40:58 +00:00
|
|
|
|
|
|
|
if (sev->dh_cert) {
|
2018-12-06 13:50:19 +00:00
|
|
|
if (qemuProcessSEVCreateFile(vm, "dh_cert", sev->dh_cert) < 0)
|
2018-06-08 14:40:58 +00:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (sev->session) {
|
2018-12-06 13:50:19 +00:00
|
|
|
if (qemuProcessSEVCreateFile(vm, "session", sev->session) < 0)
|
2018-06-08 14:40:58 +00:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
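/*
* As a concrete sketch of the helpers above: for a guest using
* <launchSecurity type='sev'> with a dhCert blob, the base64 payload is
* written to "dh_cert.base64" under priv->libDir (and a session blob, if
* present, to "session.base64"), and the file is relabelled so the QEMU
* process can read it.
*/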
|
|
|
|
|
|
|
|
|
2021-07-21 11:07:51 +00:00
|
|
|
static int
|
|
|
|
qemuProcessPrepareLaunchSecurityGuestInput(virDomainObj *vm)
|
|
|
|
{
|
|
|
|
virDomainSecDef *sec = vm->def->sec;
|
|
|
|
|
|
|
|
if (!sec)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
switch ((virDomainLaunchSecurity) sec->sectype) {
|
|
|
|
case VIR_DOMAIN_LAUNCH_SECURITY_SEV:
|
|
|
|
return qemuProcessPrepareSEVGuestInput(vm);
|
2021-07-21 11:17:40 +00:00
|
|
|
case VIR_DOMAIN_LAUNCH_SECURITY_PV:
|
|
|
|
return 0;
|
2021-07-21 11:07:51 +00:00
|
|
|
case VIR_DOMAIN_LAUNCH_SECURITY_NONE:
|
|
|
|
case VIR_DOMAIN_LAUNCH_SECURITY_LAST:
|
|
|
|
virReportEnumRangeError(virDomainLaunchSecurity, sec->sectype);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2017-10-03 10:38:19 +00:00
|
|
|
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessPrepareHostStorage(virQEMUDriver *driver,
|
|
|
|
virDomainObj *vm,
|
2017-10-03 10:38:19 +00:00
|
|
|
unsigned int flags)
|
|
|
|
{
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
2017-10-03 10:38:19 +00:00
|
|
|
size_t i;
|
|
|
|
bool cold_boot = flags & VIR_QEMU_PROCESS_START_COLD;
|
2017-09-29 15:20:52 +00:00
|
|
|
bool blockdev = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV);
|
2017-10-03 10:38:19 +00:00
|
|
|
|
|
|
|
for (i = vm->def->ndisks; i > 0; i--) {
|
|
|
|
size_t idx = i - 1;
|
2021-03-11 07:16:13 +00:00
|
|
|
virDomainDiskDef *disk = vm->def->disks[idx];
|
2017-10-03 10:38:19 +00:00
|
|
|
|
|
|
|
if (virStorageSourceIsEmpty(disk->src))
|
|
|
|
continue;
|
|
|
|
|
2017-09-29 15:20:52 +00:00
|
|
|
/* backing chain needs to be redetected if we aren't using blockdev */
|
2020-05-06 11:48:35 +00:00
|
|
|
if (!blockdev || qemuDiskBusIsSD(disk->bus))
|
2017-09-29 15:20:52 +00:00
|
|
|
virStorageSourceBackingStoreClear(disk->src);
|
2018-04-24 12:07:22 +00:00
|
|
|
|
2018-11-09 11:36:19 +00:00
|
|
|
/*
|
|
|
|
* Apply the startup policy for an optional disk with a nonexistent
|
|
|
|
* source file right away, as determining the backing chain would surely fail
|
|
|
|
* and we don't want a noisy error notice in the logs for this case.
|
|
|
|
*/
|
|
|
|
if (qemuDomainDiskIsMissingLocalOptional(disk) && cold_boot)
|
|
|
|
VIR_INFO("optional disk '%s' source file is missing, "
|
|
|
|
"skip checking disk chain", disk->dst);
|
2019-01-16 14:33:07 +00:00
|
|
|
else if (qemuDomainDetermineDiskChain(driver, vm, disk, NULL, true) >= 0)
|
2017-10-03 10:38:19 +00:00
|
|
|
continue;
|
|
|
|
|
|
|
|
if (qemuDomainCheckDiskStartupPolicy(driver, vm, idx, cold_boot) >= 0)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2018-05-30 11:53:52 +00:00
|
|
|
int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessOpenVhostVsock(virDomainVsockDef *vsock)
|
2018-05-22 13:57:47 +00:00
|
|
|
{
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainVsockPrivate *priv = (qemuDomainVsockPrivate *)vsock->privateData;
|
2018-05-22 13:57:47 +00:00
|
|
|
const char *vsock_path = "/dev/vhost-vsock";
|
|
|
|
int fd;
|
|
|
|
|
|
|
|
if ((fd = open(vsock_path, O_RDWR)) < 0) {
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
|
|
|
|
"%s", _("unable to open vhost-vsock device"));
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (vsock->auto_cid == VIR_TRISTATE_BOOL_YES) {
|
|
|
|
if (virVsockAcquireGuestCid(fd, &vsock->guest_cid) < 0)
|
|
|
|
goto error;
|
|
|
|
} else {
|
|
|
|
if (virVsockSetGuestCid(fd, vsock->guest_cid) < 0)
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
|
|
|
priv->vhostfd = fd;
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
error:
|
|
|
|
VIR_FORCE_CLOSE(fd);
|
|
|
|
return -1;
|
|
|
|
}
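/*
* The descriptor opened on /dev/vhost-vsock above is stashed in the vsock's
* private data (priv->vhostfd) for later use when the device is started; with
* auto_cid enabled a free guest CID is acquired on that fd, otherwise the
* explicitly configured guest_cid is programmed into it.
*/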
|
|
|
|
|
|
|
|
|
2021-10-25 10:42:16 +00:00
|
|
|
static int
|
|
|
|
qemuProcessPrepareHostBackendChardevFileHelper(const char *path,
|
|
|
|
virTristateSwitch append,
|
|
|
|
int *fd,
|
|
|
|
virLogManager *logManager,
|
|
|
|
virSecurityManager *secManager,
|
|
|
|
virQEMUCaps *qemuCaps,
|
|
|
|
virQEMUDriverConfig *cfg,
|
|
|
|
const virDomainDef *def)
|
|
|
|
{
|
|
|
|
/* Technically, to pass an FD via /dev/fdset we don't need
|
|
|
|
* any capability check because X_QEMU_CAPS_ADD_FD is already
|
|
|
|
* assumed. But keeping the old style is still handy when
|
|
|
|
* building a standalone command line (e.g. for tests). */
|
|
|
|
if (!logManager &&
|
|
|
|
!virQEMUCapsGet(qemuCaps, QEMU_CAPS_CHARDEV_FD_PASS_COMMANDLINE))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (logManager) {
|
|
|
|
int flags = 0;
|
|
|
|
|
|
|
|
if (append == VIR_TRISTATE_SWITCH_ABSENT ||
|
|
|
|
append == VIR_TRISTATE_SWITCH_OFF)
|
|
|
|
flags |= VIR_LOG_MANAGER_PROTOCOL_DOMAIN_OPEN_LOG_FILE_TRUNCATE;
|
|
|
|
|
|
|
|
if ((*fd = virLogManagerDomainOpenLogFile(logManager,
|
|
|
|
"qemu",
|
|
|
|
def->uuid,
|
|
|
|
def->name,
|
|
|
|
path,
|
|
|
|
flags,
|
|
|
|
NULL, NULL)) < 0)
|
|
|
|
return -1;
|
|
|
|
} else {
|
|
|
|
int oflags = O_CREAT | O_WRONLY;
|
|
|
|
|
|
|
|
switch (append) {
|
|
|
|
case VIR_TRISTATE_SWITCH_ABSENT:
|
|
|
|
case VIR_TRISTATE_SWITCH_OFF:
|
|
|
|
oflags |= O_TRUNC;
|
|
|
|
break;
|
|
|
|
case VIR_TRISTATE_SWITCH_ON:
|
|
|
|
oflags |= O_APPEND;
|
|
|
|
break;
|
|
|
|
case VIR_TRISTATE_SWITCH_LAST:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if ((*fd = qemuDomainOpenFile(cfg, def, path, oflags, NULL)) < 0)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
if (qemuSecuritySetImageFDLabel(secManager, (virDomainDef*)def, *fd) < 0) {
|
|
|
|
VIR_FORCE_CLOSE(*fd);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
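/*
* A sketch of the two paths above: when virtlogd is in use (logManager is
* non-NULL) the FD comes from virtlogd via virLogManagerDomainOpenLogFile(),
* with append='off'/absent mapped to the TRUNCATE flag; otherwise the file is
* opened directly with O_TRUNC or O_APPEND and the FD is labelled via
* qemuSecuritySetImageFDLabel() so the QEMU process may write to it.
*/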
|
|
|
|
|
|
|
|
|
|
|
|
struct qemuProcessPrepareHostBackendChardevData {
|
|
|
|
virQEMUCaps *qemuCaps;
|
|
|
|
virLogManager *logManager;
|
|
|
|
virSecurityManager *secManager;
|
|
|
|
virQEMUDriverConfig *cfg;
|
|
|
|
virDomainDef *def;
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
qemuProcessPrepareHostBackendChardevOne(virDomainDeviceDef *dev,
|
|
|
|
virDomainChrSourceDef *chardev,
|
|
|
|
void *opaque)
|
|
|
|
{
|
|
|
|
struct qemuProcessPrepareHostBackendChardevData *data = opaque;
|
|
|
|
qemuDomainChrSourcePrivate *charpriv = QEMU_DOMAIN_CHR_SOURCE_PRIVATE(chardev);
|
|
|
|
|
|
|
|
/* this function is also called for the monitor backend which doesn't have
|
|
|
|
* a 'dev' */
|
|
|
|
if (dev) {
|
2021-12-08 09:07:44 +00:00
|
|
|
/* vhost-user disk doesn't use FD passing */
|
|
|
|
if (dev->type == VIR_DOMAIN_DEVICE_DISK)
|
|
|
|
return 0;
|
|
|
|
|
2021-10-25 10:42:16 +00:00
|
|
|
if (dev->type == VIR_DOMAIN_DEVICE_NET) {
|
|
|
|
/* due to a historical bug in qemu we don't use FD passthrough for
|
|
|
|
* vhost-sockets for network devices */
|
|
|
|
return 0;
|
|
|
|
}
|
2021-12-08 09:07:44 +00:00
|
|
|
|
|
|
|
/* TPM FD-passing setup is special and handled separately */
|
|
|
|
if (dev->type == VIR_DOMAIN_DEVICE_TPM)
|
|
|
|
return 0;
|
2021-10-25 10:42:16 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
switch ((virDomainChrType) chardev->type) {
|
|
|
|
case VIR_DOMAIN_CHR_TYPE_NULL:
|
|
|
|
case VIR_DOMAIN_CHR_TYPE_VC:
|
|
|
|
case VIR_DOMAIN_CHR_TYPE_PTY:
|
|
|
|
case VIR_DOMAIN_CHR_TYPE_DEV:
|
|
|
|
case VIR_DOMAIN_CHR_TYPE_PIPE:
|
|
|
|
case VIR_DOMAIN_CHR_TYPE_STDIO:
|
|
|
|
case VIR_DOMAIN_CHR_TYPE_UDP:
|
|
|
|
case VIR_DOMAIN_CHR_TYPE_TCP:
|
|
|
|
case VIR_DOMAIN_CHR_TYPE_SPICEVMC:
|
|
|
|
case VIR_DOMAIN_CHR_TYPE_SPICEPORT:
|
|
|
|
break;
|
|
|
|
|
|
|
|
case VIR_DOMAIN_CHR_TYPE_FILE:
|
|
|
|
if (qemuProcessPrepareHostBackendChardevFileHelper(chardev->data.file.path,
|
|
|
|
chardev->data.file.append,
|
|
|
|
&charpriv->fd,
|
|
|
|
data->logManager,
|
|
|
|
data->secManager,
|
|
|
|
data->qemuCaps,
|
|
|
|
data->cfg,
|
|
|
|
data->def) < 0)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
break;
|
|
|
|
|
|
|
|
case VIR_DOMAIN_CHR_TYPE_UNIX:
|
|
|
|
if (chardev->data.nix.listen &&
|
|
|
|
virQEMUCapsGet(data->qemuCaps, QEMU_CAPS_CHARDEV_FD_PASS_COMMANDLINE)) {
|
|
|
|
|
|
|
|
if (qemuSecuritySetSocketLabel(data->secManager, data->def) < 0)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
charpriv->fd = qemuOpenChrChardevUNIXSocket(chardev);
|
|
|
|
|
|
|
|
if (qemuSecurityClearSocketLabel(data->secManager, data->def) < 0) {
|
|
|
|
VIR_FORCE_CLOSE(charpriv->fd);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (charpriv->fd < 0)
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
|
|
|
case VIR_DOMAIN_CHR_TYPE_NMDM:
|
|
|
|
case VIR_DOMAIN_CHR_TYPE_LAST:
|
|
|
|
default:
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
|
|
|
|
_("unsupported chardev '%s'"),
|
|
|
|
virDomainChrTypeToString(chardev->type));
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (chardev->logfile) {
|
|
|
|
if (qemuProcessPrepareHostBackendChardevFileHelper(chardev->logfile,
|
|
|
|
chardev->logappend,
|
|
|
|
&charpriv->logfd,
|
|
|
|
data->logManager,
|
|
|
|
data->secManager,
|
|
|
|
data->qemuCaps,
|
|
|
|
data->cfg,
|
|
|
|
data->def) < 0)
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/* prepare the chardev backends for various devices:
|
|
|
|
* serial/parallel/channel chardevs, vhost-user disks, vhost-user network
|
|
|
|
* interfaces, smartcards, shared memory, and redirdevs */
|
|
|
|
static int
|
|
|
|
qemuProcessPrepareHostBackendChardev(virDomainObj *vm,
|
|
|
|
virQEMUCaps *qemuCaps,
|
|
|
|
virSecurityManager *secManager)
|
|
|
|
{
|
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
|
|
|
g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(priv->driver);
|
|
|
|
struct qemuProcessPrepareHostBackendChardevData data = {
|
|
|
|
.qemuCaps = qemuCaps,
|
|
|
|
.logManager = NULL,
|
|
|
|
.secManager = secManager,
|
|
|
|
.cfg = cfg,
|
|
|
|
.def = vm->def,
|
|
|
|
};
|
|
|
|
g_autoptr(virLogManager) logManager = NULL;
|
|
|
|
|
|
|
|
if (cfg->stdioLogD) {
|
|
|
|
if (!(logManager = data.logManager = virLogManagerNew(priv->driver->privileged)))
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (qemuDomainDeviceBackendChardevForeach(vm->def,
|
|
|
|
qemuProcessPrepareHostBackendChardevOne,
|
|
|
|
&data) < 0)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
if (qemuProcessPrepareHostBackendChardevOne(NULL, priv->monConfig, &data) < 0)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2015-11-10 15:58:41 +00:00
|
|
|
/**
|
2017-10-03 07:51:27 +00:00
|
|
|
* qemuProcessPrepareHost:
|
|
|
|
* @driver: qemu driver
|
|
|
|
* @vm: domain object
|
2017-10-03 08:14:21 +00:00
|
|
|
* @flags: qemuProcessStartFlags
|
2015-11-10 15:58:41 +00:00
|
|
|
*
|
2016-03-22 12:16:05 +00:00
|
|
|
* This function groups all code that modifies the host system (which may also
|
|
|
|
* update the live XML) to prepare the environment for a domain which is about to start
|
|
|
|
* and it's the only place to do those modifications.
|
2015-11-10 15:58:41 +00:00
|
|
|
*
|
2016-03-22 12:16:05 +00:00
|
|
|
* TODO: move all host modification from qemuBuildCommandLine into this function
|
2015-11-10 15:58:41 +00:00
|
|
|
*/
|
|
|
|
int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessPrepareHost(virQEMUDriver *driver,
|
|
|
|
virDomainObj *vm,
|
2017-10-03 08:14:21 +00:00
|
|
|
unsigned int flags)
|
2011-02-14 16:09:39 +00:00
|
|
|
{
|
2014-03-05 10:56:26 +00:00
|
|
|
unsigned int hostdev_flags = 0;
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
2019-12-20 21:16:30 +00:00
|
|
|
g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2021-12-13 11:17:14 +00:00
|
|
|
/*
|
|
|
|
* Create all per-domain directories in order to make sure a domain
|
|
|
|
* with any possible seclabels can access them.
|
|
|
|
*/
|
|
|
|
if (qemuProcessMakeDir(driver, vm, priv->libDir) < 0 ||
|
|
|
|
qemuProcessMakeDir(driver, vm, priv->channelTargetDir) < 0)
|
|
|
|
return -1;
|
|
|
|
|
2022-02-09 08:31:54 +00:00
|
|
|
if (qemuPrepareNVRAM(driver, vm, !!(flags & VIR_QEMU_PROCESS_START_RESET_NVRAM)) < 0)
|
2019-12-20 21:16:31 +00:00
|
|
|
return -1;
|
2013-02-01 17:04:15 +00:00
|
|
|
|
2018-05-22 13:57:47 +00:00
|
|
|
if (vm->def->vsock) {
|
|
|
|
if (qemuProcessOpenVhostVsock(vm->def->vsock) < 0)
|
2019-12-20 21:16:31 +00:00
|
|
|
return -1;
|
2018-05-22 13:57:47 +00:00
|
|
|
}
|
2013-05-06 19:43:56 +00:00
|
|
|
/* network devices must be "prepared" before hostdevs, because
|
|
|
|
* setting up a network device might create a new hostdev that
|
|
|
|
* will need to be set up.
|
|
|
|
*/
|
|
|
|
VIR_DEBUG("Preparing network devices");
|
2019-08-08 14:55:12 +00:00
|
|
|
if (qemuProcessNetworkPrepareDevices(driver, vm) < 0)
|
2019-12-20 21:16:31 +00:00
|
|
|
return -1;
|
2013-05-06 19:43:56 +00:00
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
/* Must be run before security labelling */
|
2011-05-09 09:24:09 +00:00
|
|
|
VIR_DEBUG("Preparing host devices");
|
2014-03-05 10:56:26 +00:00
|
|
|
if (!cfg->relaxedACS)
|
|
|
|
hostdev_flags |= VIR_HOSTDEV_STRICT_ACS_CHECK;
|
2017-10-03 08:14:21 +00:00
|
|
|
if (flags & VIR_QEMU_PROCESS_START_NEW)
|
2014-03-05 11:12:04 +00:00
|
|
|
hostdev_flags |= VIR_HOSTDEV_COLD_BOOT;
|
2015-10-20 12:10:16 +00:00
|
|
|
if (qemuHostdevPrepareDomainDevices(driver, vm->def, priv->qemuCaps,
|
|
|
|
hostdev_flags) < 0)
|
2019-12-20 21:16:31 +00:00
|
|
|
return -1;
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2021-10-25 10:42:16 +00:00
|
|
|
VIR_DEBUG("Preparing chr device backends");
|
|
|
|
if (qemuProcessPrepareHostBackendChardev(vm,
|
|
|
|
priv->qemuCaps,
|
|
|
|
driver->securityManager) < 0)
|
2019-12-20 21:16:31 +00:00
|
|
|
return -1;
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2017-11-07 14:19:43 +00:00
|
|
|
if (qemuProcessBuildDestroyMemoryPaths(driver, vm, NULL, true) < 0)
|
2019-12-20 21:16:31 +00:00
|
|
|
return -1;
|
2012-12-11 20:20:29 +00:00
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
/* Ensure no historical cgroup for this VM is lying around with bogus
|
|
|
|
* settings */
|
2011-05-09 09:24:09 +00:00
|
|
|
VIR_DEBUG("Ensuring no historical cgroup is lying around");
|
2022-01-25 16:19:53 +00:00
|
|
|
virDomainCgroupRemoveCgroup(vm, priv->cgroup, priv->machineName);
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2021-02-26 08:37:10 +00:00
|
|
|
if (g_mkdir_with_parents(cfg->logDir, 0777) < 0) {
|
2011-02-14 16:09:39 +00:00
|
|
|
virReportSystemError(errno,
|
|
|
|
_("cannot create log directory %s"),
|
2013-01-10 21:03:14 +00:00
|
|
|
cfg->logDir);
|
2019-12-20 21:16:31 +00:00
|
|
|
return -1;
|
2011-02-14 16:09:39 +00:00
|
|
|
}
|
|
|
|
|
2016-03-22 12:16:05 +00:00
|
|
|
VIR_FREE(priv->pidfile);
|
|
|
|
if (!(priv->pidfile = virPidFileBuildPath(cfg->stateDir, vm->def->name))) {
|
|
|
|
virReportSystemError(errno,
|
|
|
|
"%s", _("Failed to build pidfile path."));
|
2019-12-20 21:16:31 +00:00
|
|
|
return -1;
|
2016-03-22 12:16:05 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (unlink(priv->pidfile) < 0 &&
|
|
|
|
errno != ENOENT) {
|
|
|
|
virReportSystemError(errno,
|
|
|
|
_("Cannot remove stale PID file %s"),
|
|
|
|
priv->pidfile);
|
2019-12-20 21:16:31 +00:00
|
|
|
return -1;
|
2016-03-22 12:16:05 +00:00
|
|
|
}
|
|
|
|
|
2016-05-02 17:11:24 +00:00
|
|
|
VIR_DEBUG("Write domain masterKey");
|
|
|
|
if (qemuDomainWriteMasterKeyFile(driver, vm) < 0)
|
2019-12-20 21:16:31 +00:00
|
|
|
return -1;
|
2016-03-29 22:22:46 +00:00
|
|
|
|
2017-10-03 10:38:19 +00:00
|
|
|
VIR_DEBUG("Preparing disks (host)");
|
|
|
|
if (qemuProcessPrepareHostStorage(driver, vm, flags) < 0)
|
2019-12-20 21:16:31 +00:00
|
|
|
return -1;
|
2017-10-03 10:38:19 +00:00
|
|
|
|
2020-10-15 13:06:01 +00:00
|
|
|
VIR_DEBUG("Preparing hostdevs (host-side)");
|
|
|
|
if (qemuProcessPrepareHostHostdevs(vm) < 0)
|
|
|
|
return -1;
|
|
|
|
|
2017-04-04 16:22:31 +00:00
|
|
|
VIR_DEBUG("Preparing external devices");
|
2019-08-08 14:55:10 +00:00
|
|
|
if (qemuExtDevicesPrepareHost(driver, vm) < 0)
|
2019-12-20 21:16:31 +00:00
|
|
|
return -1;
|
2017-04-04 16:22:31 +00:00
|
|
|
|
2021-07-21 11:07:51 +00:00
|
|
|
if (qemuProcessPrepareLaunchSecurityGuestInput(vm) < 0)
|
2019-12-20 21:16:31 +00:00
|
|
|
return -1;
|
2018-06-08 14:40:58 +00:00
|
|
|
|
2019-12-20 21:16:31 +00:00
|
|
|
return 0;
|
2016-03-22 12:16:05 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2018-04-09 14:31:17 +00:00
|
|
|
/**
|
|
|
|
* qemuProcessGenID:
|
|
|
|
* @vm: Pointer to domain object
|
|
|
|
* @flags: qemuProcessStartFlags
|
|
|
|
*
|
|
|
|
* If this domain is requesting to use genid, then update the GUID
|
|
|
|
* value if the VIR_QEMU_PROCESS_START_GEN_VMID flag is set. This
|
|
|
|
* flag is set on specific paths during domain start processing when
|
|
|
|
* there is a possibility that the VM is re-executing
|
|
|
|
* something that has already been executed before.
|
|
|
|
*/
|
|
|
|
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessGenID(virDomainObj *vm,
|
2018-04-09 14:31:17 +00:00
|
|
|
unsigned int flags)
|
|
|
|
{
|
|
|
|
if (!vm->def->genidRequested)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
/* If we are coming from a path where we must provide a new gen id
|
|
|
|
* value regardless of whether it was previously generated or provided,
|
|
|
|
* then generate a new GUID value before we build the command line. */
|
|
|
|
if (flags & VIR_QEMU_PROCESS_START_GEN_VMID) {
|
|
|
|
if (virUUIDGenerate(vm->def->genid) < 0) {
|
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
|
|
|
|
_("failed to regenerate genid"));
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2018-06-13 15:21:02 +00:00
|
|
|
/**
|
|
|
|
* qemuProcessSetupDiskThrottlingBlockdev:
|
|
|
|
*
|
|
|
|
* Sets up disk throttling for -blockdev via the block_set_io_throttle monitor
|
|
|
|
* command. This hack should be replaced by proper use of the 'throttle'
|
|
|
|
* blockdev driver in qemu once it supports changing the throttle group.
|
2019-09-05 13:09:50 +00:00
|
|
|
* Same hack is done in qemuDomainAttachDiskGeneric.
|
2018-06-13 15:21:02 +00:00
|
|
|
*/
|
|
|
|
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessSetupDiskThrottlingBlockdev(virQEMUDriver *driver,
|
|
|
|
virDomainObj *vm,
|
2018-06-13 15:21:02 +00:00
|
|
|
qemuDomainAsyncJob asyncJob)
|
|
|
|
{
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
2018-06-13 15:21:02 +00:00
|
|
|
size_t i;
|
|
|
|
int ret = -1;
|
|
|
|
|
|
|
|
if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
VIR_DEBUG("Setting up disk throttling for -blockdev via block_set_io_throttle");
|
|
|
|
|
|
|
|
if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
for (i = 0; i < vm->def->ndisks; i++) {
|
2021-03-11 07:16:13 +00:00
|
|
|
virDomainDiskDef *disk = vm->def->disks[i];
|
|
|
|
qemuDomainDiskPrivate *diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk);
|
2018-06-13 15:21:02 +00:00
|
|
|
|
2020-05-06 11:48:35 +00:00
|
|
|
/* sd-cards are instantiated via -drive */
|
|
|
|
if (qemuDiskBusIsSD(disk->bus))
|
|
|
|
continue;
|
|
|
|
|
2021-01-05 11:06:32 +00:00
|
|
|
/* Setting throttling for empty drives fails */
|
|
|
|
if (virStorageSourceIsEmpty(disk->src))
|
|
|
|
continue;
|
|
|
|
|
2018-06-13 15:21:02 +00:00
|
|
|
if (!qemuDiskConfigBlkdeviotuneEnabled(disk))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (qemuMonitorSetBlockIoThrottle(qemuDomainGetMonitor(vm), NULL,
|
2021-08-10 13:27:29 +00:00
|
|
|
diskPriv->qomName, &disk->blkdeviotune) < 0)
|
2018-06-13 15:21:02 +00:00
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = 0;
|
|
|
|
|
|
|
|
cleanup:
|
2021-11-24 12:09:32 +00:00
|
|
|
qemuDomainObjExitMonitor(driver, vm);
|
2018-06-13 15:21:02 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2020-07-20 14:54:11 +00:00
|
|
|
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessEnableDomainNamespaces(virQEMUDriver *driver,
|
|
|
|
virDomainObj *vm)
|
2020-07-20 14:54:11 +00:00
|
|
|
{
|
|
|
|
g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
|
|
|
|
|
|
|
|
if (virBitmapIsBitSet(cfg->namespaces, QEMU_DOMAIN_NS_MOUNT) &&
|
|
|
|
qemuDomainEnableNamespace(vm, QEMU_DOMAIN_NS_MOUNT) < 0)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2020-09-07 14:44:50 +00:00
|
|
|
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessEnablePerf(virDomainObj *vm)
|
2020-09-07 14:44:50 +00:00
|
|
|
{
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
2020-09-07 14:44:50 +00:00
|
|
|
size_t i;
|
|
|
|
|
|
|
|
if (!(priv->perf = virPerfNew()))
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
for (i = 0; i < VIR_PERF_EVENT_LAST; i++) {
|
|
|
|
if (vm->def->perf.events[i] == VIR_TRISTATE_BOOL_YES &&
|
|
|
|
virPerfEventEnable(priv->perf, i, vm->pid) < 0)
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2021-04-27 18:31:11 +00:00
|
|
|
static int
|
|
|
|
qemuProcessSetupDisksTransientSnapshot(virDomainObj *vm,
|
|
|
|
qemuDomainAsyncJob asyncJob)
|
|
|
|
{
|
|
|
|
g_autoptr(qemuSnapshotDiskContext) snapctxt = NULL;
|
|
|
|
g_autoptr(GHashTable) blockNamedNodeData = NULL;
|
|
|
|
size_t i;
|
|
|
|
|
|
|
|
if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, asyncJob)))
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
snapctxt = qemuSnapshotDiskContextNew(vm->def->ndisks, vm, asyncJob);
|
|
|
|
|
|
|
|
for (i = 0; i < vm->def->ndisks; i++) {
|
|
|
|
virDomainDiskDef *domdisk = vm->def->disks[i];
|
|
|
|
g_autoptr(virDomainSnapshotDiskDef) snapdisk = NULL;
|
|
|
|
|
2021-05-18 09:15:50 +00:00
|
|
|
if (!domdisk->transient ||
|
|
|
|
domdisk->transientShareBacking == VIR_TRISTATE_BOOL_YES)
|
2021-04-27 18:31:11 +00:00
|
|
|
continue;
|
|
|
|
|
|
|
|
/* validation code makes sure that we do this only for local disks
|
|
|
|
* with a file source */
|
|
|
|
|
2021-03-25 00:54:11 +00:00
|
|
|
if (!(snapdisk = qemuSnapshotGetTransientDiskDef(domdisk, vm->def->name)))
|
2021-04-27 18:31:11 +00:00
|
|
|
return -1;
|
|
|
|
|
|
|
|
if (qemuSnapshotDiskPrepareOne(snapctxt, domdisk, snapdisk,
|
|
|
|
blockNamedNodeData,
|
|
|
|
false,
|
|
|
|
false) < 0)
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (qemuSnapshotDiskCreate(snapctxt) < 0)
|
|
|
|
return -1;
|
|
|
|
|
2021-05-18 15:47:48 +00:00
|
|
|
for (i = 0; i < vm->def->ndisks; i++) {
|
|
|
|
virDomainDiskDef *domdisk = vm->def->disks[i];
|
|
|
|
|
2021-05-27 16:55:12 +00:00
|
|
|
if (!domdisk->transient ||
|
|
|
|
domdisk->transientShareBacking == VIR_TRISTATE_BOOL_YES)
|
2021-05-18 15:47:48 +00:00
|
|
|
continue;
|
|
|
|
|
|
|
|
QEMU_DOMAIN_DISK_PRIVATE(domdisk)->transientOverlayCreated = true;
|
|
|
|
}
|
2021-04-27 18:31:11 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2021-05-18 09:15:50 +00:00
|
|
|
static int
|
|
|
|
qemuProcessSetupDisksTransientHotplug(virDomainObj *vm,
|
|
|
|
qemuDomainAsyncJob asyncJob)
|
|
|
|
{
|
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
|
|
|
bool hasHotpluggedDisk = false;
|
|
|
|
size_t i;
|
|
|
|
|
|
|
|
for (i = 0; i < vm->def->ndisks; i++) {
|
|
|
|
virDomainDiskDef *domdisk = vm->def->disks[i];
|
|
|
|
|
|
|
|
if (!domdisk->transient ||
|
|
|
|
domdisk->transientShareBacking != VIR_TRISTATE_BOOL_YES)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (qemuDomainAttachDiskGeneric(priv->driver, vm, domdisk, asyncJob) < 0)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
hasHotpluggedDisk = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* in order to allow booting from such disks we need to issue a system-reset
|
|
|
|
* so that the firmware tables recording bootable devices are regenerated */
|
|
|
|
if (hasHotpluggedDisk) {
|
|
|
|
int rc;
|
|
|
|
|
|
|
|
if (qemuDomainObjEnterMonitorAsync(priv->driver, vm, asyncJob) < 0)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
rc = qemuMonitorSystemReset(priv->mon);
|
|
|
|
|
2021-11-24 12:11:52 +00:00
|
|
|
qemuDomainObjExitMonitor(priv->driver, vm);
|
|
|
|
if (rc < 0)
|
2021-05-18 09:15:50 +00:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2021-04-27 18:31:11 +00:00
|
|
|
static int
|
|
|
|
qemuProcessSetupDisksTransient(virDomainObj *vm,
|
|
|
|
qemuDomainAsyncJob asyncJob)
|
|
|
|
{
|
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
|
|
|
|
|
|
|
if (!(virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV)))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (qemuProcessSetupDisksTransientSnapshot(vm, asyncJob) < 0)
|
|
|
|
return -1;
|
|
|
|
|
2021-05-18 09:15:50 +00:00
|
|
|
if (qemuProcessSetupDisksTransientHotplug(vm, asyncJob) < 0)
|
|
|
|
return -1;
|
|
|
|
|
2021-04-27 18:31:11 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2021-08-23 14:40:46 +00:00
|
|
|
static int
|
|
|
|
qemuProcessSetupLifecycleActions(virDomainObj *vm,
|
|
|
|
qemuDomainAsyncJob asyncJob)
|
|
|
|
{
|
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
|
|
|
int rc;
|
|
|
|
|
|
|
|
if (!(virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_SET_ACTION)))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
/* for now we handle only onReboot->destroy here as an alternative to
|
|
|
|
* '-no-reboot' on the commandline */
|
|
|
|
if (vm->def->onReboot != VIR_DOMAIN_LIFECYCLE_ACTION_DESTROY)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (qemuDomainObjEnterMonitorAsync(priv->driver, vm, asyncJob) < 0)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
rc = qemuMonitorSetAction(priv->mon,
|
|
|
|
QEMU_MONITOR_ACTION_SHUTDOWN_KEEP,
|
|
|
|
QEMU_MONITOR_ACTION_REBOOT_SHUTDOWN,
|
|
|
|
QEMU_MONITOR_ACTION_WATCHDOG_KEEP,
|
|
|
|
QEMU_MONITOR_ACTION_PANIC_KEEP);
|
|
|
|
|
2021-11-24 12:11:52 +00:00
|
|
|
qemuDomainObjExitMonitor(priv->driver, vm);
|
|
|
|
if (rc < 0)
|
2021-08-23 14:40:46 +00:00
|
|
|
return -1;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
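/*
* A sketch of the effect above (assuming a QEMU with 'set-action'): a guest
* configured with <on_reboot>destroy</on_reboot> gets the monitor action
* reboot=shutdown, so a guest-initiated reboot surfaces as a shutdown event
* which libvirt then handles according to the configured lifecycle action;
* the shutdown, watchdog and panic actions are left unchanged.
*/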
|
|
|
|
|
|
|
|
|
2016-03-22 12:16:05 +00:00
|
|
|
/**
|
|
|
|
* qemuProcessLaunch:
|
|
|
|
*
|
|
|
|
* Launch a new QEMU process with stopped virtual CPUs.
|
|
|
|
*
|
|
|
|
* The caller is supposed to call qemuProcessStop with appropriate
|
|
|
|
* flags in case of failure.
|
|
|
|
*
|
|
|
|
* Returns 0 on success,
|
|
|
|
* -1 on error which happened before devices were labeled and thus
|
|
|
|
* there is no need to restore them,
|
|
|
|
* -2 on error requesting security labels to be restored.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
qemuProcessLaunch(virConnectPtr conn,
|
2021-03-11 07:16:13 +00:00
|
|
|
virQEMUDriver *driver,
|
|
|
|
virDomainObj *vm,
|
2016-03-22 12:16:05 +00:00
|
|
|
qemuDomainAsyncJob asyncJob,
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessIncomingDef *incoming,
|
|
|
|
virDomainMomentObj *snapshot,
|
2016-03-22 12:16:05 +00:00
|
|
|
virNetDevVPortProfileOp vmop,
|
|
|
|
unsigned int flags)
|
|
|
|
{
|
|
|
|
int ret = -1;
|
|
|
|
int rv;
|
|
|
|
int logfile = -1;
|
2019-12-20 21:16:30 +00:00
|
|
|
g_autoptr(qemuDomainLogContext) logCtxt = NULL;
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
2019-12-20 21:16:30 +00:00
|
|
|
g_autoptr(virCommand) cmd = NULL;
|
2016-03-22 12:16:05 +00:00
|
|
|
struct qemuProcessHookData hookData;
|
2019-12-20 21:16:30 +00:00
|
|
|
g_autoptr(virQEMUDriverConfig) cfg = NULL;
|
2016-03-22 12:16:05 +00:00
|
|
|
size_t nnicindexes = 0;
|
2019-12-20 21:16:28 +00:00
|
|
|
g_autofree int *nicindexes = NULL;
|
2021-03-02 17:57:51 +00:00
|
|
|
unsigned long long maxMemLock = 0;
|
2016-03-22 12:16:05 +00:00
|
|
|
|
2018-02-26 08:37:31 +00:00
|
|
|
VIR_DEBUG("conn=%p driver=%p vm=%p name=%s if=%d asyncJob=%d "
|
2016-03-22 12:16:05 +00:00
|
|
|
"incoming.launchURI=%s incoming.deferredURI=%s "
|
|
|
|
"incoming.fd=%d incoming.path=%s "
|
|
|
|
"snapshot=%p vmop=%d flags=0x%x",
|
2018-02-26 08:37:31 +00:00
|
|
|
conn, driver, vm, vm->def->name, vm->def->id, asyncJob,
|
2016-03-22 12:16:05 +00:00
|
|
|
NULLSTR(incoming ? incoming->launchURI : NULL),
|
|
|
|
NULLSTR(incoming ? incoming->deferredURI : NULL),
|
|
|
|
incoming ? incoming->fd : -1,
|
|
|
|
NULLSTR(incoming ? incoming->path : NULL),
|
|
|
|
snapshot, vmop, flags);
|
|
|
|
|
|
|
|
/* Okay, these are just internal flags,
|
|
|
|
* but it doesn't hurt to check */
|
|
|
|
virCheckFlags(VIR_QEMU_PROCESS_START_COLD |
|
|
|
|
VIR_QEMU_PROCESS_START_PAUSED |
|
2016-06-07 11:52:16 +00:00
|
|
|
VIR_QEMU_PROCESS_START_AUTODESTROY |
|
2018-04-09 14:31:17 +00:00
|
|
|
VIR_QEMU_PROCESS_START_NEW |
|
2022-02-10 09:52:37 +00:00
|
|
|
VIR_QEMU_PROCESS_START_GEN_VMID |
|
|
|
|
VIR_QEMU_PROCESS_START_RESET_NVRAM, -1);
|
2016-03-22 12:16:05 +00:00
|
|
|
|
|
|
|
cfg = virQEMUDriverGetConfig(driver);
|
|
|
|
|
2019-05-17 11:35:57 +00:00
|
|
|
if (flags & VIR_QEMU_PROCESS_START_AUTODESTROY) {
|
|
|
|
if (!conn) {
|
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
|
|
|
|
_("Domain autodestroy requires a connection handle"));
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
if (driver->embeddedRoot) {
|
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
|
|
|
|
_("Domain autodestroy not supported for embedded drivers yet"));
|
|
|
|
return -1;
|
|
|
|
}
|
2018-02-09 17:19:44 +00:00
|
|
|
}
|
|
|
|
|
2016-03-22 12:16:05 +00:00
|
|
|
hookData.vm = vm;
|
|
|
|
hookData.driver = driver;
|
|
|
|
/* We don't increase cfg's reference counter here. */
|
|
|
|
hookData.cfg = cfg;
|
|
|
|
|
2011-05-09 09:24:09 +00:00
|
|
|
VIR_DEBUG("Creating domain log file");
|
2015-11-12 14:28:36 +00:00
|
|
|
if (!(logCtxt = qemuDomainLogContextNew(driver, vm,
|
2019-06-11 11:58:29 +00:00
|
|
|
QEMU_DOMAIN_LOG_CONTEXT_MODE_START))) {
|
|
|
|
virLastErrorPrefixMessage("%s", _("can't connect to virtlogd"));
|
2015-11-10 15:58:41 +00:00
|
|
|
goto cleanup;
|
2019-06-11 11:58:29 +00:00
|
|
|
}
|
2015-11-12 14:28:36 +00:00
|
|
|
logfile = qemuDomainLogContextGetWriteFD(logCtxt);
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2018-04-09 14:31:17 +00:00
|
|
|
if (qemuProcessGenID(vm, flags) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
2021-10-06 09:48:38 +00:00
|
|
|
if (qemuExtDevicesStart(driver, vm, incoming != NULL) < 0)
|
2017-04-04 16:22:31 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
2011-05-09 09:24:09 +00:00
|
|
|
VIR_DEBUG("Building emulator command line");
|
2016-04-06 14:41:33 +00:00
|
|
|
if (!(cmd = qemuBuildCommandLine(driver,
|
2017-10-11 10:44:30 +00:00
|
|
|
vm,
|
2015-11-06 17:41:37 +00:00
|
|
|
incoming ? incoming->launchURI : NULL,
|
|
|
|
snapshot, vmop,
|
2016-04-13 06:10:24 +00:00
|
|
|
false,
|
2020-10-20 16:48:59 +00:00
|
|
|
qemuCheckFips(vm),
|
2020-05-15 12:24:21 +00:00
|
|
|
&nnicindexes, &nicindexes, 0)))
|
2015-11-10 15:58:41 +00:00
|
|
|
goto cleanup;
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2015-11-06 17:41:37 +00:00
|
|
|
if (incoming && incoming->fd != -1)
|
|
|
|
virCommandPassFD(cmd, incoming->fd, 0);
|
2015-08-07 12:42:31 +00:00
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
/* now that we know it is about to start call the hook if present */
|
2015-10-30 16:59:43 +00:00
|
|
|
if (qemuProcessStartHook(driver, vm,
|
|
|
|
VIR_HOOK_QEMU_OP_START,
|
|
|
|
VIR_HOOK_SUBOP_BEGIN) < 0)
|
2015-11-10 15:58:41 +00:00
|
|
|
goto cleanup;
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2015-11-12 13:02:46 +00:00
|
|
|
qemuLogOperation(vm, "starting up", cmd, logCtxt);
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2020-10-04 17:51:27 +00:00
|
|
|
qemuDomainObjCheckTaint(driver, vm, logCtxt, incoming != NULL);
|
2011-05-04 10:59:20 +00:00
|
|
|
|
2015-11-12 14:28:36 +00:00
|
|
|
qemuDomainLogContextMarkPosition(logCtxt);
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2016-11-15 10:30:18 +00:00
|
|
|
VIR_DEBUG("Building mount namespace");
|
|
|
|
|
2020-07-20 14:54:11 +00:00
|
|
|
if (qemuProcessEnableDomainNamespaces(driver, vm) < 0)
|
2016-11-15 10:30:18 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
2015-11-02 09:35:58 +00:00
|
|
|
VIR_DEBUG("Setting up raw IO");
|
2022-01-13 17:26:14 +00:00
|
|
|
if (qemuProcessSetupRawIO(vm, cmd) < 0)
|
2015-11-10 15:58:41 +00:00
|
|
|
goto cleanup;
|
2014-09-09 22:51:02 +00:00
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
virCommandSetPreExecHook(cmd, qemuProcessHook, &hookData);
|
2021-03-02 17:56:06 +00:00
|
|
|
virCommandSetUmask(cmd, 0x002);
|
|
|
|
|
|
|
|
VIR_DEBUG("Setting up process limits");
|
|
|
|
|
2021-03-02 17:57:51 +00:00
|
|
|
/* In some situations, e.g. VFIO passthrough, QEMU might need to lock a
|
|
|
|
* significant amount of memory, so we need to set the limit accordingly */
|
|
|
|
maxMemLock = qemuDomainGetMemLockLimitBytes(vm->def, false);
|
|
|
|
|
2021-03-02 17:20:47 +00:00
|
|
|
/* For all these settings, zero indicates that the limit should
|
|
|
|
* not be set explicitly and the default/inherited limit should
|
|
|
|
* be applied instead */
|
|
|
|
if (maxMemLock > 0)
|
|
|
|
virCommandSetMaxMemLock(cmd, maxMemLock);
|
|
|
|
if (cfg->maxProcesses > 0)
|
|
|
|
virCommandSetMaxProcesses(cmd, cfg->maxProcesses);
|
|
|
|
if (cfg->maxFiles > 0)
|
|
|
|
virCommandSetMaxFiles(cmd, cfg->maxFiles);
|
|
|
|
|
|
|
|
/* In this case, however, zero means that core dumps should be
|
|
|
|
* disabled, and so we always need to set the limit explicitly */
|
2015-03-18 11:14:55 +00:00
|
|
|
virCommandSetMaxCoreSize(cmd, cfg->maxCore);
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2013-02-01 20:20:22 +00:00
|
|
|
VIR_DEBUG("Setting up security labelling");
|
2017-02-13 13:36:53 +00:00
|
|
|
if (qemuSecuritySetChildProcessLabel(driver->securityManager,
|
|
|
|
vm->def, cmd) < 0)
|
2015-11-10 15:58:41 +00:00
|
|
|
goto cleanup;
|
2013-02-01 20:20:22 +00:00
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
virCommandSetOutputFD(cmd, &logfile);
|
|
|
|
virCommandSetErrorFD(cmd, &logfile);
|
|
|
|
virCommandNonblockingFDs(cmd);
|
2011-06-17 13:43:54 +00:00
|
|
|
virCommandSetPidFile(cmd, priv->pidfile);
|
2011-02-14 16:09:39 +00:00
|
|
|
virCommandDaemonize(cmd);
|
2010-10-26 14:04:46 +00:00
|
|
|
virCommandRequireHandshake(cmd);
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2017-02-13 13:36:53 +00:00
|
|
|
if (qemuSecurityPreFork(driver->securityManager) < 0)
|
2015-11-10 15:58:41 +00:00
|
|
|
goto cleanup;
|
2015-10-20 12:26:46 +00:00
|
|
|
rv = virCommandRun(cmd, NULL);
|
2017-02-13 13:36:53 +00:00
|
|
|
qemuSecurityPostFork(driver->securityManager);
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2011-04-11 22:25:25 +00:00
|
|
|
/* wait for qemu process to show up */
|
2015-10-20 12:26:46 +00:00
|
|
|
if (rv == 0) {
|
2019-05-23 09:00:27 +00:00
|
|
|
if ((rv = virPidFileReadPath(priv->pidfile, &vm->pid)) < 0) {
|
|
|
|
virReportSystemError(-rv,
|
|
|
|
_("Domain %s didn't show up"),
|
|
|
|
vm->def->name);
|
|
|
|
goto cleanup;
|
2011-02-14 16:09:39 +00:00
|
|
|
}
|
2016-10-06 14:54:41 +00:00
|
|
|
VIR_DEBUG("QEMU vm=%p name=%s running with pid=%lld",
|
2018-04-25 12:42:34 +00:00
|
|
|
vm, vm->def->name, (long long)vm->pid);
|
2013-10-31 11:28:46 +00:00
|
|
|
} else {
|
|
|
|
VIR_DEBUG("QEMU vm=%p name=%s failed to spawn",
|
|
|
|
vm, vm->def->name);
|
2019-05-23 09:00:27 +00:00
|
|
|
goto cleanup;
|
2011-02-14 16:09:39 +00:00
|
|
|
}
|
|
|
|
|
2011-07-12 09:45:16 +00:00
|
|
|
VIR_DEBUG("Writing early domain status to disk");
|
2019-11-27 12:53:10 +00:00
|
|
|
if (virDomainObjSave(vm, driver->xmlopt, cfg->stateDir) < 0)
|
2015-11-10 15:58:41 +00:00
|
|
|
goto cleanup;
|
2011-07-12 09:45:16 +00:00
|
|
|
|
2010-10-26 14:04:46 +00:00
|
|
|
VIR_DEBUG("Waiting for handshake from child");
|
|
|
|
if (virCommandHandshakeWait(cmd) < 0) {
|
2013-12-03 16:38:14 +00:00
|
|
|
/* Read errors from child that occurred between fork and exec. */
|
2015-11-12 13:54:04 +00:00
|
|
|
qemuProcessReportLogError(logCtxt,
|
|
|
|
_("Process exited prior to exec"));
|
2015-11-10 15:58:41 +00:00
|
|
|
goto cleanup;
|
2010-10-26 14:04:46 +00:00
|
|
|
}
|
|
|
|
|
qemu_domain_namespace: Repurpose qemuDomainBuildNamespace()
Okay, here is the deal. Currently, the way we build namespace is
very fragile. It is done from pre-exec hook when starting a
domain, after we mass closed all FDs and before we drop
privileges and exec() QEMU. This fact poses some limitations onto
the namespace build code, e.g. it has to make sure not to keep
any FD opened (not even through a library call), because it would
be leaked to QEMU. Also, it has to call only async signal safe
functions. These requirements are hard to meet - in fact as of my
commit v6.2.0-rc1~235 we are leaking a FD into QEMU by calling
libdevmapper functions.
To solve this issue and avoid similar problems in the future, we
should change our paradigm. We already have functions which can
populate domain's namespace with nodes from the daemon context.
If we use them to populate the namespace and keep only the bare
minimum in the pre-exec hook, we've mitigated the risk.
Therefore, the old qemuDomainBuildNamespace() is renamed to
qemuDomainUnshareNamespace() and new qemuDomainBuildNamespace()
function is introduced. So far, the new function is basically a
NOP and domain's namespace is still populated from the pre-exec
hook - next patches will fix it.
Signed-off-by: Michal Privoznik <mprivozn@redhat.com>
Reviewed-by: Ján Tomko <jtomko@redhat.com>
2020-07-21 16:12:26 +00:00
|
|
|
VIR_DEBUG("Building domain mount namespace (if required)");
|
2020-07-21 15:13:11 +00:00
|
|
|
if (qemuDomainBuildNamespace(cfg, vm) < 0)
|
2020-07-21 16:12:26 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
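For context on the log message above: a minimal sketch of the pre-exec side of such a mount namespace setup, restricted to async-signal-safe calls so nothing leaks into QEMU (Linux-specific; the function name is hypothetical and this is not the actual qemuDomainUnshareNamespace() body):

#define _GNU_SOURCE
#include <sched.h>
#include <sys/mount.h>

/* Hypothetical pre-exec sketch: only plain syscalls, no heap allocations,
 * no file descriptors kept open. Device nodes are populated later from the
 * daemon context, as the commit message above describes. */
static int
unshareMountNamespaceSketch(void)
{
    if (unshare(CLONE_NEWNS) < 0)
        return -1;

    /* keep our mount changes private to this namespace */
    if (mount(NULL, "/", NULL, MS_REC | MS_SLAVE, NULL) < 0)
        return -1;

    /* a private, initially empty /dev to be filled in by the daemon */
    if (mount("devfs", "/dev", "tmpfs", MS_NOSUID, "mode=755") < 0)
        return -1;

    return 0;
}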
2013-07-22 14:21:15 +00:00
|
|
|
VIR_DEBUG("Setting up domain cgroup (if required)");
|
2017-07-25 15:49:43 +00:00
|
|
|
if (qemuSetupCgroup(vm, nnicindexes, nicindexes) < 0)
|
2015-11-10 15:58:41 +00:00
|
|
|
goto cleanup;
|
2013-07-22 14:21:15 +00:00
|
|
|
|
2020-09-07 14:44:50 +00:00
|
|
|
VIR_DEBUG("Setting up domain perf (if required)");
|
|
|
|
if (qemuProcessEnablePerf(vm) < 0)
|
2016-04-27 12:40:23 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
2016-01-14 15:56:53 +00:00
|
|
|
/* This must be done after cgroup placement to avoid resetting CPU
|
|
|
|
* affinity */
|
2019-01-30 08:46:23 +00:00
|
|
|
if (qemuProcessInitCpuAffinity(vm) < 0)
|
2016-02-26 15:34:23 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
2019-04-10 15:14:25 +00:00
|
|
|
VIR_DEBUG("Setting emulator tuning/settings");
|
|
|
|
if (qemuProcessSetupEmulator(vm) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
2018-04-05 19:06:55 +00:00
|
|
|
VIR_DEBUG("Setting cgroup for external devices (if required)");
|
|
|
|
if (qemuSetupCgroupForExtDevices(vm, driver) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
2018-01-27 18:01:36 +00:00
|
|
|
VIR_DEBUG("Setting up resctrl");
|
2019-12-10 10:24:19 +00:00
|
|
|
if (qemuProcessResctrlCreate(driver, vm) < 0)
|
2017-11-10 12:21:51 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
2018-05-11 13:40:34 +00:00
|
|
|
VIR_DEBUG("Setting up managed PR daemon");
|
2018-05-14 05:53:09 +00:00
|
|
|
if (virDomainDefHasManagedPR(vm->def) &&
|
|
|
|
qemuProcessStartManagedPRDaemon(vm) < 0)
|
2018-04-19 08:00:36 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
2010-10-26 14:04:46 +00:00
|
|
|
VIR_DEBUG("Setting domain security labels");
|
2017-01-05 13:19:21 +00:00
|
|
|
if (qemuSecuritySetAllLabel(driver,
|
|
|
|
vm,
|
2019-09-11 05:53:09 +00:00
|
|
|
incoming ? incoming->path : NULL,
|
|
|
|
incoming != NULL) < 0)
|
2017-01-05 13:19:21 +00:00
|
|
|
goto cleanup;
|
2010-10-26 14:04:46 +00:00
|
|
|
|
2012-06-11 13:57:19 +00:00
|
|
|
/* Security manager labeled all devices, therefore
|
2015-11-10 15:58:41 +00:00
|
|
|
* if any operation from now on fails, we need to ask the caller to
|
|
|
|
* restore labels.
|
|
|
|
*/
|
|
|
|
ret = -2;
|
2012-06-11 13:57:19 +00:00
|
|
|
|
2015-11-10 15:58:41 +00:00
|
|
|
if (incoming && incoming->fd != -1) {
|
2010-10-26 14:04:46 +00:00
|
|
|
/* if there's an fd to migrate from, and it's a pipe, put the
|
|
|
|
* proper security label on it
|
|
|
|
*/
|
|
|
|
struct stat stdin_sb;
|
|
|
|
|
|
|
|
VIR_DEBUG("setting security label on pipe used for migration");
|
|
|
|
|
2015-11-10 15:58:41 +00:00
|
|
|
if (fstat(incoming->fd, &stdin_sb) < 0) {
|
2010-10-26 14:04:46 +00:00
|
|
|
virReportSystemError(errno,
|
2015-11-10 15:58:41 +00:00
|
|
|
_("cannot stat fd %d"), incoming->fd);
|
|
|
|
goto cleanup;
|
2010-10-26 14:04:46 +00:00
|
|
|
}
|
|
|
|
if (S_ISFIFO(stdin_sb.st_mode) &&
|
2017-02-13 13:36:53 +00:00
|
|
|
qemuSecuritySetImageFDLabel(driver->securityManager,
|
|
|
|
vm->def, incoming->fd) < 0)
|
2015-11-10 15:58:41 +00:00
|
|
|
goto cleanup;
|
2010-10-26 14:04:46 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
VIR_DEBUG("Labelling done, completing handshake to child");
|
2014-11-13 14:25:30 +00:00
|
|
|
if (virCommandHandshakeNotify(cmd) < 0)
|
2015-11-10 15:58:41 +00:00
|
|
|
goto cleanup;
|
2010-10-26 14:04:46 +00:00
|
|
|
VIR_DEBUG("Handshake complete, child running");
|
|
|
|
|
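The handshake used above follows the classic pipe-based fork/exec synchronisation pattern: the child announces it survived fork(), then blocks until the parent has finished labelling before it exec()s. A minimal sketch of the child side (two pipes assumed to be set up before fork(); names hypothetical):

#include <unistd.h>

/* Hypothetical child-side handshake: signal readiness, then wait for the
 * parent's go-ahead before exec()ing QEMU. */
static int
handshakeChildSketch(int notify_wr, int wait_rd)
{
    char c = 'r';

    if (write(notify_wr, &c, 1) != 1)   /* "I'm alive, past fork()" */
        return -1;

    if (read(wait_rd, &c, 1) != 1)      /* block until labelling is done */
        return -1;

    return 0;                           /* safe to exec() now */
}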
2020-02-12 12:26:11 +00:00
|
|
|
if (qemuDomainObjStartWorker(vm) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
2011-05-09 09:24:09 +00:00
|
|
|
VIR_DEBUG("Waiting for monitor to show up");
|
2017-07-10 21:30:03 +00:00
|
|
|
if (qemuProcessWaitForMonitor(driver, vm, asyncJob, logCtxt) < 0)
|
2015-11-10 15:58:41 +00:00
|
|
|
goto cleanup;
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2016-11-16 13:43:01 +00:00
|
|
|
if (qemuConnectAgent(driver, vm) < 0)
|
|
|
|
goto cleanup;
|
2011-10-05 17:31:54 +00:00
|
|
|
|
2016-08-04 12:36:24 +00:00
|
|
|
VIR_DEBUG("setting up hotpluggable cpus");
|
|
|
|
if (qemuDomainHasHotpluggableStartupVcpus(vm->def)) {
|
|
|
|
if (qemuDomainRefreshVcpuInfo(driver, vm, asyncJob, false) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
if (qemuProcessValidateHotpluggableVcpus(vm->def) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
if (qemuProcessSetupHotpluggableVcpus(driver, vm, asyncJob) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2016-07-19 14:00:29 +00:00
|
|
|
VIR_DEBUG("Refreshing VCPU info");
|
2016-08-05 12:48:27 +00:00
|
|
|
if (qemuDomainRefreshVcpuInfo(driver, vm, asyncJob, false) < 0)
|
2015-11-10 15:58:41 +00:00
|
|
|
goto cleanup;
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2016-08-01 05:35:50 +00:00
|
|
|
if (qemuDomainValidateVcpuInfo(vm) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
2016-08-04 12:23:25 +00:00
|
|
|
qemuDomainVcpuPersistOrder(vm->def);
|
|
|
|
|
2022-02-07 09:54:43 +00:00
|
|
|
VIR_DEBUG("Verifying and updating provided guest CPU");
|
|
|
|
if (qemuProcessUpdateAndVerifyCPU(driver, vm, asyncJob) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
2014-09-03 13:07:38 +00:00
|
|
|
VIR_DEBUG("Detecting IOThread PIDs");
|
|
|
|
if (qemuProcessDetectIOThreadPIDs(driver, vm, asyncJob) < 0)
|
2015-11-10 15:58:41 +00:00
|
|
|
goto cleanup;
|
2014-09-03 13:07:38 +00:00
|
|
|
|
2016-02-16 13:43:37 +00:00
|
|
|
VIR_DEBUG("Setting global CPU cgroup (if required)");
|
2022-01-25 16:19:53 +00:00
|
|
|
if (virDomainCgroupSetupGlobalCpuCgroup(vm, priv->cgroup, priv->autoNodeset) < 0)
|
2016-02-16 13:43:37 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
2016-01-13 15:36:52 +00:00
|
|
|
VIR_DEBUG("Setting vCPU tuning/settings");
|
|
|
|
if (qemuProcessSetupVcpus(vm) < 0)
|
2015-11-10 15:58:41 +00:00
|
|
|
goto cleanup;
|
2011-07-21 02:10:31 +00:00
|
|
|
|
2016-01-14 09:38:02 +00:00
|
|
|
VIR_DEBUG("Setting IOThread tuning/settings");
|
|
|
|
if (qemuProcessSetupIOThreads(vm) < 0)
|
2015-11-10 15:58:41 +00:00
|
|
|
goto cleanup;
|
2015-01-08 14:37:50 +00:00
|
|
|
|
2019-05-22 08:40:58 +00:00
|
|
|
VIR_DEBUG("Setting emulator scheduler");
|
|
|
|
if (vm->def->cputune.emulatorsched &&
|
|
|
|
virProcessSetScheduler(vm->pid,
|
|
|
|
vm->def->cputune.emulatorsched->policy,
|
|
|
|
vm->def->cputune.emulatorsched->priority) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
2011-05-09 09:24:09 +00:00
|
|
|
VIR_DEBUG("Setting any required VM passwords");
|
2018-02-09 16:14:41 +00:00
|
|
|
if (qemuProcessInitPasswords(driver, vm, asyncJob) < 0)
|
2015-11-10 15:58:41 +00:00
|
|
|
goto cleanup;
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2011-09-06 08:23:47 +00:00
|
|
|
/* set default link states */
|
|
|
|
/* qemu doesn't support setting this on the command line, so
|
|
|
|
* enter the monitor */
|
|
|
|
VIR_DEBUG("Setting network link states");
|
2015-11-02 09:50:21 +00:00
|
|
|
if (qemuProcessSetLinkStates(driver, vm, asyncJob) < 0)
|
2015-11-10 15:58:41 +00:00
|
|
|
goto cleanup;
|
2011-09-06 08:23:47 +00:00
|
|
|
|
2011-05-09 09:24:09 +00:00
|
|
|
VIR_DEBUG("Setting initial memory amount");
|
2015-11-02 10:00:49 +00:00
|
|
|
if (qemuProcessSetupBalloon(driver, vm, asyncJob) < 0)
|
2015-11-10 15:58:41 +00:00
|
|
|
goto cleanup;
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2018-06-13 15:21:02 +00:00
|
|
|
if (qemuProcessSetupDiskThrottlingBlockdev(driver, vm, asyncJob) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
2015-06-30 14:31:24 +00:00
|
|
|
/* Since CPUs were not started yet, the balloon could not return the memory
|
2015-05-27 13:04:14 +00:00
|
|
|
* to the host and thus cur_balloon needs to be updated so that GetXMLdesc
|
|
|
|
* and friends return the correct size in case they can't grab the job */
|
2015-11-06 17:41:37 +00:00
|
|
|
if (!incoming && !snapshot &&
|
2015-09-23 12:19:06 +00:00
|
|
|
qemuProcessRefreshBalloonState(driver, vm, asyncJob) < 0)
|
2015-11-10 15:58:41 +00:00
|
|
|
goto cleanup;
|
2015-05-27 13:04:14 +00:00
|
|
|
|
2015-11-10 15:58:41 +00:00
|
|
|
if (flags & VIR_QEMU_PROCESS_START_AUTODESTROY &&
|
|
|
|
qemuProcessAutoDestroyAdd(driver, vm, conn) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
2021-04-28 09:03:19 +00:00
|
|
|
if (!incoming && !snapshot) {
|
|
|
|
VIR_DEBUG("Setting up transient disk");
|
2021-04-27 18:31:11 +00:00
|
|
|
if (qemuProcessSetupDisksTransient(vm, asyncJob) < 0)
|
2021-04-28 09:03:19 +00:00
|
|
|
goto cleanup;
|
|
|
|
}
|
2020-09-17 13:30:45 +00:00
|
|
|
|
2021-08-23 14:40:46 +00:00
|
|
|
VIR_DEBUG("Setting handling of lifecycle actions");
|
|
|
|
if (qemuProcessSetupLifecycleActions(vm, asyncJob) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
2015-11-10 15:58:41 +00:00
|
|
|
ret = 0;
|
|
|
|
|
|
|
|
cleanup:
|
2016-05-02 17:31:47 +00:00
|
|
|
qemuDomainSecretDestroy(vm);
|
2015-11-10 15:58:41 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2017-09-25 14:16:08 +00:00
|
|
|
/**
|
|
|
|
* qemuProcessRefreshState:
|
|
|
|
* @driver: qemu driver data
|
|
|
|
* @vm: domain to refresh
|
|
|
|
* @asyncJob: async job type
|
|
|
|
*
|
|
|
|
* This function gathers calls to refresh qemu state after startup. This
|
|
|
|
* function is called after a deferred migration finishes so that we can update
|
|
|
|
* state influenced by the migration stream.
|
|
|
|
*/
|
2018-02-01 14:02:17 +00:00
|
|
|
int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessRefreshState(virQEMUDriver *driver,
|
|
|
|
virDomainObj *vm,
|
2017-09-25 14:16:08 +00:00
|
|
|
qemuDomainAsyncJob asyncJob)
|
|
|
|
{
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
2019-07-24 03:04:25 +00:00
|
|
|
|
2017-09-25 14:16:08 +00:00
|
|
|
VIR_DEBUG("Fetching list of active devices");
|
|
|
|
if (qemuDomainUpdateDeviceList(driver, vm, asyncJob) < 0)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
VIR_DEBUG("Updating info of memory devices");
|
|
|
|
if (qemuDomainUpdateMemoryDeviceInfo(driver, vm, asyncJob) < 0)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
VIR_DEBUG("Detecting actual memory size for video device");
|
|
|
|
if (qemuProcessUpdateVideoRamSize(driver, vm, asyncJob) < 0)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
VIR_DEBUG("Updating disk data");
|
|
|
|
if (qemuProcessRefreshDisks(driver, vm, asyncJob) < 0)
|
|
|
|
return -1;
|
2019-07-24 03:04:25 +00:00
|
|
|
if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV) &&
|
|
|
|
qemuBlockNodeNamesDetect(driver, vm, asyncJob) < 0)
|
|
|
|
return -1;
|
2017-09-25 14:16:08 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2015-11-10 12:29:40 +00:00
|
|
|
/**
|
|
|
|
* qemuProcessFinishStartup:
|
|
|
|
*
|
|
|
|
* Finish starting a new domain.
|
|
|
|
*/
|
|
|
|
int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessFinishStartup(virQEMUDriver *driver,
|
|
|
|
virDomainObj *vm,
|
2015-11-10 12:29:40 +00:00
|
|
|
qemuDomainAsyncJob asyncJob,
|
|
|
|
bool startCPUs,
|
|
|
|
virDomainPausedReason pausedReason)
|
|
|
|
{
|
2019-12-20 21:16:30 +00:00
|
|
|
g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
|
2015-11-10 12:29:40 +00:00
|
|
|
|
|
|
|
if (startCPUs) {
|
|
|
|
VIR_DEBUG("Starting domain CPUs");
|
2018-02-09 15:40:51 +00:00
|
|
|
if (qemuProcessStartCPUs(driver, vm,
|
2015-11-10 12:29:40 +00:00
|
|
|
VIR_DOMAIN_RUNNING_BOOTED,
|
|
|
|
asyncJob) < 0) {
|
2018-05-05 12:04:21 +00:00
|
|
|
if (virGetLastErrorCode() == VIR_ERR_OK)
|
2015-11-10 12:29:40 +00:00
|
|
|
virReportError(VIR_ERR_OPERATION_FAILED, "%s",
|
|
|
|
_("resume operation failed"));
|
2019-12-20 21:16:31 +00:00
|
|
|
return -1;
|
2015-11-10 12:29:40 +00:00
|
|
|
}
|
|
|
|
} else {
|
|
|
|
virDomainObjSetState(vm, VIR_DOMAIN_PAUSED, pausedReason);
|
|
|
|
}
|
|
|
|
|
|
|
|
VIR_DEBUG("Writing domain status to disk");
|
2019-11-27 12:53:10 +00:00
|
|
|
if (virDomainObjSave(vm, driver->xmlopt, cfg->stateDir) < 0)
|
2019-12-20 21:16:31 +00:00
|
|
|
return -1;
|
2015-11-10 12:29:40 +00:00
|
|
|
|
|
|
|
if (qemuProcessStartHook(driver, vm,
|
|
|
|
VIR_HOOK_QEMU_OP_STARTED,
|
|
|
|
VIR_HOOK_SUBOP_BEGIN) < 0)
|
2019-12-20 21:16:31 +00:00
|
|
|
return -1;
|
2015-11-10 12:29:40 +00:00
|
|
|
|
2019-12-20 21:16:31 +00:00
|
|
|
return 0;
|
2015-11-10 12:29:40 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2015-11-10 15:58:41 +00:00
|
|
|
int
|
|
|
|
qemuProcessStart(virConnectPtr conn,
|
2021-03-11 07:16:13 +00:00
|
|
|
virQEMUDriver *driver,
|
|
|
|
virDomainObj *vm,
|
|
|
|
virCPUDef *updatedCPU,
|
2015-11-10 15:58:41 +00:00
|
|
|
qemuDomainAsyncJob asyncJob,
|
|
|
|
const char *migrateFrom,
|
|
|
|
int migrateFd,
|
|
|
|
const char *migratePath,
|
2021-03-11 07:16:13 +00:00
|
|
|
virDomainMomentObj *snapshot,
|
2015-11-10 15:58:41 +00:00
|
|
|
virNetDevVPortProfileOp vmop,
|
|
|
|
unsigned int flags)
|
|
|
|
{
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
|
|
|
qemuProcessIncomingDef *incoming = NULL;
|
2015-11-10 15:58:41 +00:00
|
|
|
unsigned int stopFlags;
|
|
|
|
bool relabel = false;
|
qemu: Use qemuSecuritySetSavedStateLabel() to label restore path
Currently, when restoring from a domain the path that the domain
restores from is labelled under qemuSecuritySetAllLabel() (and after
v6.3.0-rc1~108 even outside transactions). While this grants QEMU
the access, it has a flaw, because once the domain is restored, up
and running then qemuSecurityDomainRestorePathLabel() is called,
which is not real counterpart. In case of DAC driver the
SetAllLabel() does nothing with the restore path but
RestorePathLabel() does - it chown()-s the file back and since there
is no original label remembered, the file is chown()-ed to
root:root. While the apparent solution is to have DAC driver set the
label (and thus remember the original one) in SetAllLabel(), we can
do better.
Turns out, we are opening the file ourselves (because it may live on
a root squashed NFS) and then are just passing the FD to QEMU. But
this means, that we don't have to chown() the file at all, we need
to set SELinux labels and/or add the path to AppArmor profile.
And since we want to restore labels right after QEMU is done loading
the migration stream (we don't want to wait until
qemuSecurityRestoreAllLabel()), the best way to approach this is to
have separate APIs for labelling and restoring label on the restore
file.
I will investigate whether AppArmor can use the SavedStateLabel()
API instead of passing the restore path to SetAllLabel().
Resolves: https://bugzilla.redhat.com/show_bug.cgi?id=1851016
Signed-off-by: Michal Privoznik <mprivozn@redhat.com>
Reviewed-by: Erik Skultety <eskultet@redhat.com>
2020-06-27 04:28:17 +00:00
|
|
|
bool relabelSavedState = false;
|
2015-11-10 15:58:41 +00:00
|
|
|
int ret = -1;
|
|
|
|
int rv;
|
|
|
|
|
|
|
|
VIR_DEBUG("conn=%p driver=%p vm=%p name=%s id=%d asyncJob=%s "
|
|
|
|
"migrateFrom=%s migrateFd=%d migratePath=%s "
|
|
|
|
"snapshot=%p vmop=%d flags=0x%x",
|
|
|
|
conn, driver, vm, vm->def->name, vm->def->id,
|
|
|
|
qemuDomainAsyncJobTypeToString(asyncJob),
|
|
|
|
NULLSTR(migrateFrom), migrateFd, NULLSTR(migratePath),
|
|
|
|
snapshot, vmop, flags);
|
|
|
|
|
|
|
|
virCheckFlagsGoto(VIR_QEMU_PROCESS_START_COLD |
|
|
|
|
VIR_QEMU_PROCESS_START_PAUSED |
|
2018-04-09 14:31:17 +00:00
|
|
|
VIR_QEMU_PROCESS_START_AUTODESTROY |
|
2022-02-10 09:52:37 +00:00
|
|
|
VIR_QEMU_PROCESS_START_GEN_VMID |
|
|
|
|
VIR_QEMU_PROCESS_START_RESET_NVRAM, cleanup);
|
2015-11-10 15:58:41 +00:00
|
|
|
|
2016-05-27 11:45:05 +00:00
|
|
|
if (!migrateFrom && !snapshot)
|
|
|
|
flags |= VIR_QEMU_PROCESS_START_NEW;
|
|
|
|
|
2017-05-31 10:34:10 +00:00
|
|
|
if (qemuProcessInit(driver, vm, updatedCPU,
|
|
|
|
asyncJob, !!migrateFrom, flags) < 0)
|
2015-11-10 15:58:41 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
if (migrateFrom) {
|
2016-01-07 23:07:37 +00:00
|
|
|
incoming = qemuProcessIncomingDefNew(priv->qemuCaps, NULL, migrateFrom,
|
2015-11-10 15:58:41 +00:00
|
|
|
migrateFd, migratePath);
|
|
|
|
if (!incoming)
|
|
|
|
goto stop;
|
|
|
|
}
|
|
|
|
|
2018-02-09 16:36:24 +00:00
|
|
|
if (qemuProcessPrepareDomain(driver, vm, flags) < 0)
|
2016-03-15 12:00:59 +00:00
|
|
|
goto stop;
|
|
|
|
|
2017-10-03 08:14:21 +00:00
|
|
|
if (qemuProcessPrepareHost(driver, vm, flags) < 0)
|
2016-03-22 12:16:05 +00:00
|
|
|
goto stop;
|
|
|
|
|
2020-06-27 04:28:17 +00:00
|
|
|
if (migratePath) {
|
|
|
|
if (qemuSecuritySetSavedStateLabel(driver->securityManager,
|
|
|
|
vm->def, migratePath) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
relabelSavedState = true;
|
|
|
|
}
|
|
|
|
|
2015-11-10 15:58:41 +00:00
|
|
|
if ((rv = qemuProcessLaunch(conn, driver, vm, asyncJob, incoming,
|
|
|
|
snapshot, vmop, flags)) < 0) {
|
2016-03-14 15:54:03 +00:00
|
|
|
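/* rv == -2 from qemuProcessLaunch() means security labels were already
 * applied (the ret = -2 set after qemuSecuritySetAllLabel() above), so the
 * caller must restore them even though the launch failed */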
if (rv == -2)
|
2015-11-10 15:58:41 +00:00
|
|
|
relabel = true;
|
|
|
|
goto stop;
|
|
|
|
}
|
|
|
|
relabel = true;
|
2014-12-10 14:31:23 +00:00
|
|
|
|
2019-02-04 12:36:24 +00:00
|
|
|
if (incoming) {
|
|
|
|
if (incoming->deferredURI &&
|
|
|
|
qemuMigrationDstRun(driver, vm, incoming->deferredURI, asyncJob) < 0)
|
|
|
|
goto stop;
|
|
|
|
} else {
|
|
|
|
/* Refresh state of devices from QEMU. During migration this happens
|
|
|
|
* in qemuMigrationDstFinish to ensure that state information is fully
|
|
|
|
* transferred. */
|
|
|
|
if (qemuProcessRefreshState(driver, vm, asyncJob) < 0)
|
|
|
|
goto stop;
|
|
|
|
}
|
2015-11-11 17:02:23 +00:00
|
|
|
|
2018-02-09 15:40:51 +00:00
|
|
|
if (qemuProcessFinishStartup(driver, vm, asyncJob,
|
2015-11-10 12:29:40 +00:00
|
|
|
!(flags & VIR_QEMU_PROCESS_START_PAUSED),
|
|
|
|
incoming ?
|
|
|
|
VIR_DOMAIN_PAUSED_MIGRATION :
|
|
|
|
VIR_DOMAIN_PAUSED_USER) < 0)
|
2015-11-10 15:58:41 +00:00
|
|
|
goto stop;
|
Add some missing hook functions
A core use case of the hook scripts is to be able to do things
to a guest's network configuration. It is possible to hook into
the 'start' operation for a QEMU guest which runs just before
the guest is started. The TAP devices will exist at this point,
but the QEMU process will not. It can be desirable to have a
'started' hook too, which runs once QEMU has started.
If libvirtd is restarted it will re-populate firewall rules,
but there is no QEMU hook to trigger for existing domains.
This is solved with a 'reconnect' hook.
Finally, if attaching to an external QEMU process there needs
to be an 'attach' hook script.
This all also applies to the LXC driver
* docs/hooks.html.in: Document new operations
* src/util/hooks.c, src/util/hooks.h: Add 'started', 'reconnect'
and 'attach' operations for QEMU. Add 'prepare', 'started',
'release' and 'reconnect' operations for LXC
* src/lxc/lxc_driver.c: Add hooks for 'prepare', 'started',
'release' and 'reconnect' operations
* src/qemu/qemu_process.c: Add hooks for 'started', 'reconnect'
and 'attach' operations
2012-05-28 14:04:31 +00:00
|
|
|
|
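As a reference for the hook operations described in the log message above: the 'started' hook fired from qemuProcessFinishStartup() boils down to a virHookCall() of the same shape as the 'stopped' call that appears later in qemuProcessStop() (a hedged sketch, not the exact helper body):

/* Sketch: fire the 'started' hook for a freshly started domain, mirroring
 * the VIR_HOOK_QEMU_OP_STOPPED invocation further below. */
static int
startedHookSketch(virQEMUDriver *driver, virDomainObj *vm)
{
    g_autofree char *xml = qemuDomainDefFormatXML(driver, NULL, vm->def, 0);

    return virHookCall(VIR_HOOK_DRIVER_QEMU, vm->def->name,
                       VIR_HOOK_QEMU_OP_STARTED, VIR_HOOK_SUBOP_BEGIN,
                       NULL, xml, NULL);
}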
2018-02-01 14:02:17 +00:00
|
|
|
if (!incoming) {
|
|
|
|
/* Keep watching qemu log for errors during incoming migration, otherwise
|
|
|
|
* stop reporting errors from the qemu log. */
|
2015-11-12 13:54:04 +00:00
|
|
|
qemuMonitorSetDomainLog(priv->mon, NULL, NULL, NULL);
|
2018-02-01 14:02:17 +00:00
|
|
|
}
|
|
|
|
|
2015-10-20 12:30:52 +00:00
|
|
|
ret = 0;
|
|
|
|
|
|
|
|
cleanup:
|
2020-06-27 04:28:17 +00:00
|
|
|
if (relabelSavedState &&
|
|
|
|
qemuSecurityRestoreSavedStateLabel(driver->securityManager,
|
|
|
|
vm->def, migratePath) < 0)
|
|
|
|
VIR_WARN("failed to restore save state label on %s", migratePath);
|
2015-11-06 17:41:37 +00:00
|
|
|
qemuProcessIncomingDefFree(incoming);
|
2015-10-20 12:30:52 +00:00
|
|
|
return ret;
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2015-11-10 15:58:41 +00:00
|
|
|
stop:
|
|
|
|
stopFlags = 0;
|
|
|
|
if (!relabel)
|
|
|
|
stopFlags |= VIR_QEMU_PROCESS_STOP_NO_RELABEL;
|
|
|
|
if (migrateFrom)
|
|
|
|
stopFlags |= VIR_QEMU_PROCESS_STOP_MIGRATED;
|
2015-11-12 11:01:07 +00:00
|
|
|
if (priv->mon)
|
2015-11-12 13:54:04 +00:00
|
|
|
qemuMonitorSetDomainLog(priv->mon, NULL, NULL, NULL);
|
2016-02-11 10:20:28 +00:00
|
|
|
qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED, asyncJob, stopFlags);
|
2015-10-20 12:30:52 +00:00
|
|
|
goto cleanup;
|
2011-02-14 16:09:39 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2020-10-15 12:39:16 +00:00
|
|
|
int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessCreatePretendCmdPrepare(virQEMUDriver *driver,
|
|
|
|
virDomainObj *vm,
|
2020-10-15 12:39:16 +00:00
|
|
|
const char *migrateURI,
|
|
|
|
unsigned int flags)
|
2016-03-22 12:17:27 +00:00
|
|
|
{
|
2019-12-20 21:16:32 +00:00
|
|
|
virCheckFlags(VIR_QEMU_PROCESS_START_COLD |
|
|
|
|
VIR_QEMU_PROCESS_START_PAUSED |
|
2020-10-15 12:39:16 +00:00
|
|
|
VIR_QEMU_PROCESS_START_AUTODESTROY, -1);
|
2016-03-22 12:17:27 +00:00
|
|
|
|
|
|
|
flags |= VIR_QEMU_PROCESS_START_PRETEND;
|
2020-11-18 17:56:24 +00:00
|
|
|
|
|
|
|
if (!migrateURI)
|
|
|
|
flags |= VIR_QEMU_PROCESS_START_NEW;
|
|
|
|
|
2017-05-31 10:34:10 +00:00
|
|
|
if (qemuProcessInit(driver, vm, NULL, QEMU_ASYNC_JOB_NONE,
|
|
|
|
!!migrateURI, flags) < 0)
|
2020-10-15 12:39:16 +00:00
|
|
|
return -1;
|
2016-03-22 12:17:27 +00:00
|
|
|
|
2018-02-09 16:36:24 +00:00
|
|
|
if (qemuProcessPrepareDomain(driver, vm, flags) < 0)
|
2020-10-15 12:39:16 +00:00
|
|
|
return -1;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2021-03-11 07:16:13 +00:00
|
|
|
virCommand *
|
|
|
|
qemuProcessCreatePretendCmdBuild(virQEMUDriver *driver,
|
|
|
|
virDomainObj *vm,
|
2020-10-15 12:39:16 +00:00
|
|
|
const char *migrateURI,
|
|
|
|
bool enableFips,
|
2021-09-24 17:15:22 +00:00
|
|
|
bool standalone)
|
2020-10-15 12:39:16 +00:00
|
|
|
{
|
2016-03-22 12:17:27 +00:00
|
|
|
VIR_DEBUG("Building emulator command line");
|
2019-12-20 21:16:32 +00:00
|
|
|
return qemuBuildCommandLine(driver,
|
|
|
|
vm,
|
|
|
|
migrateURI,
|
|
|
|
NULL,
|
|
|
|
VIR_NETDEV_VPORT_PROFILE_OP_NO_OP,
|
|
|
|
standalone,
|
|
|
|
enableFips,
|
|
|
|
NULL,
|
2020-05-15 12:24:21 +00:00
|
|
|
NULL,
|
2021-09-24 17:15:22 +00:00
|
|
|
0);
|
2016-03-22 12:17:27 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
qemu: drop driver lock while trying to terminate qemu process
This patch is based on an earlier patch by Eric Blake which was never
committed:
https://www.redhat.com/archives/libvir-list/2011-November/msg00243.html
Aside from rebasing, this patch only drops the driver lock once (prior
to the first time the function sleeps), then leaves it dropped until
it returns (Eric's patch would drop and re-acquire the lock around
each call to sleep).
At the time Eric sent his patch, the response (from Dan Berrange) was
that, while it wasn't a good thing to be holding the driver lock while
sleeping, we really need to rethink locking wrt the driver object,
switching to a finer-grained approach that locks individual items
within the driver object separately to allow for greater concurrency.
This is a good plan, and at the time it made sense to not apply the
patch because there was no known bug related to the driver lock being
held in this function.
However, we now know that the length of the wait in qemuProcessKill is
sometimes too short to allow the qemu process to fully flush its disk
cache before SIGKILL is sent, so we need to lengthen the timeout (in
order to improve the situation with management applications until they
can be updated to use the new VIR_DOMAIN_DESTROY_GRACEFUL flag added
in commit 72f8a7f19753506ed957b78ad800c0f3892c9304). But, if we
lengthen the timeout, we also lengthen the amount of time that all
other threads in libvirtd are essentially blocked from doing anything
(since just about everything needs to acquire the driver lock, if only
for long enough to get a pointer to a domain).
The solution is to modify qemuProcessKill to drop the driver lock
while sleeping, as proposed in Eric's patch. Then we can increase the
timeout with a clear conscience, and thus at least lower the chances
that someone running with existing management software will suffer the
consequence's of qemu's disk cache not being flushed.
In the meantime, we still should work on Dan's proposal to make
locking within the driver object more fine grained.
(NB: although I couldn't find any instance where qemuProcessKill() was
called with no jobs active for the domain (or some other guarantee
that the current thread had at least one refcount on the domain
object), this patch still follows Eric's method of temporarily adding
a ref prior to unlocking the domain object, because I couldn't
convince myself 100% that this was the case.)
2012-02-07 16:13:57 +00:00
|
|
|
int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessKill(virDomainObj *vm, unsigned int flags)
|
2011-04-21 15:19:06 +00:00
|
|
|
{
|
2017-09-25 10:43:33 +00:00
|
|
|
VIR_DEBUG("vm=%p name=%s pid=%lld flags=0x%x",
|
2013-10-31 11:28:46 +00:00
|
|
|
vm, vm->def->name,
|
2018-04-25 12:42:34 +00:00
|
|
|
(long long)vm->pid, flags);
|
2011-04-21 15:19:06 +00:00
|
|
|
|
2012-03-30 06:21:49 +00:00
|
|
|
if (!(flags & VIR_QEMU_PROCESS_KILL_NOCHECK)) {
|
|
|
|
if (!virDomainObjIsActive(vm)) {
|
|
|
|
VIR_DEBUG("VM '%s' not active", vm->def->name);
|
|
|
|
return 0;
|
|
|
|
}
|
2011-04-21 15:19:06 +00:00
|
|
|
}
|
|
|
|
|
2013-05-17 14:22:46 +00:00
|
|
|
if (flags & VIR_QEMU_PROCESS_KILL_NOWAIT) {
|
2012-09-26 14:42:58 +00:00
|
|
|
virProcessKill(vm->pid,
|
|
|
|
(flags & VIR_QEMU_PROCESS_KILL_FORCE) ?
|
|
|
|
SIGKILL : SIGTERM);
|
|
|
|
return 0;
|
|
|
|
}
|
2011-04-21 15:19:06 +00:00
|
|
|
|
2018-08-02 07:05:18 +00:00
|
|
|
/* Request an extra delay of two seconds per current nhostdevs
|
|
|
|
* to be safe against stalls by the kernel freeing up the resources */
|
2019-10-17 08:10:10 +00:00
|
|
|
return virProcessKillPainfullyDelay(vm->pid,
|
|
|
|
!!(flags & VIR_QEMU_PROCESS_KILL_FORCE),
|
2021-06-23 09:34:57 +00:00
|
|
|
vm->def->nhostdevs * 2,
|
|
|
|
false);
|
2011-04-21 15:19:06 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2016-02-11 14:13:09 +00:00
|
|
|
/**
|
|
|
|
* qemuProcessBeginStopJob:
|
|
|
|
*
|
|
|
|
* Stop all current jobs by killing the domain and start a new one for
|
|
|
|
* qemuProcessStop.
|
|
|
|
*/
|
|
|
|
int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessBeginStopJob(virQEMUDriver *driver,
|
|
|
|
virDomainObj *vm,
|
2016-02-11 14:13:09 +00:00
|
|
|
qemuDomainJob job,
|
|
|
|
bool forceKill)
|
|
|
|
{
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
2016-02-11 14:13:09 +00:00
|
|
|
unsigned int killFlags = forceKill ? VIR_QEMU_PROCESS_KILL_FORCE : 0;
|
|
|
|
int ret = -1;
|
|
|
|
|
|
|
|
/* We need to prevent monitor EOF callback from doing our work (and
|
|
|
|
* sending misleading events) while the vm is unlocked inside
|
|
|
|
* BeginJob/ProcessKill API
|
|
|
|
*/
|
|
|
|
priv->beingDestroyed = true;
|
|
|
|
|
|
|
|
if (qemuProcessKill(vm, killFlags) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
/* Wake up anything waiting on domain condition */
|
|
|
|
virDomainObjBroadcast(vm);
|
|
|
|
|
|
|
|
if (qemuDomainObjBeginJob(driver, vm, job) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
ret = 0;
|
|
|
|
|
|
|
|
cleanup:
|
|
|
|
priv->beingDestroyed = false;
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2021-03-11 07:16:13 +00:00
|
|
|
void qemuProcessStop(virQEMUDriver *driver,
|
|
|
|
virDomainObj *vm,
|
2012-06-11 13:20:44 +00:00
|
|
|
virDomainShutoffReason reason,
|
2016-02-11 10:20:28 +00:00
|
|
|
qemuDomainAsyncJob asyncJob,
|
2012-06-11 13:20:44 +00:00
|
|
|
unsigned int flags)
|
2011-02-14 16:09:39 +00:00
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
int retries = 0;
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
2011-02-14 16:09:39 +00:00
|
|
|
virErrorPtr orig_err;
|
2021-03-11 07:16:13 +00:00
|
|
|
virDomainDef *def = vm->def;
|
2019-10-01 17:56:35 +00:00
|
|
|
const virNetDevVPortProfile *vport = NULL;
|
Convert 'int i' to 'size_t i' in src/qemu files
Convert the type of loop iterators named 'i', 'j', k',
'ii', 'jj', 'kk', to be 'size_t' instead of 'int' or
'unsigned int', also santizing 'ii', 'jj', 'kk' to use
the normal 'i', 'j', 'k' naming
Signed-off-by: Daniel P. Berrange <berrange@redhat.com>
2013-07-08 14:09:33 +00:00
|
|
|
size_t i;
|
2019-12-20 21:16:28 +00:00
|
|
|
g_autofree char *timestamp = NULL;
|
2019-12-20 21:16:30 +00:00
|
|
|
g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
|
|
|
|
g_autoptr(virConnect) conn = NULL;
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2016-10-06 14:54:41 +00:00
|
|
|
VIR_DEBUG("Shutting down vm=%p name=%s id=%d pid=%lld, "
|
2017-09-25 10:43:33 +00:00
|
|
|
"reason=%s, asyncJob=%s, flags=0x%x",
|
2013-10-31 11:28:46 +00:00
|
|
|
vm, vm->def->name, vm->def->id,
|
2018-04-25 12:42:34 +00:00
|
|
|
(long long)vm->pid,
|
2016-02-11 10:20:28 +00:00
|
|
|
virDomainShutoffReasonTypeToString(reason),
|
|
|
|
qemuDomainAsyncJobTypeToString(asyncJob),
|
|
|
|
flags);
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2014-01-07 13:31:13 +00:00
|
|
|
/* This method is routinely used in clean up paths. Disable error
|
|
|
|
* reporting so we don't squash a legit error. */
|
2018-12-06 17:33:04 +00:00
|
|
|
virErrorPreserveLast(&orig_err);
|
2014-01-07 13:31:13 +00:00
|
|
|
|
2016-02-11 10:20:28 +00:00
|
|
|
if (asyncJob != QEMU_ASYNC_JOB_NONE) {
|
|
|
|
if (qemuDomainObjBeginNestedJob(driver, vm, asyncJob) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
} else if (priv->job.asyncJob != QEMU_ASYNC_JOB_NONE &&
|
|
|
|
priv->job.asyncOwner == virThreadSelfID() &&
|
|
|
|
priv->job.active != QEMU_JOB_ASYNC_NESTED) {
|
|
|
|
VIR_WARN("qemuProcessStop called without a nested job (async=%s)",
|
|
|
|
qemuDomainAsyncJobTypeToString(asyncJob));
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!virDomainObjIsActive(vm)) {
|
|
|
|
VIR_DEBUG("VM '%s' not active", vm->def->name);
|
|
|
|
goto endjob;
|
|
|
|
}
|
|
|
|
|
2017-11-07 14:19:43 +00:00
|
|
|
qemuProcessBuildDestroyMemoryPaths(driver, vm, NULL, false);
|
2016-11-22 12:21:51 +00:00
|
|
|
|
2020-01-31 16:12:11 +00:00
|
|
|
if (!!g_atomic_int_dec_and_test(&driver->nactive) && driver->inhibitCallback)
|
2012-10-31 19:03:55 +00:00
|
|
|
driver->inhibitCallback(false, driver->inhibitOpaque);
|
|
|
|
|
2016-06-07 14:31:15 +00:00
|
|
|
if ((timestamp = virTimeStringNow()) != NULL) {
|
2016-09-19 08:17:57 +00:00
|
|
|
qemuDomainLogAppendMessage(driver, vm, "%s: shutting down, reason=%s\n",
|
|
|
|
timestamp,
|
|
|
|
virDomainShutoffReasonTypeToString(reason));
|
2011-02-14 16:09:39 +00:00
|
|
|
}
|
|
|
|
|
2014-11-18 23:55:48 +00:00
|
|
|
/* Clear network bandwidth */
|
2020-02-17 17:37:25 +00:00
|
|
|
virDomainClearNetBandwidth(vm->def);
|
2014-11-18 23:55:48 +00:00
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
virDomainConfVMNWFilterTeardown(vm);
|
|
|
|
|
2013-01-10 21:03:14 +00:00
|
|
|
if (cfg->macFilter) {
|
2013-05-21 07:21:20 +00:00
|
|
|
for (i = 0; i < def->nnets; i++) {
|
2021-03-11 07:16:13 +00:00
|
|
|
virDomainNetDef *net = def->nets[i];
|
2011-02-14 16:09:39 +00:00
|
|
|
if (net->ifname == NULL)
|
|
|
|
continue;
|
2014-03-07 17:34:54 +00:00
|
|
|
ignore_value(ebtablesRemoveForwardAllowIn(driver->ebtables,
|
|
|
|
net->ifname,
|
|
|
|
&net->mac));
|
2011-02-14 16:09:39 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-02-06 09:09:08 +00:00
|
|
|
virPortAllocatorRelease(priv->nbdPort);
|
2013-07-04 19:16:57 +00:00
|
|
|
priv->nbdPort = 0;
|
2013-01-31 13:48:06 +00:00
|
|
|
|
2011-10-05 17:31:54 +00:00
|
|
|
if (priv->agent) {
|
2022-01-28 17:42:45 +00:00
|
|
|
g_clear_pointer(&priv->agent, qemuAgentClose);
|
2011-10-05 17:31:54 +00:00
|
|
|
}
|
2016-11-16 13:43:03 +00:00
|
|
|
priv->agentError = false;
|
2011-10-05 17:31:54 +00:00
|
|
|
|
Fix (rare) deadlock in QEMU monitor callbacks
Some users report (very rarely) seeing a deadlock in the QEMU
monitor callbacks
Thread 10 (Thread 0x7fcd11e20700 (LWP 26753)):
#0 0x00000030d0e0de4d in __lll_lock_wait () from /lib64/libpthread.so.0
#1 0x00000030d0e09ca6 in _L_lock_840 () from /lib64/libpthread.so.0
#2 0x00000030d0e09ba8 in pthread_mutex_lock () from /lib64/libpthread.so.0
#3 0x00007fcd162f416d in virMutexLock (m=<optimized out>)
at util/threads-pthread.c:85
#4 0x00007fcd1632c651 in virDomainObjLock (obj=<optimized out>)
at conf/domain_conf.c:14256
#5 0x00007fcd0daf05cc in qemuProcessHandleMonitorDestroy (mon=0x7fcccc0029e0,
vm=0x7fcccc00a850) at qemu/qemu_process.c:1026
#6 0x00007fcd0db01710 in qemuMonitorDispose (obj=0x7fcccc0029e0)
at qemu/qemu_monitor.c:249
#7 0x00007fcd162fd4e3 in virObjectUnref (anyobj=<optimized out>)
at util/virobject.c:139
#8 0x00007fcd0db027a9 in qemuMonitorClose (mon=<optimized out>)
at qemu/qemu_monitor.c:860
#9 0x00007fcd0daf61ad in qemuProcessStop (driver=driver@entry=0x7fcd04079d50,
vm=vm@entry=0x7fcccc00a850,
reason=reason@entry=VIR_DOMAIN_SHUTOFF_DESTROYED, flags=flags@entry=0)
at qemu/qemu_process.c:4057
#10 0x00007fcd0db323cf in qemuDomainDestroyFlags (dom=<optimized out>,
flags=<optimized out>) at qemu/qemu_driver.c:1977
#11 0x00007fcd1637ff51 in virDomainDestroyFlags (
domain=domain@entry=0x7fccf00c1830, flags=1) at libvirt.c:2256
At frame #10 we are holding the domain lock, we call into
qemuProcessStop() to cleanup QEMU, which triggers the monitor
to close, which invokes qemuProcessHandleMonitorDestroy() which
tries to obtain the domain lock again. This is a non-recursive
lock, hence hang.
Since qemuMonitorPtr is a virObject, the unref call in
qemuProcessHandleMonitorDestroy no longer needs mutex
protection. The assignment of priv->mon = NULL, can be
instead done by the caller of qemuMonitorClose(), thus
removing all need for locking.
Signed-off-by: Daniel P. Berrange <berrange@redhat.com>
2012-09-26 14:54:58 +00:00
|
|
|
if (priv->mon) {
|
2022-01-28 17:42:45 +00:00
|
|
|
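/* Close the monitor and clear priv->mon here in the caller: as the log
 * message above explains, the monitor teardown must never need to
 * re-acquire the (already held) domain lock. */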
g_clear_pointer(&priv->mon, qemuMonitorClose);
|
2012-09-26 14:54:58 +00:00
|
|
|
}
|
2011-02-14 16:09:39 +00:00
|
|
|
|
|
|
|
if (priv->monConfig) {
|
|
|
|
if (priv->monConfig->type == VIR_DOMAIN_CHR_TYPE_UNIX)
|
|
|
|
unlink(priv->monConfig->data.nix.path);
|
2022-01-28 17:42:45 +00:00
|
|
|
g_clear_pointer(&priv->monConfig, virObjectUnref);
|
2011-02-14 16:09:39 +00:00
|
|
|
}
|
|
|
|
|
2020-03-20 11:06:17 +00:00
|
|
|
qemuDomainObjStopWorker(vm);
|
2020-02-12 12:26:11 +00:00
|
|
|
|
2016-03-29 22:22:46 +00:00
|
|
|
/* Remove the master key */
|
|
|
|
qemuDomainMasterKeyRemove(priv);
|
|
|
|
|
2018-04-19 08:00:36 +00:00
|
|
|
/* Do this before we delete the tree and remove pidfile. */
|
2018-05-11 13:40:34 +00:00
|
|
|
qemuProcessKillManagedPRDaemon(vm);
|
2018-04-19 08:00:36 +00:00
|
|
|
|
2014-05-13 06:54:20 +00:00
|
|
|
ignore_value(virDomainChrDefForeach(vm->def,
|
|
|
|
false,
|
|
|
|
qemuProcessCleanupChardevDevice,
|
|
|
|
NULL));
|
|
|
|
|
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
/* shut it off for sure */
|
2013-02-06 18:17:20 +00:00
|
|
|
ignore_value(qemuProcessKill(vm,
|
|
|
|
VIR_QEMU_PROCESS_KILL_FORCE|
|
|
|
|
VIR_QEMU_PROCESS_KILL_NOCHECK));
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2012-03-16 06:52:26 +00:00
|
|
|
qemuDomainCleanupRun(driver, vm);
|
|
|
|
|
2021-02-19 15:57:41 +00:00
|
|
|
qemuExtDevicesStop(driver, vm);
|
|
|
|
|
2020-02-25 09:55:10 +00:00
|
|
|
qemuDBusStop(driver, vm);
|
|
|
|
|
2019-08-08 14:54:58 +00:00
|
|
|
vm->def->id = -1;
|
|
|
|
|
2021-07-16 13:52:50 +00:00
|
|
|
/* Wake up anything waiting on domain condition */
|
|
|
|
virDomainObjBroadcast(vm);
|
|
|
|
|
2021-02-19 15:57:41 +00:00
|
|
|
virFileDeleteTree(priv->libDir);
|
|
|
|
virFileDeleteTree(priv->channelTargetDir);
|
|
|
|
|
2011-06-23 09:37:57 +00:00
|
|
|
/* Stop autodestroy in case guest is restarted */
|
2013-02-28 16:43:43 +00:00
|
|
|
qemuProcessAutoDestroyRemove(driver, vm);
|
2011-06-23 09:37:57 +00:00
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
/* now that we know it's stopped call the hook if present */
|
|
|
|
if (virHookPresent(VIR_HOOK_DRIVER_QEMU)) {
|
2019-12-20 21:16:28 +00:00
|
|
|
g_autofree char *xml = qemuDomainDefFormatXML(driver, NULL, vm->def, 0);
|
2011-02-14 16:09:39 +00:00
|
|
|
|
|
|
|
/* we can't stop the operation even if the script raised an error */
|
2015-09-23 22:13:57 +00:00
|
|
|
ignore_value(virHookCall(VIR_HOOK_DRIVER_QEMU, vm->def->name,
|
|
|
|
VIR_HOOK_QEMU_OP_STOPPED, VIR_HOOK_SUBOP_END,
|
|
|
|
NULL, xml, NULL));
|
2011-02-14 16:09:39 +00:00
|
|
|
}
|
|
|
|
|
2012-06-11 13:57:19 +00:00
|
|
|
/* Reset security labels unless the caller asked us not to */
|
|
|
|
if (!(flags & VIR_QEMU_PROCESS_STOP_NO_RELABEL))
|
2016-11-23 10:52:57 +00:00
|
|
|
qemuSecurityRestoreAllLabel(driver, vm,
|
|
|
|
!!(flags & VIR_QEMU_PROCESS_STOP_MIGRATED));
|
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
/* Clear out dynamically assigned labels */
|
2012-08-15 22:10:37 +00:00
|
|
|
for (i = 0; i < vm->def->nseclabels; i++) {
|
2014-11-13 14:25:30 +00:00
|
|
|
if (vm->def->seclabels[i]->type == VIR_DOMAIN_SECLABEL_DYNAMIC)
|
2012-08-15 22:10:37 +00:00
|
|
|
VIR_FREE(vm->def->seclabels[i]->label);
|
|
|
|
VIR_FREE(vm->def->seclabels[i]->imagelabel);
|
2011-02-14 16:09:39 +00:00
|
|
|
}
|
|
|
|
|
2015-10-20 12:12:48 +00:00
|
|
|
qemuHostdevReAttachDomainDevices(driver, vm->def);
|
2011-02-14 16:09:39 +00:00
|
|
|
|
|
|
|
for (i = 0; i < def->nnets; i++) {
|
2021-03-11 07:16:13 +00:00
|
|
|
virDomainNetDef *net = def->nets[i];
|
2014-03-11 07:17:26 +00:00
|
|
|
vport = virDomainNetGetActualVirtPortProfile(net);
|
|
|
|
switch (virDomainNetGetActualType(net)) {
|
|
|
|
case VIR_DOMAIN_NET_TYPE_DIRECT:
|
2011-11-02 17:19:48 +00:00
|
|
|
ignore_value(virNetDevMacVLanDeleteWithVPortProfile(
|
2012-07-17 12:07:59 +00:00
|
|
|
net->ifname, &net->mac,
|
2011-11-02 17:19:48 +00:00
|
|
|
virDomainNetGetActualDirectDev(net),
|
|
|
|
virDomainNetGetActualDirectMode(net),
|
2012-02-15 19:19:32 +00:00
|
|
|
virDomainNetGetActualVirtPortProfile(net),
|
2013-01-10 21:03:14 +00:00
|
|
|
cfg->stateDir));
|
2014-03-11 07:17:26 +00:00
|
|
|
break;
|
2016-03-23 11:37:59 +00:00
|
|
|
case VIR_DOMAIN_NET_TYPE_ETHERNET:
|
2019-08-26 04:24:34 +00:00
|
|
|
if (net->managed_tap != VIR_TRISTATE_BOOL_NO && net->ifname) {
|
2016-03-23 11:37:59 +00:00
|
|
|
ignore_value(virNetDevTapDelete(net->ifname, net->backend.tap));
|
|
|
|
VIR_FREE(net->ifname);
|
|
|
|
}
|
|
|
|
break;
|
2014-03-11 07:17:26 +00:00
|
|
|
case VIR_DOMAIN_NET_TYPE_BRIDGE:
|
2019-04-30 12:26:25 +00:00
|
|
|
case VIR_DOMAIN_NET_TYPE_NETWORK:
|
2014-03-11 07:17:26 +00:00
|
|
|
#ifdef VIR_NETDEV_TAP_REQUIRE_MANUAL_CLEANUP
|
|
|
|
if (!(vport && vport->virtPortType == VIR_NETDEV_VPORT_PROFILE_OPENVSWITCH))
|
2014-09-11 15:15:24 +00:00
|
|
|
ignore_value(virNetDevTapDelete(net->ifname, net->backend.tap));
|
2014-03-11 07:17:26 +00:00
|
|
|
#endif
|
|
|
|
break;
|
2016-09-23 15:04:53 +00:00
|
|
|
case VIR_DOMAIN_NET_TYPE_USER:
|
|
|
|
case VIR_DOMAIN_NET_TYPE_VHOSTUSER:
|
|
|
|
case VIR_DOMAIN_NET_TYPE_SERVER:
|
|
|
|
case VIR_DOMAIN_NET_TYPE_CLIENT:
|
|
|
|
case VIR_DOMAIN_NET_TYPE_MCAST:
|
|
|
|
case VIR_DOMAIN_NET_TYPE_INTERNAL:
|
|
|
|
case VIR_DOMAIN_NET_TYPE_HOSTDEV:
|
|
|
|
case VIR_DOMAIN_NET_TYPE_UDP:
|
2020-10-14 17:08:25 +00:00
|
|
|
case VIR_DOMAIN_NET_TYPE_VDPA:
|
2016-09-23 15:04:53 +00:00
|
|
|
case VIR_DOMAIN_NET_TYPE_LAST:
|
|
|
|
/* No special cleanup procedure for these types. */
|
|
|
|
break;
|
2011-02-14 16:09:39 +00:00
|
|
|
}
|
2011-07-04 06:27:12 +00:00
|
|
|
/* release the physical device (or any other resources used by
|
|
|
|
* this interface in the network driver)
|
|
|
|
*/
|
2015-02-23 20:54:56 +00:00
|
|
|
if (vport) {
|
|
|
|
if (vport->virtPortType == VIR_NETDEV_VPORT_PROFILE_MIDONET) {
|
|
|
|
ignore_value(virNetDevMidonetUnbindPort(vport));
|
|
|
|
} else if (vport->virtPortType == VIR_NETDEV_VPORT_PROFILE_OPENVSWITCH) {
|
|
|
|
ignore_value(virNetDevOpenvswitchRemovePort(
|
|
|
|
virDomainNetGetActualBridgeName(net),
|
|
|
|
net->ifname));
|
|
|
|
}
|
|
|
|
}
|
2012-02-10 21:09:00 +00:00
|
|
|
|
2013-08-27 17:06:18 +00:00
|
|
|
/* kick the device out of the hostdev list too */
|
|
|
|
virDomainNetRemoveHostdev(def, net);
|
2018-07-26 14:32:04 +00:00
|
|
|
if (net->type == VIR_DOMAIN_NET_TYPE_NETWORK) {
|
|
|
|
if (conn || (conn = virGetConnectNetwork()))
|
|
|
|
virDomainNetReleaseActualDevice(conn, vm->def, net);
|
|
|
|
else
|
|
|
|
VIR_WARN("Unable to release network device '%s'", NULLSTR(net->ifname));
|
|
|
|
}
|
2021-07-07 09:18:19 +00:00
|
|
|
|
|
|
|
if (virDomainNetDefIsOvsport(net) &&
|
|
|
|
virNetDevOpenvswitchInterfaceClearQos(net->ifname, vm->def->uuid) < 0) {
|
|
|
|
VIR_WARN("cannot clear bandwidth setting for ovs device : %s",
|
|
|
|
net->ifname);
|
|
|
|
}
|
2011-07-04 06:27:12 +00:00
|
|
|
}
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2014-03-25 06:49:44 +00:00
|
|
|
retry:
|
2022-01-25 16:19:53 +00:00
|
|
|
if ((ret = virDomainCgroupRemoveCgroup(vm, priv->cgroup, priv->machineName)) < 0) {
|
2011-02-14 16:09:39 +00:00
|
|
|
if (ret == -EBUSY && (retries++ < 5)) {
|
2019-10-02 17:01:11 +00:00
|
|
|
g_usleep(200*1000);
|
2011-02-14 16:09:39 +00:00
|
|
|
goto retry;
|
|
|
|
}
|
|
|
|
VIR_WARN("Failed to remove cgroup for %s",
|
|
|
|
vm->def->name);
|
|
|
|
}
|
2016-03-28 13:30:29 +00:00
|
|
|
|
2017-11-10 12:21:51 +00:00
|
|
|
/* Remove resctrl allocation after cgroups are cleaned up which makes it
|
|
|
|
* kind of safer (although removing the allocation should work even with
|
|
|
|
* pids in tasks file) */
|
2018-11-12 13:31:45 +00:00
|
|
|
for (i = 0; i < vm->def->nresctrls; i++) {
|
|
|
|
size_t j = 0;
|
|
|
|
|
|
|
|
for (j = 0; j < vm->def->resctrls[i]->nmonitors; j++) {
|
2021-03-11 07:16:13 +00:00
|
|
|
virDomainResctrlMonDef *mon = NULL;
|
2018-11-12 13:31:45 +00:00
|
|
|
|
|
|
|
mon = vm->def->resctrls[i]->monitors[j];
|
|
|
|
virResctrlMonitorRemove(mon->instance);
|
|
|
|
}
|
|
|
|
|
2018-07-30 03:12:35 +00:00
|
|
|
virResctrlAllocRemove(vm->def->resctrls[i]->alloc);
|
2018-11-12 13:31:45 +00:00
|
|
|
}
|
2017-11-10 12:21:51 +00:00
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
qemuProcessRemoveDomainStatus(driver, vm);
|
|
|
|
|
2012-11-10 01:40:23 +00:00
|
|
|
/* Remove VNC and Spice ports from port reservation bitmap, but only if
|
|
|
|
they were reserved by the driver (autoport=yes)
|
2011-02-14 16:09:39 +00:00
|
|
|
*/
|
2013-05-21 07:21:20 +00:00
|
|
|
for (i = 0; i < vm->def->ngraphics; ++i) {
|
2021-03-11 07:16:13 +00:00
|
|
|
virDomainGraphicsDef *graphics = vm->def->graphics[i];
|
2013-04-30 14:26:43 +00:00
|
|
|
if (graphics->type == VIR_DOMAIN_GRAPHICS_TYPE_VNC) {
|
|
|
|
if (graphics->data.vnc.autoport) {
|
2018-02-06 09:09:08 +00:00
|
|
|
virPortAllocatorRelease(graphics->data.vnc.port);
|
2014-09-03 19:32:36 +00:00
|
|
|
} else if (graphics->data.vnc.portReserved) {
|
2018-07-04 07:30:43 +00:00
|
|
|
virPortAllocatorRelease(graphics->data.vnc.port);
|
2014-06-24 11:34:18 +00:00
|
|
|
graphics->data.vnc.portReserved = false;
|
|
|
|
}
|
2016-11-22 11:09:32 +00:00
|
|
|
if (graphics->data.vnc.websocketGenerated) {
|
2018-02-06 09:09:08 +00:00
|
|
|
virPortAllocatorRelease(graphics->data.vnc.websocket);
|
2016-11-22 11:09:32 +00:00
|
|
|
graphics->data.vnc.websocketGenerated = false;
|
|
|
|
graphics->data.vnc.websocket = -1;
|
|
|
|
} else if (graphics->data.vnc.websocket) {
|
2018-02-06 09:09:10 +00:00
|
|
|
virPortAllocatorRelease(graphics->data.vnc.websocket);
|
2016-11-22 11:09:32 +00:00
|
|
|
}
|
2012-11-10 01:40:23 +00:00
|
|
|
}
|
2014-06-24 11:34:18 +00:00
|
|
|
if (graphics->type == VIR_DOMAIN_GRAPHICS_TYPE_SPICE) {
|
|
|
|
if (graphics->data.spice.autoport) {
|
2018-02-06 09:09:08 +00:00
|
|
|
virPortAllocatorRelease(graphics->data.spice.port);
|
|
|
|
virPortAllocatorRelease(graphics->data.spice.tlsPort);
|
2014-06-24 11:34:18 +00:00
|
|
|
} else {
|
|
|
|
if (graphics->data.spice.portReserved) {
|
2018-02-06 09:09:10 +00:00
|
|
|
virPortAllocatorRelease(graphics->data.spice.port);
|
2014-06-24 11:34:18 +00:00
|
|
|
graphics->data.spice.portReserved = false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (graphics->data.spice.tlsPortReserved) {
|
2018-02-06 09:09:10 +00:00
|
|
|
virPortAllocatorRelease(graphics->data.spice.tlsPort);
|
2014-06-24 11:34:18 +00:00
|
|
|
graphics->data.spice.tlsPortReserved = false;
|
|
|
|
}
|
|
|
|
}
|
2012-11-10 01:40:23 +00:00
|
|
|
}
|
2011-02-14 16:09:39 +00:00
|
|
|
}
|
|
|
|
|
2021-01-22 14:48:03 +00:00
|
|
|
for (i = 0; i < vm->ndeprecations; i++)
|
|
|
|
g_free(vm->deprecations[i]);
|
2022-01-28 17:42:45 +00:00
|
|
|
g_clear_pointer(&vm->deprecations, g_free);
|
2021-01-22 14:48:03 +00:00
|
|
|
vm->ndeprecations = 0;
|
2011-05-04 10:59:20 +00:00
|
|
|
vm->taint = 0;
|
2011-02-14 16:09:39 +00:00
|
|
|
vm->pid = -1;
|
2011-05-04 09:07:01 +00:00
|
|
|
virDomainObjSetState(vm, VIR_DOMAIN_SHUTOFF, reason);
|
2015-04-10 13:21:23 +00:00
|
|
|
for (i = 0; i < vm->def->niothreadids; i++)
|
|
|
|
vm->def->iothreadids[i]->thread_id = 0;
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2021-03-17 15:30:31 +00:00
|
|
|
/* clean up a possible backup job */
|
|
|
|
if (priv->backup)
|
|
|
|
qemuBackupJobTerminate(vm, QEMU_DOMAIN_JOB_STATUS_CANCELED);
|
|
|
|
|
qemuProcessStop: Remove image metadata for running mirror jobs
If user starts a blockcommit or a blockcopy then we modify access
for qemu on both images and leave it like that until the job
terminates. So far so good. Problem is, if user instead of
terminating the job (where we would modify the access again so
that the state before the job is restored) calls destroy on the
domain or if qemu dies whilst executing the block job. In this
case we don't ever clear the access we granted at the beginning.
To fix this, maybe a bit harsh approach is used, but it works:
after all labels were restored (that is after
qemuSecurityRestoreAllLabel() was called), we iterate over each
disk in the domain and remove XATTRs from the whole backing chain
and also from any file the disk is being mirrored to.
This would have been done at the time of pivot, but it isn't
because user decided to kill the domain instead. If we don't do
this and leave some XATTRs behind the domain might be unable to
start.
Also, secdriver can't do this because it doesn't know if there is
any job running. It's outside of its scope - the hypervisor
driver is responsible for calling secdriver's APIs.
Moreover, this is safe to call because we don't remember labels
for any member of a backing chain except of the top layer. But
that one was restored in qemuSecurityRestoreAllLabel() call done
earlier. Therefore, not only we don't remember labels (and thus
this is basically a NOP for other images in the backing chain) it
is also safe to call this when no blockjob was started in the
first place, or if some parts of the backing chain are shared
with some other domains - this is NOP, unless a block job is
active at the time of domain destroy.
https://bugzilla.redhat.com/show_bug.cgi?id=1741456#c19
Signed-off-by: Michal Privoznik <mprivozn@redhat.com>
Reviewed-by: Peter Krempa <pkrempa@redhat.com>
2019-11-18 16:40:01 +00:00
|
|
|
/* Do this explicitly after vm->pid is reset so that security drivers don't
|
|
|
|
* try to enter the domain's namespace which is non-existent by now as qemu
|
|
|
|
* is no longer running. */
|
2019-12-04 16:36:33 +00:00
|
|
|
if (!(flags & VIR_QEMU_PROCESS_STOP_NO_RELABEL)) {
|
|
|
|
for (i = 0; i < def->ndisks; i++) {
|
2021-03-11 07:16:13 +00:00
|
|
|
virDomainDiskDef *disk = def->disks[i];
|
2019-11-18 16:40:01 +00:00
|
|
|
|
2020-05-18 13:07:46 +00:00
|
|
|
if (disk->mirror) {
|
|
|
|
if (qemuSecurityRestoreImageLabel(driver, vm, disk->mirror, false) < 0)
|
|
|
|
VIR_WARN("Unable to restore security label on %s", disk->dst);
|
2020-05-18 13:11:49 +00:00
|
|
|
|
|
|
|
if (virStorageSourceChainHasNVMe(disk->mirror))
|
|
|
|
qemuHostdevReAttachOneNVMeDisk(driver, vm->def->name, disk->mirror);
|
2020-05-18 13:07:46 +00:00
|
|
|
}
|
2019-11-18 16:40:01 +00:00
|
|
|
|
2019-12-04 16:36:33 +00:00
|
|
|
qemuBlockRemoveImageMetadata(driver, vm, disk->dst, disk->src);
|
2020-09-22 12:39:27 +00:00
|
|
|
|
|
|
|
/* for now transient disks are forbidden with migration so they
|
|
|
|
* can be handled here */
|
|
|
|
if (disk->transient &&
|
2021-05-18 15:47:48 +00:00
|
|
|
QEMU_DOMAIN_DISK_PRIVATE(disk)->transientOverlayCreated) {
|
2020-09-22 12:39:27 +00:00
|
|
|
VIR_DEBUG("Removing transient overlay '%s' of disk '%s'",
|
|
|
|
disk->src->path, disk->dst);
|
|
|
|
if (qemuDomainStorageFileInit(driver, vm, disk->src, NULL) >= 0) {
|
2021-01-21 15:46:14 +00:00
|
|
|
virStorageSourceUnlink(disk->src);
|
|
|
|
virStorageSourceDeinit(disk->src);
|
2020-09-22 12:39:27 +00:00
|
|
|
}
|
|
|
|
}
|
2019-12-04 16:36:33 +00:00
|
|
|
}
|
2019-11-18 16:40:01 +00:00
|
|
|
}
|
|
|
|
|
2020-12-09 10:06:29 +00:00
|
|
|
qemuSecurityReleaseLabel(driver->securityManager, vm->def);
|
|
|
|
|
2017-08-23 12:19:36 +00:00
|
|
|
/* clear all private data entries which are no longer needed */
|
|
|
|
qemuDomainObjPrivateDataClear(priv);
|
2017-05-22 11:36:55 +00:00
|
|
|
|
2011-03-23 20:50:29 +00:00
|
|
|
/* The "release" hook cleans up additional resources */
|
2011-03-22 13:12:36 +00:00
|
|
|
if (virHookPresent(VIR_HOOK_DRIVER_QEMU)) {
|
2019-12-20 21:16:28 +00:00
|
|
|
g_autofree char *xml = qemuDomainDefFormatXML(driver, NULL, vm->def, 0);
|
2011-03-22 13:12:36 +00:00
|
|
|
|
|
|
|
/* we can't stop the operation even if the script raised an error */
|
|
|
|
virHookCall(VIR_HOOK_DRIVER_QEMU, vm->def->name,
|
2012-02-27 16:06:22 +00:00
|
|
|
VIR_HOOK_QEMU_OP_RELEASE, VIR_HOOK_SUBOP_END,
|
|
|
|
NULL, xml, NULL);
|
2011-03-22 13:12:36 +00:00
|
|
|
}
|
|
|
|
|
2016-09-08 13:16:58 +00:00
|
|
|
virDomainObjRemoveTransientDef(vm);
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2016-02-11 10:20:28 +00:00
|
|
|
endjob:
|
|
|
|
if (asyncJob != QEMU_ASYNC_JOB_NONE)
|
|
|
|
qemuDomainObjEndJob(driver, vm);
|
|
|
|
|
|
|
|
cleanup:
|
2018-12-06 17:33:04 +00:00
|
|
|
virErrorRestore(&orig_err);
|
2011-02-14 16:09:39 +00:00
|
|
|
}
|
2011-06-23 09:37:57 +00:00
|
|
|
|
|
|
|
|
2018-03-27 15:39:53 +00:00
|
|
|
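/* Close callback run when the connection that registered the domain for
 * auto-destroy is closed: cancel any long-running async job and destroy
 * the domain. */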
static void
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessAutoDestroy(virDomainObj *dom,
|
2017-07-11 13:53:58 +00:00
|
|
|
virConnectPtr conn,
|
|
|
|
void *opaque)
|
|
|
|
{
|
2021-03-11 07:16:13 +00:00
|
|
|
virQEMUDriver *driver = opaque;
|
|
|
|
qemuDomainObjPrivate *priv = dom->privateData;
|
|
|
|
virObjectEvent *event = NULL;
|
2017-07-11 13:53:58 +00:00
|
|
|
unsigned int stopFlags = 0;
|
|
|
|
|
|
|
|
VIR_DEBUG("vm=%s, conn=%p", dom->def->name, conn);
|
|
|
|
|
|
|
|
if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_IN)
|
|
|
|
stopFlags |= VIR_QEMU_PROCESS_STOP_MIGRATED;
|
|
|
|
|
|
|
|
if (priv->job.asyncJob) {
|
|
|
|
VIR_DEBUG("vm=%s has long-term job active, cancelling",
|
|
|
|
dom->def->name);
|
|
|
|
qemuDomainObjDiscardAsyncJob(driver, dom);
|
|
|
|
}
|
|
|
|
|
|
|
|
VIR_DEBUG("Killing domain");
|
|
|
|
|
|
|
|
if (qemuProcessBeginStopJob(driver, dom, QEMU_JOB_DESTROY, true) < 0)
|
2018-03-27 15:39:53 +00:00
|
|
|
return;
|
2017-07-11 13:53:58 +00:00
|
|
|
|
|
|
|
qemuProcessStop(driver, dom, VIR_DOMAIN_SHUTOFF_DESTROYED,
|
|
|
|
QEMU_ASYNC_JOB_NONE, stopFlags);
|
|
|
|
|
|
|
|
virDomainAuditStop(dom, "destroyed");
|
|
|
|
event = virDomainEventLifecycleNewFromObj(dom,
|
|
|
|
VIR_DOMAIN_EVENT_STOPPED,
|
|
|
|
VIR_DOMAIN_EVENT_STOPPED_DESTROYED);
|
|
|
|
|
|
|
|
qemuDomainRemoveInactive(driver, dom);
|
|
|
|
|
2017-08-15 07:12:43 +00:00
|
|
|
qemuDomainObjEndJob(driver, dom);
|
|
|
|
|
2018-06-12 17:33:02 +00:00
|
|
|
virObjectEventStateQueue(driver->domainEventState, event);
|
2017-07-11 13:53:58 +00:00
|
|
|
}
|
|
|
|
|
2021-03-11 07:16:13 +00:00
|
|
|
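/**
 * qemuProcessAutoDestroyAdd:
 *
 * Register @vm so that it is automatically destroyed (via
 * qemuProcessAutoDestroy) when @conn is closed.
 */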
int qemuProcessAutoDestroyAdd(virQEMUDriver *driver,
|
|
|
|
virDomainObj *vm,
|
2017-07-11 13:53:58 +00:00
|
|
|
virConnectPtr conn)
|
|
|
|
{
|
|
|
|
VIR_DEBUG("vm=%s, conn=%p", vm->def->name, conn);
|
|
|
|
return virCloseCallbacksSet(driver->closeCallbacks, vm, conn,
|
|
|
|
qemuProcessAutoDestroy);
|
|
|
|
}
|
|
|
|
|
2021-03-11 07:16:13 +00:00
|
|
|
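/**
 * qemuProcessAutoDestroyRemove:
 *
 * Unregister the auto-destroy close callback previously set for @vm.
 */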
int qemuProcessAutoDestroyRemove(virQEMUDriver *driver,
|
|
|
|
virDomainObj *vm)
|
2017-07-11 13:53:58 +00:00
|
|
|
{
|
|
|
|
VIR_DEBUG("vm=%s", vm->def->name);
|
2019-10-17 08:10:10 +00:00
|
|
|
return virCloseCallbacksUnset(driver->closeCallbacks, vm,
|
|
|
|
qemuProcessAutoDestroy);
|
2017-07-11 13:53:58 +00:00
|
|
|
}
|
|
|
|
|
2021-03-11 07:16:13 +00:00
|
|
|
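/**
 * qemuProcessAutoDestroyActive:
 *
 * Return true if @vm is registered for auto-destroy on connection close.
 */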
bool qemuProcessAutoDestroyActive(virQEMUDriver *driver,
|
|
|
|
virDomainObj *vm)
|
2017-07-11 13:53:58 +00:00
|
|
|
{
|
|
|
|
virCloseCallback cb;
|
|
|
|
VIR_DEBUG("vm=%s", vm->def->name);
|
|
|
|
cb = virCloseCallbacksGet(driver->closeCallbacks, vm, NULL);
|
|
|
|
return cb == qemuProcessAutoDestroy;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
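/**
 * qemuProcessRefreshDisks:
 *
 * Query the monitor for the current state of all block devices and update
 * medium presence, tray status and disk private data accordingly.
 */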
int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessRefreshDisks(virQEMUDriver *driver,
|
|
|
|
virDomainObj *vm,
|
2017-07-11 13:53:58 +00:00
|
|
|
qemuDomainAsyncJob asyncJob)
|
|
|
|
{
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
2018-06-26 16:15:09 +00:00
|
|
|
bool blockdev = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV);
|
2021-11-30 10:49:24 +00:00
|
|
|
g_autoptr(GHashTable) table = NULL;
|
2017-07-11 13:53:58 +00:00
|
|
|
size_t i;
|
|
|
|
|
|
|
|
if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) == 0) {
|
|
|
|
table = qemuMonitorGetBlockInfo(priv->mon);
|
2021-11-24 12:09:32 +00:00
|
|
|
qemuDomainObjExitMonitor(driver, vm);
|
2017-07-11 13:53:58 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (!table)
|
2021-11-30 10:49:24 +00:00
|
|
|
return -1;
|
2017-07-11 13:53:58 +00:00
|
|
|
|
|
|
|
for (i = 0; i < vm->def->ndisks; i++) {
|
2021-03-11 07:16:13 +00:00
|
|
|
virDomainDiskDef *disk = vm->def->disks[i];
|
|
|
|
qemuDomainDiskPrivate *diskpriv = QEMU_DOMAIN_DISK_PRIVATE(disk);
|
2017-07-11 13:53:58 +00:00
|
|
|
struct qemuDomainDiskInfo *info;
|
2018-06-26 16:15:09 +00:00
|
|
|
const char *entryname = disk->info.alias;
|
|
|
|
|
2020-04-17 10:36:07 +00:00
|
|
|
if (blockdev && diskpriv->qomName)
|
2018-06-26 16:15:09 +00:00
|
|
|
entryname = diskpriv->qomName;
|
2017-07-11 13:53:58 +00:00
|
|
|
|
2018-06-26 16:15:09 +00:00
|
|
|
if (!(info = virHashLookup(table, entryname)))
|
2017-07-11 13:53:58 +00:00
|
|
|
continue;
|
|
|
|
|
|
|
|
if (info->removable) {
|
|
|
|
if (info->empty)
|
|
|
|
virDomainDiskEmptySource(disk);
|
|
|
|
|
|
|
|
if (info->tray) {
|
|
|
|
if (info->tray_open)
|
|
|
|
disk->tray_status = VIR_DOMAIN_DISK_TRAY_OPEN;
|
|
|
|
else
|
|
|
|
disk->tray_status = VIR_DOMAIN_DISK_TRAY_CLOSED;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* fill in additional data */
|
|
|
|
diskpriv->removable = info->removable;
|
|
|
|
diskpriv->tray = info->tray;
|
|
|
|
}
|
|
|
|
|
2021-11-30 10:49:24 +00:00
|
|
|
return 0;
|
2017-07-11 13:53:58 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2020-07-15 15:54:07 +00:00
|
|
|
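/* Query QEMU for the effective 'migratable' setting of a host-passthrough
 * (or maximum) CPU and fix up the definition for domains started by older
 * libvirt/QEMU which did not record it correctly. */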
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessRefreshCPUMigratability(virQEMUDriver *driver,
|
|
|
|
virDomainObj *vm,
|
2020-07-15 15:54:07 +00:00
|
|
|
qemuDomainAsyncJob asyncJob)
|
|
|
|
{
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
|
|
|
virDomainDef *def = vm->def;
|
2022-02-07 11:29:47 +00:00
|
|
|
const char *cpuQOMPath = qemuProcessGetVCPUQOMPath(vm);
|
2020-07-15 15:54:07 +00:00
|
|
|
bool migratable;
|
|
|
|
int rc;
|
|
|
|
|
2021-02-05 18:35:10 +00:00
|
|
|
if (def->cpu->mode != VIR_CPU_MODE_HOST_PASSTHROUGH &&
|
|
|
|
def->cpu->mode != VIR_CPU_MODE_MAXIMUM)
|
2020-07-15 15:54:07 +00:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
/* If the cpu.migratable capability is present, the migratable attribute
|
|
|
|
* is set correctly. */
|
|
|
|
if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_CPU_MIGRATABLE))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (!ARCH_IS_X86(def->os.arch))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
|
|
|
|
return -1;
|
|
|
|
|
2022-02-07 10:45:18 +00:00
|
|
|
rc = qemuMonitorGetCPUMigratable(priv->mon, cpuQOMPath, &migratable);
|
2020-07-15 15:54:07 +00:00
|
|
|
|
2021-11-24 12:11:52 +00:00
|
|
|
qemuDomainObjExitMonitor(driver, vm);
|
|
|
|
if (rc < 0)
|
2020-07-15 15:54:07 +00:00
|
|
|
return -1;
|
|
|
|
|
|
|
|
if (rc == 1)
|
|
|
|
migratable = false;
|
|
|
|
|
|
|
|
/* Libvirt 6.5.0 would set migratable='off' for running domains even though
|
|
|
|
* the actual default used by QEMU was 'on'. */
|
|
|
|
if (def->cpu->migratable == VIR_TRISTATE_SWITCH_OFF && migratable) {
|
|
|
|
VIR_DEBUG("Fixing CPU migratable attribute");
|
|
|
|
def->cpu->migratable = VIR_TRISTATE_SWITCH_ON;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (def->cpu->migratable == VIR_TRISTATE_SWITCH_ABSENT)
|
|
|
|
def->cpu->migratable = virTristateSwitchFromBool(migratable);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2017-10-06 10:57:15 +00:00
|
|
|
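/* Refresh the active CPU definition of a reconnected domain; in particular,
 * replace a host-model CPU left behind by old libvirt (< 2.3) with the
 * equivalent custom CPU. */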
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessRefreshCPU(virQEMUDriver *driver,
|
|
|
|
virDomainObj *vm)
|
2017-10-06 10:57:15 +00:00
|
|
|
{
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
2019-11-29 10:40:39 +00:00
|
|
|
g_autoptr(virCPUDef) host = NULL;
|
|
|
|
g_autoptr(virCPUDef) hostmig = NULL;
|
|
|
|
g_autoptr(virCPUDef) cpu = NULL;
|
2017-10-06 10:57:15 +00:00
|
|
|
|
2019-11-29 10:40:39 +00:00
|
|
|
if (!virQEMUCapsGuestIsNative(driver->hostarch, vm->def->os.arch))
|
|
|
|
return 0;
|
2017-10-06 10:57:15 +00:00
|
|
|
|
2019-11-29 10:40:39 +00:00
|
|
|
if (!vm->def->cpu)
|
|
|
|
return 0;
|
|
|
|
|
2020-07-15 15:54:07 +00:00
|
|
|
if (qemuProcessRefreshCPUMigratability(driver, vm, QEMU_ASYNC_JOB_NONE) < 0)
|
|
|
|
return -1;
|
|
|
|
|
2019-11-29 10:40:39 +00:00
|
|
|
if (!(host = virQEMUDriverGetHostCPU(driver))) {
|
|
|
|
virResetLastError();
|
|
|
|
return 0;
|
2017-10-06 10:57:15 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* If the domain with a host-model CPU was started by an old libvirt
|
|
|
|
* (< 2.3) which didn't replace the CPU with a custom one, let's do it now
|
|
|
|
* since the rest of our code does not really expect a host-model CPU in a
|
|
|
|
* running domain.
|
|
|
|
*/
|
|
|
|
if (vm->def->cpu->mode == VIR_CPU_MODE_HOST_MODEL) {
|
2020-05-25 12:39:45 +00:00
|
|
|
/*
|
|
|
|
* PSeries domains are able to run with host-model CPU by design,
|
|
|
|
* even on Libvirt newer than 2.3, never replacing host-model with
|
|
|
|
* custom in the virCPUUpdate() call. There is no need to call
|
|
|
|
* virCPUUpdate() and qemuProcessUpdateCPU() in this case.
|
|
|
|
*/
|
|
|
|
if (qemuDomainIsPSeries(vm->def))
|
|
|
|
return 0;
|
|
|
|
|
2019-11-29 10:40:39 +00:00
|
|
|
if (!(hostmig = virCPUCopyMigratable(host->arch, host)))
|
|
|
|
return -1;
|
2017-10-06 10:57:15 +00:00
|
|
|
|
2019-11-29 10:40:39 +00:00
|
|
|
if (!(cpu = virCPUDefCopyWithoutModel(hostmig)) ||
|
|
|
|
virCPUDefCopyModelFilter(cpu, hostmig, false,
|
2017-10-06 11:23:36 +00:00
|
|
|
virQEMUCapsCPUFilterFeatures,
|
2019-11-29 10:40:39 +00:00
|
|
|
&host->arch) < 0)
|
|
|
|
return -1;
|
2017-10-06 11:23:36 +00:00
|
|
|
|
|
|
|
if (virCPUUpdate(vm->def->os.arch, vm->def->cpu, cpu) < 0)
|
2019-11-29 10:40:39 +00:00
|
|
|
return -1;
|
2017-10-06 10:57:15 +00:00
|
|
|
|
|
|
|
if (qemuProcessUpdateCPU(driver, vm, QEMU_ASYNC_JOB_NONE) < 0)
|
2019-11-29 10:40:39 +00:00
|
|
|
return -1;
|
2017-10-06 12:49:07 +00:00
|
|
|
} else if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_QUERY_CPU_MODEL_EXPANSION)) {
|
|
|
|
/* We only try to fix CPUs when the libvirt/QEMU combo used to start
|
|
|
|
* the domain did not know about query-cpu-model-expansion in which
|
|
|
|
* case the host-model is known to not contain features which QEMU
|
|
|
|
* doesn't know about.
|
|
|
|
*/
|
|
|
|
if (qemuDomainFixupCPUs(vm, &priv->origCPU) < 0)
|
2019-11-29 10:40:39 +00:00
|
|
|
return -1;
|
2017-10-06 10:57:15 +00:00
|
|
|
}
|
|
|
|
|
2019-11-29 10:40:39 +00:00
|
|
|
return 0;
|
2017-10-06 10:57:15 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2018-10-18 10:34:49 +00:00
|
|
|
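/* Hash table iterator callback for qemuProcessRefreshLegacyBlockjobs:
 * recreate the tracking data for one block job reported by the monitor. */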
static int
|
|
|
|
qemuProcessRefreshLegacyBlockjob(void *payload,
|
2020-10-21 11:31:16 +00:00
|
|
|
const char *name,
|
2018-10-18 10:34:49 +00:00
|
|
|
void *opaque)
|
|
|
|
{
|
|
|
|
const char *jobname = name;
|
2021-03-11 07:16:13 +00:00
|
|
|
virDomainObj *vm = opaque;
|
|
|
|
qemuMonitorBlockJobInfo *info = payload;
|
|
|
|
virDomainDiskDef *disk;
|
|
|
|
qemuBlockJobData *job;
|
2018-11-23 11:45:32 +00:00
|
|
|
qemuBlockJobType jobtype = info->type;
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
2018-10-18 10:34:49 +00:00
|
|
|
|
|
|
|
if (!(disk = qemuProcessFindDomainDiskByAliasOrQOM(vm, jobname, jobname))) {
|
|
|
|
VIR_DEBUG("could not find disk for block job '%s'", jobname);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2019-01-18 07:39:40 +00:00
|
|
|
if (jobtype == QEMU_BLOCKJOB_TYPE_COMMIT &&
|
2018-11-23 11:45:32 +00:00
|
|
|
disk->mirrorJob == VIR_DOMAIN_BLOCK_JOB_TYPE_ACTIVE_COMMIT)
|
|
|
|
jobtype = disk->mirrorJob;
|
|
|
|
|
2018-11-29 16:35:52 +00:00
|
|
|
if (!(job = qemuBlockJobDiskNew(vm, disk, jobtype, jobname)))
|
2018-11-19 15:48:09 +00:00
|
|
|
return -1;
|
|
|
|
|
2018-10-18 10:34:49 +00:00
|
|
|
if (disk->mirror) {
|
2020-12-04 15:07:58 +00:00
|
|
|
if ((!info->ready_present && info->end == info->cur) ||
|
|
|
|
info->ready) {
|
2018-10-18 10:34:49 +00:00
|
|
|
disk->mirrorState = VIR_DOMAIN_DISK_MIRROR_STATE_READY;
|
2019-01-17 15:34:11 +00:00
|
|
|
job->state = VIR_DOMAIN_BLOCK_JOB_READY;
|
|
|
|
}
|
2019-01-23 14:54:53 +00:00
|
|
|
|
|
|
|
/* Pre-blockdev block copy labelled the chain of the mirrored device
|
|
|
|
* just before pivoting. At that point it was no longer known whether
|
|
|
|
* it's even necessary (e.g. disk is being reused). This code fixes
|
|
|
|
* the labelling in case the job was started in a libvirt version
|
|
|
|
* which did not label the chain when the block copy was started.
|
|
|
|
* Note that we can't do much on failure. */
|
|
|
|
if (disk->mirrorJob == VIR_DOMAIN_BLOCK_JOB_TYPE_COPY) {
|
|
|
|
if (qemuDomainDetermineDiskChain(priv->driver, vm, disk,
|
|
|
|
disk->mirror, true) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
if (disk->mirror->format &&
|
|
|
|
disk->mirror->format != VIR_STORAGE_FILE_RAW &&
|
2021-07-14 14:46:54 +00:00
|
|
|
(qemuDomainNamespaceSetupDisk(vm, disk->mirror, NULL) < 0 ||
|
2019-01-23 14:54:53 +00:00
|
|
|
qemuSetupImageChainCgroup(vm, disk->mirror) < 0 ||
|
|
|
|
qemuSecuritySetImageLabel(priv->driver, vm, disk->mirror,
|
2020-02-27 10:20:51 +00:00
|
|
|
true, true) < 0))
|
2019-01-23 14:54:53 +00:00
|
|
|
goto cleanup;
|
|
|
|
}
|
2018-10-18 10:34:49 +00:00
|
|
|
}
|
|
|
|
|
2019-05-15 08:58:42 +00:00
|
|
|
qemuBlockJobStarted(job, vm);
|
|
|
|
|
2019-01-23 14:54:53 +00:00
|
|
|
cleanup:
|
2018-11-29 16:35:52 +00:00
|
|
|
qemuBlockJobStartupFinalize(vm, job);
|
2018-10-18 10:34:49 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
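/* Re-detect running block jobs from the monitor for domains which do not
 * use -blockdev. */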
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessRefreshLegacyBlockjobs(virQEMUDriver *driver,
|
|
|
|
virDomainObj *vm)
|
2018-10-18 10:34:49 +00:00
|
|
|
{
|
2021-11-30 10:49:24 +00:00
|
|
|
g_autoptr(GHashTable) blockJobs = NULL;
|
2018-10-18 10:34:49 +00:00
|
|
|
|
|
|
|
qemuDomainObjEnterMonitor(driver, vm);
|
2019-06-11 14:42:53 +00:00
|
|
|
blockJobs = qemuMonitorGetAllBlockJobInfo(qemuDomainGetMonitor(vm), true);
|
2021-11-24 12:11:52 +00:00
|
|
|
qemuDomainObjExitMonitor(driver, vm);
|
|
|
|
|
|
|
|
if (!blockJobs)
|
2021-11-30 10:49:24 +00:00
|
|
|
return -1;
|
2018-10-18 10:34:49 +00:00
|
|
|
|
|
|
|
if (virHashForEach(blockJobs, qemuProcessRefreshLegacyBlockjob, vm) < 0)
|
2021-11-30 10:49:24 +00:00
|
|
|
return -1;
|
2018-10-18 10:34:49 +00:00
|
|
|
|
2021-11-30 10:49:24 +00:00
|
|
|
return 0;
|
2018-10-18 10:34:49 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
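/* Refresh block job state on reconnect, using the -blockdev aware code path
 * when available and the legacy one otherwise. */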
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessRefreshBlockjobs(virQEMUDriver *driver,
|
|
|
|
virDomainObj *vm)
|
2018-10-18 10:34:49 +00:00
|
|
|
{
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
2018-12-11 17:13:35 +00:00
|
|
|
|
|
|
|
if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV))
|
|
|
|
return qemuBlockJobRefreshJobs(driver, vm);
|
|
|
|
else
|
|
|
|
return qemuProcessRefreshLegacyBlockjobs(driver, vm);
|
2018-10-18 10:34:49 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
struct qemuProcessReconnectData {
|
2021-03-11 07:16:13 +00:00
|
|
|
virQEMUDriver *driver;
|
|
|
|
virDomainObj *obj;
|
|
|
|
virIdentity *identity;
|
2017-07-11 13:53:58 +00:00
|
|
|
};
|
|
|
|
/*
|
|
|
|
* Open an existing VM's monitor, re-detect VCPU threads
|
|
|
|
* and re-reserve the security labels in use
|
|
|
|
*
|
|
|
|
* This function also inherits a locked and ref'd domain object.
|
|
|
|
*
|
|
|
|
* This function needs to:
|
|
|
|
* 1. Enter job
|
|
|
|
* 2. just before monitor reconnect do lightweight MonitorEnter
|
|
|
|
* (increase VM refcount and unlock VM)
|
|
|
|
* 3. reconnect to monitor
|
|
|
|
* 4. do lightweight MonitorExit (lock VM)
|
|
|
|
* 5. continue reconnect process
|
|
|
|
* 6. EndJob
|
|
|
|
*
|
|
|
|
* We can't do normal MonitorEnter & MonitorExit because these two lock the
|
|
|
|
* monitor lock, which does not exist in this early phase.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
qemuProcessReconnect(void *opaque)
|
2011-05-05 16:32:21 +00:00
|
|
|
{
|
2017-07-11 13:53:58 +00:00
|
|
|
struct qemuProcessReconnectData *data = opaque;
|
2021-03-11 07:16:13 +00:00
|
|
|
virQEMUDriver *driver = data->driver;
|
|
|
|
virDomainObj *obj = data->obj;
|
|
|
|
qemuDomainObjPrivate *priv;
|
2020-09-14 11:29:23 +00:00
|
|
|
g_auto(qemuDomainJobObj) oldjob = {
|
|
|
|
.cb = NULL,
|
|
|
|
};
|
2017-07-11 13:53:58 +00:00
|
|
|
int state;
|
|
|
|
int reason;
|
2019-12-20 21:16:30 +00:00
|
|
|
g_autoptr(virQEMUDriverConfig) cfg = NULL;
|
2012-08-15 22:10:37 +00:00
|
|
|
size_t i;
|
2017-07-11 13:53:58 +00:00
|
|
|
unsigned int stopFlags = 0;
|
|
|
|
bool jobStarted = false;
|
2018-03-14 17:27:49 +00:00
|
|
|
bool retry = true;
|
2018-11-01 15:19:02 +00:00
|
|
|
bool tryMonReconn = false;
|
2011-05-05 16:32:21 +00:00
|
|
|
|
2018-11-12 13:27:26 +00:00
|
|
|
virIdentitySetCurrent(data->identity);
|
2019-09-19 14:38:03 +00:00
|
|
|
g_clear_object(&data->identity);
|
2017-07-11 13:53:58 +00:00
|
|
|
VIR_FREE(data);
|
2011-05-05 16:32:21 +00:00
|
|
|
|
2021-03-11 15:14:17 +00:00
|
|
|
cfg = virQEMUDriverGetConfig(driver);
|
|
|
|
priv = obj->privateData;
|
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
qemuDomainObjRestoreJob(obj, &oldjob);
|
|
|
|
if (oldjob.asyncJob == QEMU_ASYNC_JOB_MIGRATION_IN)
|
|
|
|
stopFlags |= VIR_QEMU_PROCESS_STOP_MIGRATED;
|
2021-03-11 15:14:17 +00:00
|
|
|
if (oldjob.asyncJob == QEMU_ASYNC_JOB_BACKUP && priv->backup)
|
|
|
|
priv->backup->apiFlags = oldjob.apiFlags;
|
2011-05-05 16:32:21 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
if (qemuDomainObjBeginJob(driver, obj, QEMU_JOB_MODIFY) < 0)
|
2013-08-28 22:22:46 +00:00
|
|
|
goto error;
|
2017-07-11 13:53:58 +00:00
|
|
|
jobStarted = true;
|
2011-05-05 16:32:21 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
/* XXX If we are ever going to change the pid file pattern, come up with
|
|
|
|
* some intelligence here to deal with old paths. */
|
|
|
|
if (!(priv->pidfile = virPidFileBuildPath(cfg->stateDir, obj->def->name)))
|
|
|
|
goto error;
|
2011-05-05 16:32:21 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
/* Restore the masterKey */
|
|
|
|
if (qemuDomainMasterKeyReadFile(priv) < 0)
|
|
|
|
goto error;
|
2012-10-31 19:03:55 +00:00
|
|
|
|
2017-10-11 13:57:16 +00:00
|
|
|
/* If we are connecting to a guest started by old libvirt there is no
|
|
|
|
* allowReboot in status XML and we need to initialize it. */
|
|
|
|
qemuProcessPrepareAllowReboot(obj);
|
|
|
|
|
2018-09-21 02:33:53 +00:00
|
|
|
if (qemuHostdevUpdateActiveDomainDevices(driver, obj->def) < 0)
|
|
|
|
goto error;
|
|
|
|
|
2021-07-26 12:31:45 +00:00
|
|
|
if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_CHARDEV_FD_PASS_COMMANDLINE))
|
2018-03-14 17:27:49 +00:00
|
|
|
retry = false;
|
|
|
|
|
2020-02-12 12:26:11 +00:00
|
|
|
if (qemuDomainObjStartWorker(obj) < 0)
|
|
|
|
goto error;
|
|
|
|
|
2018-03-14 17:27:49 +00:00
|
|
|
VIR_DEBUG("Reconnect monitor to def=%p name='%s' retry=%d",
|
|
|
|
obj, obj->def->name, retry);
|
2017-07-11 13:53:58 +00:00
|
|
|
|
2018-11-01 15:19:02 +00:00
|
|
|
tryMonReconn = true;
|
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
/* XXX check PID liveness & EXE path */
|
2018-03-14 17:27:49 +00:00
|
|
|
if (qemuConnectMonitor(driver, obj, QEMU_ASYNC_JOB_NONE, retry, NULL) < 0)
|
2013-08-28 22:22:46 +00:00
|
|
|
goto error;
|
2011-05-05 16:32:21 +00:00
|
|
|
|
2018-01-30 22:57:24 +00:00
|
|
|
priv->machineName = qemuDomainGetMachineName(obj);
|
|
|
|
if (!priv->machineName)
|
|
|
|
goto error;
|
|
|
|
|
2022-01-25 16:19:53 +00:00
|
|
|
if (virDomainCgroupConnectCgroup("qemu",
|
|
|
|
obj,
|
|
|
|
&priv->cgroup,
|
|
|
|
cfg->cgroupControllers,
|
|
|
|
priv->driver->privileged,
|
|
|
|
priv->machineName) < 0)
|
2017-07-11 13:53:58 +00:00
|
|
|
goto error;
|
2014-12-01 09:54:35 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
if (qemuDomainPerfRestart(obj) < 0)
|
2013-08-28 22:22:46 +00:00
|
|
|
goto error;
|
2012-08-15 22:10:37 +00:00
|
|
|
|
2019-11-15 11:27:42 +00:00
|
|
|
/* recreate the pflash storage sources */
|
|
|
|
if (qemuDomainInitializePflashStorageSource(obj) < 0)
|
|
|
|
goto error;
|
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
for (i = 0; i < obj->def->ndisks; i++) {
|
2021-03-11 07:16:13 +00:00
|
|
|
virDomainDiskDef *disk = obj->def->disks[i];
|
2012-08-15 22:10:37 +00:00
|
|
|
|
2018-02-09 16:06:43 +00:00
|
|
|
if (virDomainDiskTranslateSourcePool(disk) < 0)
|
2013-08-28 22:22:46 +00:00
|
|
|
goto error;
|
2012-08-15 22:10:37 +00:00
|
|
|
|
2017-09-27 16:06:44 +00:00
|
|
|
/* backing chains need to be refreshed only if they could change */
|
2017-09-29 15:20:52 +00:00
|
|
|
if (priv->reconnectBlockjobs != VIR_TRISTATE_BOOL_NO &&
|
|
|
|
!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV)) {
|
2017-09-27 16:06:44 +00:00
|
|
|
/* This should be the only place that calls
|
|
|
|
* qemuDomainDetermineDiskChain with @report_broken == false
|
|
|
|
* to guarantee best-effort domain reconnect */
|
2018-04-24 12:07:22 +00:00
|
|
|
virStorageSourceBackingStoreClear(disk->src);
|
2019-01-16 14:33:07 +00:00
|
|
|
if (qemuDomainDetermineDiskChain(driver, obj, disk, NULL, false) < 0)
|
2017-09-27 16:06:44 +00:00
|
|
|
goto error;
|
|
|
|
} else {
|
|
|
|
VIR_DEBUG("skipping backing chain detection for '%s'", disk->dst);
|
|
|
|
}
|
2017-09-18 13:39:58 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < obj->def->ngraphics; i++) {
|
2018-02-06 09:09:07 +00:00
|
|
|
if (qemuProcessGraphicsReservePorts(obj->def->graphics[i], true) < 0)
|
2017-07-11 13:53:58 +00:00
|
|
|
goto error;
|
2012-08-15 22:10:37 +00:00
|
|
|
}
|
2011-05-05 16:32:21 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
if (qemuProcessUpdateState(driver, obj) < 0)
|
2016-03-28 13:30:31 +00:00
|
|
|
goto error;
|
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
state = virDomainObjGetState(obj, &reason);
|
|
|
|
if (state == VIR_DOMAIN_SHUTOFF ||
|
|
|
|
(state == VIR_DOMAIN_PAUSED &&
|
|
|
|
reason == VIR_DOMAIN_PAUSED_STARTING_UP)) {
|
|
|
|
VIR_DEBUG("Domain '%s' wasn't fully started yet, killing it",
|
|
|
|
obj->def->name);
|
2013-08-28 22:22:46 +00:00
|
|
|
goto error;
|
2017-07-11 13:53:58 +00:00
|
|
|
}
|
2011-05-05 16:32:21 +00:00
|
|
|
|
2019-11-24 07:35:59 +00:00
|
|
|
if (!priv->qemuCaps) {
|
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
_("domain '%s' has no capabilities recorded"),
|
|
|
|
obj->def->name);
|
2013-08-28 22:22:46 +00:00
|
|
|
goto error;
|
2019-11-24 07:35:59 +00:00
|
|
|
}
|
2011-05-05 16:32:21 +00:00
|
|
|
|
2021-10-25 09:04:55 +00:00
|
|
|
/* In case the domain shut down or fake-rebooted while we were not running,
|
|
|
|
* we need to finish the shutdown or fake reboot process. And we need to
|
|
|
|
* do it after we have virQEMUCaps filled in.
|
2017-07-11 13:53:58 +00:00
|
|
|
*/
|
|
|
|
if (state == VIR_DOMAIN_SHUTDOWN ||
|
|
|
|
(state == VIR_DOMAIN_PAUSED &&
|
2021-10-25 09:04:55 +00:00
|
|
|
reason == VIR_DOMAIN_PAUSED_SHUTTING_DOWN) ||
|
|
|
|
(priv->fakeReboot && state == VIR_DOMAIN_PAUSED &&
|
|
|
|
reason == VIR_DOMAIN_PAUSED_USER)) {
|
2017-07-11 13:53:58 +00:00
|
|
|
VIR_DEBUG("Finishing shutdown sequence for domain %s",
|
|
|
|
obj->def->name);
|
2021-12-14 15:36:15 +00:00
|
|
|
qemuProcessShutdownOrReboot(obj);
|
2017-07-11 13:53:58 +00:00
|
|
|
goto cleanup;
|
|
|
|
}
|
2011-05-05 16:32:21 +00:00
|
|
|
|
2017-11-07 14:19:43 +00:00
|
|
|
if (qemuProcessBuildDestroyMemoryPaths(driver, obj, NULL, true) < 0)
|
2017-07-11 13:53:58 +00:00
|
|
|
goto error;
|
2011-05-05 16:32:21 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
if ((qemuDomainAssignAddresses(obj->def, priv->qemuCaps,
|
|
|
|
driver, obj, false)) < 0) {
|
2016-01-22 19:09:22 +00:00
|
|
|
goto error;
|
2016-11-03 20:33:32 +00:00
|
|
|
}
|
2011-05-05 16:32:21 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
/* if the domain requests a security driver we haven't loaded, report error, but
|
|
|
|
* do not kill the domain
|
|
|
|
*/
|
|
|
|
ignore_value(qemuSecurityCheckAllLabel(driver->securityManager,
|
|
|
|
obj->def));
|
|
|
|
|
|
|
|
if (qemuDomainRefreshVcpuInfo(driver, obj, QEMU_ASYNC_JOB_NONE, true) < 0)
|
2013-08-28 22:22:46 +00:00
|
|
|
goto error;
|
2011-05-05 16:32:21 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
qemuDomainVcpuPersistOrder(obj->def);
|
2011-05-05 16:32:21 +00:00
|
|
|
|
2022-02-07 11:42:34 +00:00
|
|
|
if (qemuProcessRefreshCPU(driver, obj) < 0)
|
|
|
|
goto error;
|
|
|
|
|
2020-11-25 10:35:00 +00:00
|
|
|
if (qemuDomainUpdateMemoryDeviceInfo(driver, obj, QEMU_ASYNC_JOB_NONE) < 0)
|
|
|
|
goto error;
|
|
|
|
|
2018-10-17 17:14:42 +00:00
|
|
|
if (qemuProcessDetectIOThreadPIDs(driver, obj, QEMU_ASYNC_JOB_NONE) < 0)
|
|
|
|
goto error;
|
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
if (qemuSecurityReserveLabel(driver->securityManager, obj->def, obj->pid) < 0)
|
|
|
|
goto error;
|
2011-05-05 16:32:21 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
qemuProcessNotifyNets(obj->def);
|
|
|
|
|
2018-08-24 13:29:24 +00:00
|
|
|
qemuProcessFiltersInstantiate(obj->def);
|
2011-05-05 16:32:21 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
if (qemuProcessRefreshDisks(driver, obj, QEMU_ASYNC_JOB_NONE) < 0)
|
2016-11-16 13:43:01 +00:00
|
|
|
goto error;
|
2011-10-05 17:31:54 +00:00
|
|
|
|
2021-05-18 15:47:48 +00:00
|
|
|
/* At this point we've already checked that the startup of the VM was
|
|
|
|
* completed successfully before, which also implies that all transient
|
|
|
|
* disk overlays were created. */
|
|
|
|
for (i = 0; i < obj->def->ndisks; i++) {
|
|
|
|
virDomainDiskDef *disk = obj->def->disks[i];
|
|
|
|
|
|
|
|
if (disk->transient)
|
|
|
|
QEMU_DOMAIN_DISK_PRIVATE(disk)->transientOverlayCreated = true;
|
|
|
|
}
|
|
|
|
|
2018-06-15 13:11:35 +00:00
|
|
|
if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV) &&
|
|
|
|
qemuBlockNodeNamesDetect(driver, obj, QEMU_ASYNC_JOB_NONE) < 0)
|
2014-09-03 13:07:38 +00:00
|
|
|
goto error;
|
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
if (qemuRefreshVirtioChannelState(driver, obj, QEMU_ASYNC_JOB_NONE) < 0)
|
2016-08-01 05:35:50 +00:00
|
|
|
goto error;
|
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
/* If querying of guest's RTC failed, report error, but do not kill the domain. */
|
|
|
|
qemuRefreshRTC(driver, obj);
|
|
|
|
|
|
|
|
if (qemuProcessRefreshBalloonState(driver, obj, QEMU_ASYNC_JOB_NONE) < 0)
|
2013-08-28 22:22:46 +00:00
|
|
|
goto error;
|
2011-05-05 16:32:21 +00:00
|
|
|
|
2018-02-09 15:40:51 +00:00
|
|
|
if (qemuProcessRecoverJob(driver, obj, &oldjob, &stopFlags) < 0)
|
2013-08-28 22:22:46 +00:00
|
|
|
goto error;
|
2011-05-05 16:32:21 +00:00
|
|
|
|
2019-11-26 12:04:30 +00:00
|
|
|
if (qemuProcessRefreshBlockjobs(driver, obj) < 0)
|
|
|
|
goto error;
|
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
if (qemuProcessUpdateDevices(driver, obj) < 0)
|
|
|
|
goto error;
|
2011-05-05 16:32:21 +00:00
|
|
|
|
2018-06-27 13:57:30 +00:00
|
|
|
if (qemuRefreshPRManagerState(driver, obj) < 0)
|
|
|
|
goto error;
|
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
qemuProcessReconnectCheckMemAliasOrderMismatch(obj);
|
|
|
|
|
|
|
|
if (qemuConnectAgent(driver, obj) < 0)
|
|
|
|
goto error;
|
|
|
|
|
2018-07-30 03:12:35 +00:00
|
|
|
for (i = 0; i < obj->def->nresctrls; i++) {
|
2018-11-12 13:31:45 +00:00
|
|
|
size_t j = 0;
|
|
|
|
|
2018-07-30 03:12:35 +00:00
|
|
|
if (virResctrlAllocDeterminePath(obj->def->resctrls[i]->alloc,
|
2018-01-30 22:57:52 +00:00
|
|
|
priv->machineName) < 0)
|
|
|
|
goto error;
|
2018-11-12 13:31:45 +00:00
|
|
|
|
|
|
|
for (j = 0; j < obj->def->resctrls[i]->nmonitors; j++) {
|
2021-03-11 07:16:13 +00:00
|
|
|
virDomainResctrlMonDef *mon = NULL;
|
2018-11-12 13:31:45 +00:00
|
|
|
|
|
|
|
mon = obj->def->resctrls[i]->monitors[j];
|
|
|
|
if (virResctrlMonitorDeterminePath(mon->instance,
|
|
|
|
priv->machineName) < 0)
|
|
|
|
goto error;
|
|
|
|
}
|
2018-01-30 22:57:52 +00:00
|
|
|
}
|
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
/* update domain state XML with possibly updated state in virDomainObj */
|
2019-11-27 12:53:10 +00:00
|
|
|
if (virDomainObjSave(obj, driver->xmlopt, cfg->stateDir) < 0)
|
2013-08-28 22:22:46 +00:00
|
|
|
goto error;
|
2011-05-05 16:32:21 +00:00
|
|
|
|
Add some missing hook functions
A core use case of the hook scripts is to be able to do things
to a guest's network configuration. It is possible to hook into
the 'start' operation for a QEMU guest which runs just before
the guest is started. The TAP devices will exist at this point,
but the QEMU process will not. It can be desirable to have a
'started' hook too, which runs once QEMU has started.
If libvirtd is restarted it will re-populate firewall rules,
but there is no QEMU hook to trigger for existing domains.
This is solved with a 'reconnect' hook.
Finally, if attaching to an external QEMU process there needs
to be an 'attach' hook script.
This all also applies to the LXC driver
* docs/hooks.html.in: Document new operations
* src/util/hooks.c, src/util/hooks.c: Add 'started', 'reconnect'
and 'attach' operations for QEMU. Add 'prepare', 'started',
'release' and 'reconnect' operations for LXC
* src/lxc/lxc_driver.c: Add hooks for 'prepare', 'started',
'release' and 'reconnect' operations
* src/qemu/qemu_process.c: Add hooks for 'started', 'reconnect'
and 'attach' operations
2012-05-28 14:04:31 +00:00
|
|
|
/* Run a hook to allow admins to do some magic */
|
|
|
|
if (virHookPresent(VIR_HOOK_DRIVER_QEMU)) {
|
2019-12-20 21:16:28 +00:00
|
|
|
g_autofree char *xml = qemuDomainDefFormatXML(driver,
|
|
|
|
priv->qemuCaps,
|
|
|
|
obj->def, 0);
|
2012-05-28 14:04:31 +00:00
|
|
|
int hookret;
|
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
hookret = virHookCall(VIR_HOOK_DRIVER_QEMU, obj->def->name,
|
|
|
|
VIR_HOOK_QEMU_OP_RECONNECT, VIR_HOOK_SUBOP_BEGIN,
|
2012-05-28 14:04:31 +00:00
|
|
|
NULL, xml, NULL);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If the script raised an error abort the launch
|
|
|
|
*/
|
|
|
|
if (hookret < 0)
|
2013-08-28 22:22:46 +00:00
|
|
|
goto error;
|
2012-05-28 14:04:31 +00:00
|
|
|
}
|
|
|
|
|
2020-01-31 16:04:24 +00:00
|
|
|
if (g_atomic_int_add(&driver->nactive, 1) == 0 && driver->inhibitCallback)
|
2017-07-11 13:53:58 +00:00
|
|
|
driver->inhibitCallback(true, driver->inhibitOpaque);
|
2016-10-03 11:11:47 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
cleanup:
|
2017-08-15 07:12:43 +00:00
|
|
|
if (jobStarted) {
|
|
|
|
if (!virDomainObjIsActive(obj))
|
|
|
|
qemuDomainRemoveInactive(driver, obj);
|
2017-07-11 13:53:58 +00:00
|
|
|
qemuDomainObjEndJob(driver, obj);
|
2017-08-15 07:12:43 +00:00
|
|
|
} else {
|
|
|
|
if (!virDomainObjIsActive(obj))
|
|
|
|
qemuDomainRemoveInactiveJob(driver, obj);
|
|
|
|
}
|
2017-07-11 13:53:58 +00:00
|
|
|
virDomainObjEndAPI(&obj);
|
|
|
|
virNWFilterUnlockFilterUpdates();
|
2018-11-12 13:27:26 +00:00
|
|
|
virIdentitySetCurrent(NULL);
|
2017-07-11 13:53:58 +00:00
|
|
|
return;
|
2015-07-30 14:42:43 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
error:
|
|
|
|
if (virDomainObjIsActive(obj)) {
|
|
|
|
/* We can't get the monitor back, so we must kill the VM
|
|
|
|
* to remove danger of it ending up running twice if
|
2018-10-16 12:38:27 +00:00
|
|
|
* the user tries to start it again later.
|
|
|
|
*
|
|
|
|
* If we cannot get to the monitor when the QEMU command
|
|
|
|
* line used -no-shutdown, then we can safely say that the
|
2018-10-08 11:21:08 +00:00
|
|
|
* domain crashed; otherwise, if the monitor was started,
|
|
|
|
* then we can blame ourselves, else we failed before the
|
|
|
|
* monitor started so we don't really know. */
|
2021-08-19 11:50:16 +00:00
|
|
|
if (!priv->mon && tryMonReconn &&
|
|
|
|
(priv->allowReboot == VIR_TRISTATE_BOOL_YES ||
|
|
|
|
virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_SET_ACTION)))
|
2018-10-16 12:38:27 +00:00
|
|
|
state = VIR_DOMAIN_SHUTOFF_CRASHED;
|
2018-10-08 11:21:08 +00:00
|
|
|
else if (priv->mon)
|
|
|
|
state = VIR_DOMAIN_SHUTOFF_DAEMON;
|
2018-10-16 12:38:27 +00:00
|
|
|
else
|
|
|
|
state = VIR_DOMAIN_SHUTOFF_UNKNOWN;
|
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
/* If BeginJob failed, we jumped here without a job, let's hope another
|
|
|
|
* thread didn't have a chance to start playing with the domain yet
|
|
|
|
* (it's all we can do anyway).
|
|
|
|
*/
|
|
|
|
qemuProcessStop(driver, obj, state, QEMU_ASYNC_JOB_NONE, stopFlags);
|
2011-06-23 09:37:57 +00:00
|
|
|
}
|
2017-07-11 13:53:58 +00:00
|
|
|
goto cleanup;
|
2011-06-23 10:41:57 +00:00
|
|
|
}
|
2016-05-23 12:00:35 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
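/* virDomainObjListForEach callback: spawn a per-domain thread running
 * qemuProcessReconnect for each active domain. */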
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessReconnectHelper(virDomainObj *obj,
|
2017-07-11 13:53:58 +00:00
|
|
|
void *opaque)
|
2016-05-23 12:00:35 +00:00
|
|
|
{
|
2017-07-11 13:53:58 +00:00
|
|
|
virThread thread;
|
|
|
|
struct qemuProcessReconnectData *src = opaque;
|
|
|
|
struct qemuProcessReconnectData *data;
|
2020-02-14 11:20:10 +00:00
|
|
|
g_autofree char *name = NULL;
|
2016-05-23 12:00:35 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
/* If the VM was inactive, we don't need to reconnect */
|
|
|
|
if (!obj->pid)
|
|
|
|
return 0;
|
2016-05-23 12:00:35 +00:00
|
|
|
|
2020-10-05 10:27:13 +00:00
|
|
|
data = g_new0(struct qemuProcessReconnectData, 1);
|
2016-05-23 12:00:35 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
memcpy(data, src, sizeof(*data));
|
|
|
|
data->obj = obj;
|
2018-11-12 13:27:26 +00:00
|
|
|
data->identity = virIdentityGetCurrent();
|
2016-05-23 12:00:35 +00:00
|
|
|
|
2017-08-07 12:42:58 +00:00
|
|
|
virNWFilterReadLockFilterUpdates();
|
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
/* this lock and reference will be eventually transferred to the thread
|
|
|
|
* that handles the reconnect */
|
|
|
|
virObjectLock(obj);
|
|
|
|
virObjectRef(obj);
|
2016-05-23 12:00:35 +00:00
|
|
|
|
2020-02-14 11:20:10 +00:00
|
|
|
name = g_strdup_printf("init-%s", obj->def->name);
|
|
|
|
|
|
|
|
if (virThreadCreateFull(&thread, false, qemuProcessReconnect,
|
|
|
|
name, false, data) < 0) {
|
2017-07-11 13:53:58 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
|
|
|
|
_("Could not create thread. QEMU initialization "
|
|
|
|
"might be incomplete"));
|
|
|
|
/* We can't spawn a thread and thus connect to monitor. Kill qemu.
|
|
|
|
* It's safe to call qemuProcessStop without a job here since there
|
|
|
|
* is no thread that could be doing anything else with the same domain
|
|
|
|
* object.
|
|
|
|
*/
|
|
|
|
qemuProcessStop(src->driver, obj, VIR_DOMAIN_SHUTOFF_FAILED,
|
|
|
|
QEMU_ASYNC_JOB_NONE, 0);
|
2018-09-21 04:35:11 +00:00
|
|
|
qemuDomainRemoveInactiveJobLocked(src->driver, obj);
|
2016-05-19 13:29:02 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
virDomainObjEndAPI(&obj);
|
2017-08-07 12:42:58 +00:00
|
|
|
virNWFilterUnlockFilterUpdates();
|
2019-09-19 14:38:03 +00:00
|
|
|
g_clear_object(&data->identity);
|
2017-07-11 13:53:58 +00:00
|
|
|
VIR_FREE(data);
|
|
|
|
return -1;
|
2016-05-23 12:00:35 +00:00
|
|
|
}
|
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
return 0;
|
|
|
|
}
|
2016-05-23 12:00:35 +00:00
|
|
|
|
2017-07-11 13:53:58 +00:00
|
|
|
/**
|
|
|
|
* qemuProcessReconnectAll
|
|
|
|
*
|
|
|
|
* Try to re-open the resources for live VMs that we care
|
|
|
|
* about.
|
|
|
|
*/
|
|
|
|
void
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessReconnectAll(virQEMUDriver *driver)
|
2017-07-11 13:53:58 +00:00
|
|
|
{
|
2018-02-09 17:19:44 +00:00
|
|
|
struct qemuProcessReconnectData data = {.driver = driver};
|
2019-09-06 11:59:59 +00:00
|
|
|
virDomainObjListForEach(driver->domains, true,
|
|
|
|
qemuProcessReconnectHelper, &data);
|
2016-05-23 12:00:35 +00:00
|
|
|
}
|
2019-01-13 00:50:00 +00:00
|
|
|
|
|
|
|
|
2021-03-11 07:16:13 +00:00
|
|
|
static void virQEMUCapsMonitorNotify(qemuMonitor *mon G_GNUC_UNUSED,
|
|
|
|
virDomainObj *vm G_GNUC_UNUSED,
|
2019-10-14 12:45:33 +00:00
|
|
|
void *opaque G_GNUC_UNUSED)
|
2019-01-13 00:50:00 +00:00
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
static qemuMonitorCallbacks callbacks = {
|
|
|
|
.eofNotify = virQEMUCapsMonitorNotify,
|
|
|
|
.errorNotify = virQEMUCapsMonitorNotify,
|
|
|
|
};
|
|
|
|
|
|
|
|
|
2019-02-13 16:18:51 +00:00
|
|
|
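/**
 * qemuProcessQMPStop:
 * @proc: Stores process and connection state
 *
 * Close the monitor, kill the probing QEMU process and clean up its
 * temporary files.
 */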
static void
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessQMPStop(qemuProcessQMP *proc)
|
2019-02-13 16:18:51 +00:00
|
|
|
{
|
|
|
|
if (proc->mon) {
|
|
|
|
virObjectUnlock(proc->mon);
|
2022-01-28 17:42:45 +00:00
|
|
|
g_clear_pointer(&proc->mon, qemuMonitorClose);
|
2019-02-13 16:18:51 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (proc->cmd) {
|
|
|
|
virCommandAbort(proc->cmd);
|
2022-01-28 17:42:45 +00:00
|
|
|
g_clear_pointer(&proc->cmd, virCommandFree);
|
2019-02-13 16:18:51 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (proc->monpath)
|
|
|
|
unlink(proc->monpath);
|
|
|
|
|
|
|
|
virDomainObjEndAPI(&proc->vm);
|
|
|
|
|
|
|
|
if (proc->pid != 0) {
|
|
|
|
VIR_DEBUG("Killing QMP caps process %lld", (long long)proc->pid);
|
|
|
|
if (virProcessKill(proc->pid, SIGKILL) < 0 && errno != ESRCH)
|
|
|
|
VIR_ERROR(_("Failed to kill process %lld: %s"),
|
|
|
|
(long long)proc->pid,
|
2020-02-26 17:57:34 +00:00
|
|
|
g_strerror(errno));
|
2019-02-13 16:18:51 +00:00
|
|
|
|
|
|
|
proc->pid = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (proc->pidfile)
|
|
|
|
unlink(proc->pidfile);
|
2019-02-13 16:22:31 +00:00
|
|
|
|
|
|
|
if (proc->uniqDir)
|
|
|
|
rmdir(proc->uniqDir);
|
2019-02-13 16:18:51 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
* qemuProcessQMPFree:
|
|
|
|
* @proc: Stores process and connection state
|
|
|
|
*
|
|
|
|
* Kill QEMU process and free process data structure.
|
|
|
|
*/
|
2019-01-13 00:50:00 +00:00
|
|
|
void
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessQMPFree(qemuProcessQMP *proc)
|
2019-01-13 00:50:00 +00:00
|
|
|
{
|
2019-01-13 00:50:03 +00:00
|
|
|
if (!proc)
|
2019-01-13 00:50:00 +00:00
|
|
|
return;
|
|
|
|
|
2019-01-13 00:50:04 +00:00
|
|
|
qemuProcessQMPStop(proc);
|
2020-02-12 12:51:30 +00:00
|
|
|
|
|
|
|
g_object_unref(proc->eventThread);
|
|
|
|
|
2021-02-03 19:36:01 +00:00
|
|
|
g_free(proc->binary);
|
|
|
|
g_free(proc->libDir);
|
|
|
|
g_free(proc->uniqDir);
|
|
|
|
g_free(proc->monpath);
|
|
|
|
g_free(proc->monarg);
|
|
|
|
g_free(proc->pidfile);
|
|
|
|
g_free(proc->stdErr);
|
|
|
|
g_free(proc);
|
2019-01-13 00:50:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2019-01-13 00:50:15 +00:00
|
|
|
/**
|
|
|
|
* qemuProcessQMPNew:
|
|
|
|
* @binary: QEMU binary
|
|
|
|
* @libDir: Directory for process and connection artifacts
|
|
|
|
* @runUid: UserId for QEMU process
|
|
|
|
* @runGid: GroupId for QEMU process
|
|
|
|
* @forceTCG: Force TCG mode if true
|
|
|
|
*
|
|
|
|
* Allocate and initialize domain structure encapsulating QEMU process state
|
|
|
|
* and monitor connection for completing QMP queries.
|
|
|
|
*/
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessQMP *
|
2019-01-13 00:50:02 +00:00
|
|
|
qemuProcessQMPNew(const char *binary,
|
2019-01-13 00:50:01 +00:00
|
|
|
const char *libDir,
|
|
|
|
uid_t runUid,
|
|
|
|
gid_t runGid,
|
2019-01-13 00:50:06 +00:00
|
|
|
bool forceTCG)
|
2019-01-13 00:50:00 +00:00
|
|
|
{
|
2020-07-17 21:15:53 +00:00
|
|
|
g_autoptr(qemuProcessQMP) proc = NULL;
|
2020-02-12 12:51:30 +00:00
|
|
|
const char *threadSuffix;
|
|
|
|
g_autofree char *threadName = NULL;
|
2019-01-13 00:50:00 +00:00
|
|
|
|
2019-01-13 00:50:15 +00:00
|
|
|
VIR_DEBUG("exec=%s, libDir=%s, runUid=%u, runGid=%u, forceTCG=%d",
|
|
|
|
binary, libDir, runUid, runGid, forceTCG);
|
|
|
|
|
2020-10-05 10:27:13 +00:00
|
|
|
proc = g_new0(qemuProcessQMP, 1);
|
2019-01-13 00:50:00 +00:00
|
|
|
|
2019-10-20 11:49:46 +00:00
|
|
|
proc->binary = g_strdup(binary);
|
|
|
|
proc->libDir = g_strdup(libDir);
|
2019-01-13 00:50:00 +00:00
|
|
|
|
2019-01-13 00:50:03 +00:00
|
|
|
proc->runUid = runUid;
|
|
|
|
proc->runGid = runGid;
|
2019-01-13 00:50:06 +00:00
|
|
|
proc->forceTCG = forceTCG;
|
2019-01-13 00:50:00 +00:00
|
|
|
|
2020-02-12 12:51:30 +00:00
|
|
|
threadSuffix = strrchr(binary, '-');
|
|
|
|
if (threadSuffix)
|
|
|
|
threadSuffix++;
|
|
|
|
else
|
|
|
|
threadSuffix = binary;
|
|
|
|
threadName = g_strdup_printf("qmp-%s", threadSuffix);
|
|
|
|
|
|
|
|
if (!(proc->eventThread = virEventThreadNew(threadName)))
|
2020-07-17 21:15:53 +00:00
|
|
|
return NULL;
|
2019-01-13 00:50:13 +00:00
|
|
|
|
2020-07-17 21:15:53 +00:00
|
|
|
return g_steal_pointer(&proc);
|
2019-01-13 00:50:13 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2019-04-12 13:22:48 +00:00
|
|
|
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessQEMULabelUniqPath(qemuProcessQMP *proc)
|
2019-04-12 13:22:48 +00:00
|
|
|
{
|
|
|
|
/* We cannot use the security driver here, but we should not need to. */
|
|
|
|
if (chown(proc->uniqDir, proc->runUid, -1) < 0) {
|
|
|
|
virReportSystemError(errno,
|
|
|
|
_("Cannot chown uniq path: %s"),
|
|
|
|
proc->uniqDir);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2019-01-13 00:50:13 +00:00
|
|
|
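/**
 * qemuProcessQMPInit:
 * @proc: Stores process and connection state
 *
 * Create the unique temporary directory and the monitor socket and pidfile
 * paths used by the probing QEMU process.
 */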
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessQMPInit(qemuProcessQMP *proc)
|
2019-01-13 00:50:13 +00:00
|
|
|
{
|
2019-12-20 21:16:28 +00:00
|
|
|
g_autofree char *template = NULL;
|
2019-01-13 00:50:13 +00:00
|
|
|
|
|
|
|
VIR_DEBUG("proc=%p, emulator=%s", proc, proc->binary);
|
|
|
|
|
2019-10-22 13:26:14 +00:00
|
|
|
template = g_strdup_printf("%s/qmp-XXXXXX", proc->libDir);
|
2019-02-13 16:22:31 +00:00
|
|
|
|
2019-11-13 21:35:47 +00:00
|
|
|
if (!(proc->uniqDir = g_mkdtemp(template))) {
|
2019-02-13 16:22:31 +00:00
|
|
|
virReportSystemError(errno,
|
|
|
|
_("Failed to create unique directory with "
|
|
|
|
"template '%s' for probing QEMU"),
|
|
|
|
template);
|
2019-11-12 20:46:27 +00:00
|
|
|
return -1;
|
2019-02-13 16:22:31 +00:00
|
|
|
}
|
2019-12-20 21:16:28 +00:00
|
|
|
/* if g_mkdtemp succeeds, proc->uniqDir is now the owner of
|
|
|
|
* the string. Set template to NULL to avoid freeing
|
|
|
|
* the memory in this case */
|
|
|
|
template = NULL;
|
2019-02-13 16:22:31 +00:00
|
|
|
|
2019-04-12 13:22:48 +00:00
|
|
|
if (qemuProcessQEMULabelUniqPath(proc) < 0)
|
2019-11-12 20:46:27 +00:00
|
|
|
return -1;
|
2019-04-12 13:22:48 +00:00
|
|
|
|
2019-10-22 13:26:14 +00:00
|
|
|
proc->monpath = g_strdup_printf("%s/%s", proc->uniqDir, "qmp.monitor");
|
2019-01-13 00:50:13 +00:00
|
|
|
|
2021-09-10 12:16:50 +00:00
|
|
|
proc->monarg = g_strdup_printf("unix:%s,server=on,wait=off", proc->monpath);
|
2019-01-13 00:50:00 +00:00
|
|
|
|
2019-02-13 16:22:31 +00:00
|
|
|
/*
|
2019-01-13 00:50:00 +00:00
|
|
|
* Normally we'd use runDir for pid files, but because we're using
|
|
|
|
* -daemonize we need QEMU to be allowed to create them, rather
|
|
|
|
* than libvirtd. So we're using libDir which QEMU can write to
|
|
|
|
*/
|
2019-10-22 13:26:14 +00:00
|
|
|
proc->pidfile = g_strdup_printf("%s/%s", proc->uniqDir, "qmp.pid");
|
2019-01-13 00:50:00 +00:00
|
|
|
|
2019-11-12 20:46:27 +00:00
|
|
|
return 0;
|
2019-01-13 00:50:10 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2022-01-05 15:37:51 +00:00
|
|
|
#if defined(__linux__)
|
|
|
|
# define hwaccel "kvm:tcg"
|
2018-10-19 18:32:03 +00:00
|
|
|
#elif defined(__APPLE__)
|
|
|
|
# define hwaccel "hvf:tcg"
|
2022-01-05 15:37:51 +00:00
|
|
|
#else
|
|
|
|
# define hwaccel "tcg"
|
|
|
|
#endif
|
|
|
|
|
2019-01-13 00:50:10 +00:00
|
|
|
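/**
 * qemuProcessQMPLaunch:
 * @proc: Stores process and connection state
 *
 * Build the probing QEMU command line, run it daemonized and read its pid
 * back from the pidfile.
 */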
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessQMPLaunch(qemuProcessQMP *proc)
|
2019-01-13 00:50:00 +00:00
|
|
|
{
|
|
|
|
const char *machine;
|
|
|
|
int status = 0;
|
2019-02-12 13:38:40 +00:00
|
|
|
int rc;
|
2019-01-13 00:50:00 +00:00
|
|
|
|
2019-01-13 00:50:06 +00:00
|
|
|
if (proc->forceTCG)
|
2019-01-13 00:50:00 +00:00
|
|
|
machine = "none,accel=tcg";
|
|
|
|
else
|
2022-01-05 15:37:51 +00:00
|
|
|
machine = "none,accel=" hwaccel;
|
2019-01-13 00:50:00 +00:00
|
|
|
|
|
|
|
VIR_DEBUG("Try to probe capabilities of '%s' via QMP, machine %s",
|
2019-01-13 00:50:03 +00:00
|
|
|
proc->binary, machine);
|
2019-01-13 00:50:00 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* We explicitly need to use -daemonize here, rather than
|
|
|
|
* virCommandDaemonize, because we need to synchronize
|
|
|
|
* with QEMU creating its monitor socket API. Using
|
|
|
|
* daemonize guarantees control won't return to libvirt
|
|
|
|
* until the socket is present.
|
|
|
|
*/
|
2019-01-13 00:50:03 +00:00
|
|
|
proc->cmd = virCommandNewArgList(proc->binary,
|
|
|
|
"-S",
|
|
|
|
"-no-user-config",
|
|
|
|
"-nodefaults",
|
|
|
|
"-nographic",
|
|
|
|
"-machine", machine,
|
|
|
|
"-qmp", proc->monarg,
|
|
|
|
"-pidfile", proc->pidfile,
|
|
|
|
"-daemonize",
|
2019-01-13 00:50:00 +00:00
|
|
|
NULL);
|
2019-01-13 00:50:03 +00:00
|
|
|
virCommandAddEnvPassCommon(proc->cmd);
|
|
|
|
virCommandClearCaps(proc->cmd);
|
2019-01-13 00:50:00 +00:00
|
|
|
|
|
|
|
#if WITH_CAPNG
|
|
|
|
/* QEMU might run into permission issues, e.g. /dev/sev (0600), override
|
|
|
|
* them just for the purpose of probing */
|
|
|
|
if (geteuid() == 0)
|
2019-01-13 00:50:03 +00:00
|
|
|
virCommandAllowCap(proc->cmd, CAP_DAC_OVERRIDE);
|
2019-01-13 00:50:00 +00:00
|
|
|
#endif
|
|
|
|
|
2019-01-13 00:50:03 +00:00
|
|
|
virCommandSetGID(proc->cmd, proc->runGid);
|
|
|
|
virCommandSetUID(proc->cmd, proc->runUid);
|
2019-01-13 00:50:00 +00:00
|
|
|
|
2019-10-16 11:22:13 +00:00
|
|
|
virCommandSetErrorBuffer(proc->cmd, &(proc->stdErr));
|
2019-01-13 00:50:00 +00:00
|
|
|
|
2019-01-13 00:50:03 +00:00
|
|
|
if (virCommandRun(proc->cmd, &status) < 0)
|
2019-11-12 20:46:27 +00:00
|
|
|
return -1;
|
2019-01-13 00:50:00 +00:00
|
|
|
|
|
|
|
if (status != 0) {
|
2019-02-12 13:38:40 +00:00
|
|
|
VIR_DEBUG("QEMU %s exited with status %d", proc->binary, status);
|
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
_("Failed to start QEMU binary %s for probing: %s"),
|
|
|
|
proc->binary,
|
2019-10-16 11:22:13 +00:00
|
|
|
proc->stdErr ? proc->stdErr : _("unknown error"));
|
2019-11-12 20:46:27 +00:00
|
|
|
return -1;
|
2019-01-13 00:50:00 +00:00
|
|
|
}
|
|
|
|
|
2019-02-12 13:38:40 +00:00
|
|
|
if ((rc = virPidFileReadPath(proc->pidfile, &proc->pid)) < 0) {
|
|
|
|
virReportSystemError(-rc, _("Failed to read pidfile %s"), proc->pidfile);
|
2019-11-12 20:46:27 +00:00
|
|
|
return -1;
|
2019-01-13 00:50:00 +00:00
|
|
|
}
|
|
|
|
|
2019-11-12 20:46:27 +00:00
|
|
|
return 0;
|
2019-01-13 00:50:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2019-02-14 10:25:50 +00:00
|
|
|
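/* Negotiate QMP capabilities on a freshly opened monitor. */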
int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessQMPInitMonitor(qemuMonitor *mon)
|
2019-02-14 10:25:50 +00:00
|
|
|
{
|
|
|
|
if (qemuMonitorSetCapabilities(mon) < 0) {
|
|
|
|
VIR_DEBUG("Failed to set monitor capabilities %s",
|
|
|
|
virGetLastErrorMessage());
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2019-01-13 00:50:10 +00:00
|
|
|
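/**
 * qemuProcessQMPConnectMonitor:
 * @proc: Stores process and connection state
 *
 * Open the QMP monitor socket of the probing QEMU process and negotiate
 * capabilities.
 */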
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessQMPConnectMonitor(qemuProcessQMP *proc)
|
2019-01-13 00:50:10 +00:00
|
|
|
{
|
2021-11-18 14:20:23 +00:00
|
|
|
g_autoptr(virDomainXMLOption) xmlopt = NULL;
|
2019-01-13 00:50:14 +00:00
|
|
|
virDomainChrSourceDef monConfig;
|
2019-01-13 00:50:10 +00:00
|
|
|
|
|
|
|
VIR_DEBUG("proc=%p, emulator=%s, proc->pid=%lld",
|
|
|
|
proc, proc->binary, (long long)proc->pid);
|
|
|
|
|
2019-01-13 00:50:14 +00:00
|
|
|
monConfig.type = VIR_DOMAIN_CHR_TYPE_UNIX;
|
|
|
|
monConfig.data.nix.path = proc->monpath;
|
|
|
|
monConfig.data.nix.listen = false;
|
2019-01-13 00:50:11 +00:00
|
|
|
|
|
|
|
if (!(xmlopt = virDomainXMLOptionNew(NULL, NULL, NULL, NULL, NULL)) ||
|
2019-10-10 07:44:12 +00:00
|
|
|
!(proc->vm = virDomainObjNew(xmlopt)) ||
|
2021-08-03 09:00:48 +00:00
|
|
|
!(proc->vm->def = virDomainDefNew(xmlopt)))
|
2021-11-18 14:20:23 +00:00
|
|
|
return -1;
|
2019-01-13 00:50:11 +00:00
|
|
|
|
|
|
|
proc->vm->pid = proc->pid;
|
|
|
|
|
2020-02-12 14:54:19 +00:00
|
|
|
if (!(proc->mon = qemuMonitorOpen(proc->vm, &monConfig, true, 0,
|
|
|
|
virEventThreadGetContext(proc->eventThread),
|
|
|
|
&callbacks, NULL)))
|
2021-11-18 14:20:23 +00:00
|
|
|
return -1;
|
2019-01-13 00:50:11 +00:00
|
|
|
|
|
|
|
virObjectLock(proc->mon);
|
|
|
|
|
2019-02-14 10:25:50 +00:00
|
|
|
if (qemuProcessQMPInitMonitor(proc->mon) < 0)
|
2021-11-18 14:20:23 +00:00
|
|
|
return -1;
|
2019-01-13 00:50:10 +00:00
|
|
|
|
2021-11-18 14:20:23 +00:00
|
|
|
return 0;
|
2019-01-13 00:50:10 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
* qemuProcessQMPStart:
|
|
|
|
* @proc: QEMU process and connection state created by qemuProcessQMPNew()
|
|
|
|
*
|
|
|
|
* Start and connect to QEMU binary so QMP queries can be made.
|
|
|
|
*
|
|
|
|
* Usage:
|
|
|
|
* proc = qemuProcessQMPNew(binary, libDir, runUid, runGid, forceTCG);
|
|
|
|
* qemuProcessQMPStart(proc);
|
|
|
|
* ** Send QMP Queries to QEMU using monitor (proc->mon) **
|
|
|
|
* qemuProcessQMPFree(proc);
|
|
|
|
*
|
2019-10-16 11:22:13 +00:00
|
|
|
* Process error output (proc->stdErr) remains available in qemuProcessQMP
|
2019-01-13 00:50:10 +00:00
|
|
|
* struct until qemuProcessQMPFree is called.
|
|
|
|
*/
|
|
|
|
int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuProcessQMPStart(qemuProcessQMP *proc)
|
2019-01-13 00:50:10 +00:00
|
|
|
{
|
|
|
|
VIR_DEBUG("proc=%p, emulator=%s", proc, proc->binary);
|
|
|
|
|
|
|
|
if (qemuProcessQMPInit(proc) < 0)
|
2019-11-12 20:46:27 +00:00
|
|
|
return -1;
|
2019-01-13 00:50:10 +00:00
|
|
|
|
|
|
|
if (qemuProcessQMPLaunch(proc) < 0)
|
2019-11-12 20:46:27 +00:00
|
|
|
return -1;
|
2019-01-13 00:50:10 +00:00
|
|
|
|
|
|
|
if (qemuProcessQMPConnectMonitor(proc) < 0)
|
2019-11-12 20:46:27 +00:00
|
|
|
return -1;
|
2019-01-13 00:50:10 +00:00
|
|
|
|
2019-11-12 20:46:27 +00:00
|
|
|
return 0;
|
2019-01-13 00:50:00 +00:00
|
|
|
}
|