2010-12-16 15:23:41 +00:00
|
|
|
/*
|
2014-03-07 13:38:51 +00:00
|
|
|
* qemu_domain.c: QEMU domain private state
|
2010-12-16 15:23:41 +00:00
|
|
|
*
|
2019-03-27 07:12:37 +00:00
|
|
|
* Copyright (C) 2006-2019 Red Hat, Inc.
|
2010-12-16 15:23:41 +00:00
|
|
|
* Copyright (C) 2006 Daniel P. Berrange
|
|
|
|
*
|
|
|
|
* This library is free software; you can redistribute it and/or
|
|
|
|
* modify it under the terms of the GNU Lesser General Public
|
|
|
|
* License as published by the Free Software Foundation; either
|
|
|
|
* version 2.1 of the License, or (at your option) any later version.
|
|
|
|
*
|
|
|
|
* This library is distributed in the hope that it will be useful,
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
|
|
* Lesser General Public License for more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU Lesser General Public
|
2012-09-20 22:30:55 +00:00
|
|
|
* License along with this library. If not, see
|
2012-07-21 10:06:23 +00:00
|
|
|
* <http://www.gnu.org/licenses/>.
|
2010-12-16 15:23:41 +00:00
|
|
|
*/
|
|
|
|
|
|
|
|
#include <config.h>
|
|
|
|
|
|
|
|
#include "qemu_domain.h"
|
2016-02-16 15:11:34 +00:00
|
|
|
#include "qemu_alias.h"
|
2017-11-14 14:37:09 +00:00
|
|
|
#include "qemu_block.h"
|
2016-02-29 14:39:57 +00:00
|
|
|
#include "qemu_cgroup.h"
|
2010-12-16 15:23:41 +00:00
|
|
|
#include "qemu_command.h"
|
2016-08-01 15:51:28 +00:00
|
|
|
#include "qemu_process.h"
|
2011-05-04 11:55:38 +00:00
|
|
|
#include "qemu_capabilities.h"
|
2019-09-16 15:14:48 +00:00
|
|
|
#include "qemu_hostdev.h"
|
2011-07-19 00:27:30 +00:00
|
|
|
#include "qemu_migration.h"
|
2018-03-13 15:08:49 +00:00
|
|
|
#include "qemu_migration_params.h"
|
2017-02-03 16:09:33 +00:00
|
|
|
#include "qemu_security.h"
|
2019-08-08 14:55:06 +00:00
|
|
|
#include "qemu_slirp.h"
|
2017-04-04 16:22:31 +00:00
|
|
|
#include "qemu_extdevice.h"
|
2018-11-30 09:36:22 +00:00
|
|
|
#include "qemu_blockjob.h"
|
2019-09-20 11:47:04 +00:00
|
|
|
#include "qemu_checkpoint.h"
|
2020-03-26 21:31:11 +00:00
|
|
|
#include "qemu_validate.h"
|
2020-07-20 14:18:57 +00:00
|
|
|
#include "qemu_namespace.h"
|
2012-12-12 18:06:53 +00:00
|
|
|
#include "viralloc.h"
|
2012-12-12 17:59:27 +00:00
|
|
|
#include "virlog.h"
|
2012-12-13 18:21:53 +00:00
|
|
|
#include "virerror.h"
|
2011-01-31 10:47:03 +00:00
|
|
|
#include "cpu/cpu.h"
|
2012-12-13 18:01:25 +00:00
|
|
|
#include "viruuid.h"
|
2011-07-19 18:32:58 +00:00
|
|
|
#include "virfile.h"
|
2014-05-13 16:10:40 +00:00
|
|
|
#include "domain_addr.h"
|
2019-03-31 21:42:08 +00:00
|
|
|
#include "domain_capabilities.h"
|
2020-03-20 17:14:22 +00:00
|
|
|
#include "domain_driver.h"
|
2011-10-18 14:15:42 +00:00
|
|
|
#include "domain_event.h"
|
2011-11-29 12:33:23 +00:00
|
|
|
#include "virtime.h"
|
2016-12-22 09:33:28 +00:00
|
|
|
#include "virnetdevopenvswitch.h"
|
2012-12-13 15:25:48 +00:00
|
|
|
#include "virstoragefile.h"
|
2013-04-03 10:36:23 +00:00
|
|
|
#include "virstring.h"
|
2015-03-11 12:58:42 +00:00
|
|
|
#include "virthreadjob.h"
|
2015-12-10 17:39:14 +00:00
|
|
|
#include "virprocess.h"
|
2016-05-19 17:05:36 +00:00
|
|
|
#include "vircrypto.h"
|
2018-05-29 08:01:38 +00:00
|
|
|
#include "virrandom.h"
|
2017-07-21 13:51:03 +00:00
|
|
|
#include "virsystemd.h"
|
2019-10-24 16:00:55 +00:00
|
|
|
#include "virsecret.h"
|
2015-11-03 11:13:25 +00:00
|
|
|
#include "logging/log_manager.h"
|
2016-02-29 14:39:57 +00:00
|
|
|
#include "locking/domain_lock.h"
|
2019-03-15 02:19:18 +00:00
|
|
|
#include "virdomainsnapshotobjlist.h"
|
2019-04-10 15:42:11 +00:00
|
|
|
#include "virdomaincheckpointobjlist.h"
|
2019-09-18 09:27:05 +00:00
|
|
|
#include "backup_conf.h"
|
2020-02-16 21:59:28 +00:00
|
|
|
#include "virutil.h"
|
2020-07-16 09:17:47 +00:00
|
|
|
#include "virqemu.h"
|
2010-12-16 15:23:41 +00:00
|
|
|
|
2010-12-16 16:12:02 +00:00
|
|
|
#include <sys/time.h>
|
2011-05-05 11:38:04 +00:00
|
|
|
#include <fcntl.h>
|
2010-12-16 16:12:02 +00:00
|
|
|
|
2019-06-17 07:50:08 +00:00
|
|
|
/* Default vgamem size for QXL video devices, in KiB.
 * Parenthesized so the macro expands safely inside larger expressions
 * (e.g. division or multiplication by the expansion). */
#define QEMU_QXL_VGAMEM_DEFAULT (16 * 1024)
|
|
|
|
|
2010-12-16 15:23:41 +00:00
|
|
|
#define VIR_FROM_THIS VIR_FROM_QEMU
|
|
|
|
|
2014-02-28 12:16:17 +00:00
|
|
|
VIR_LOG_INIT("qemu.qemu_domain");
|
|
|
|
|
2020-07-16 11:48:34 +00:00
|
|
|
|
|
|
|
static void *
|
|
|
|
qemuJobAllocPrivate(void)
|
|
|
|
{
|
|
|
|
return g_new0(qemuDomainJobPrivate, 1);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
qemuJobFreePrivate(void *opaque)
|
|
|
|
{
|
|
|
|
qemuDomainJobPrivatePtr priv = opaque;
|
|
|
|
|
|
|
|
if (!priv)
|
|
|
|
return;
|
|
|
|
|
|
|
|
qemuMigrationParamsFree(priv->migParams);
|
|
|
|
VIR_FREE(priv);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
qemuJobResetPrivate(void *opaque)
|
|
|
|
{
|
|
|
|
qemuDomainJobPrivatePtr priv = opaque;
|
|
|
|
|
|
|
|
priv->spiceMigration = false;
|
|
|
|
priv->spiceMigrated = false;
|
|
|
|
priv->dumpCompleted = false;
|
|
|
|
qemuMigrationParamsFree(priv->migParams);
|
|
|
|
priv->migParams = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2020-08-17 05:07:21 +00:00
|
|
|
/* qemuDomainObjPrivateXMLFormatNBDMigrationSource:
 * @buf: output buffer receiving the <migrationSource> element
 * @src: storage source backing the NBD migration copy of a disk
 * @xmlopt: XML formatting options/callbacks
 *
 * Format @src as a <migrationSource type='...' format='...'> element with a
 * <source> child into @buf, for inclusion in the domain's private status XML.
 *
 * Returns 0 on success, -1 on failure (error reported by the callee).
 */
static int
qemuDomainObjPrivateXMLFormatNBDMigrationSource(virBufferPtr buf,
                                                virStorageSourcePtr src,
                                                virDomainXMLOptionPtr xmlopt)
{
    g_auto(virBuffer) attrBuf = VIR_BUFFER_INITIALIZER;
    g_auto(virBuffer) childBuf = VIR_BUFFER_INIT_CHILD(buf);

    virBufferAsprintf(&attrBuf, " type='%s' format='%s'",
                      virStorageTypeToString(src->type),
                      virStorageFileFormatTypeToString(src->format));

    /* STATUS format flag: this is private status XML, not user-visible
     * domain XML, so internal-only data may be emitted. */
    if (virDomainDiskSourceFormat(&childBuf, src, "source", 0, false,
                                  VIR_DOMAIN_DEF_FORMAT_STATUS,
                                  false, false, xmlopt) < 0)
        return -1;

    virXMLFormatElement(buf, "migrationSource", &attrBuf, &childBuf);

    return 0;
}
|
|
|
|
|
|
|
|
|
|
|
|
/* qemuDomainObjPrivateXMLFormatNBDMigration:
 * @buf: output buffer for the status XML
 * @vm: domain object whose disks are being formatted
 *
 * Emit one <disk dev='...' migrating='yes|no'> element per domain disk,
 * including the NBD migration source sub-element for disks that have one,
 * so an NBD storage migration can be re-associated after a daemon restart.
 *
 * Returns 0 on success, -1 on failure.
 */
static int
qemuDomainObjPrivateXMLFormatNBDMigration(virBufferPtr buf,
                                          virDomainObjPtr vm)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    size_t i;
    virDomainDiskDefPtr disk;
    qemuDomainDiskPrivatePtr diskPriv;

    for (i = 0; i < vm->def->ndisks; i++) {
        /* per-disk buffers; auto-freed at the end of each iteration */
        g_auto(virBuffer) attrBuf = VIR_BUFFER_INITIALIZER;
        g_auto(virBuffer) childBuf = VIR_BUFFER_INIT_CHILD(buf);
        disk = vm->def->disks[i];
        diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk);

        virBufferAsprintf(&attrBuf, " dev='%s' migrating='%s'",
                          disk->dst, diskPriv->migrating ? "yes" : "no");

        /* migrSource is only set while an NBD disk copy is active */
        if (diskPriv->migrSource &&
            qemuDomainObjPrivateXMLFormatNBDMigrationSource(&childBuf,
                                                            diskPriv->migrSource,
                                                            priv->driver->xmlopt) < 0)
            return -1;

        virXMLFormatElement(buf, "disk", &attrBuf, &childBuf);
    }

    return 0;
}
|
|
|
|
|
2020-07-16 11:48:34 +00:00
|
|
|
/* qemuDomainFormatJobPrivate:
 * @buf: output buffer for the job's private status XML
 * @job: job object whose private data is being formatted
 * @vm: domain the job belongs to
 *
 * Format qemu-specific job state: NBD disk-migration data (only for an
 * outgoing migration job) and any stored migration parameters.
 *
 * Returns 0 on success, -1 on failure.
 */
static int
qemuDomainFormatJobPrivate(virBufferPtr buf,
                           qemuDomainJobObjPtr job,
                           virDomainObjPtr vm)
{
    qemuDomainJobPrivatePtr priv = job->privateData;

    if (job->asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT &&
        qemuDomainObjPrivateXMLFormatNBDMigration(buf, vm) < 0)
        return -1;

    if (priv->migParams)
        qemuMigrationParamsFormat(buf, priv->migParams);

    return 0;
}
|
|
|
|
|
|
|
|
|
2020-08-17 05:07:21 +00:00
|
|
|
/* qemuDomainObjPrivateXMLParseJobNBDSource:
 * @node: <disk> element of the private status XML being parsed
 * @ctxt: XPath context (current node is saved/restored automatically)
 * @disk: disk definition to attach the parsed migration source to
 * @xmlopt: XML parser options/callbacks
 *
 * Parse an optional <migrationSource> child of @node and store the
 * resulting storage source in the disk's private data (migrSource).
 *
 * Returns 0 on success (including "no <migrationSource> present"),
 * -1 on parse failure.
 */
static int
qemuDomainObjPrivateXMLParseJobNBDSource(xmlNodePtr node,
                                         xmlXPathContextPtr ctxt,
                                         virDomainDiskDefPtr disk,
                                         virDomainXMLOptionPtr xmlopt)
{
    VIR_XPATH_NODE_AUTORESTORE(ctxt)
    qemuDomainDiskPrivatePtr diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk);
    g_autofree char *format = NULL;
    g_autofree char *type = NULL;
    g_autoptr(virStorageSource) migrSource = NULL;
    xmlNodePtr sourceNode;

    ctxt->node = node;

    /* no <migrationSource> means this disk has no NBD copy to restore */
    if (!(ctxt->node = virXPathNode("./migrationSource", ctxt)))
        return 0;

    if (!(type = virXMLPropString(ctxt->node, "type"))) {
        virReportError(VIR_ERR_XML_ERROR, "%s",
                       _("missing storage source type"));
        return -1;
    }

    if (!(format = virXMLPropString(ctxt->node, "format"))) {
        virReportError(VIR_ERR_XML_ERROR, "%s",
                       _("missing storage source format"));
        return -1;
    }

    if (!(migrSource = virDomainStorageSourceParseBase(type, format, NULL)))
        return -1;

    /* newer libvirt uses the <source> subelement instead of formatting the
     * source directly into <migrationSource> */
    if ((sourceNode = virXPathNode("./source", ctxt)))
        ctxt->node = sourceNode;

    if (virDomainStorageSourceParse(ctxt->node, ctxt, migrSource,
                                    VIR_DOMAIN_DEF_PARSE_STATUS, xmlopt) < 0)
        return -1;

    /* transfer ownership of the parsed source to the disk private data */
    diskPriv->migrSource = g_steal_pointer(&migrSource);
    return 0;
}
|
|
|
|
|
|
|
|
|
|
|
|
/* qemuDomainObjPrivateXMLParseJobNBD:
 * @vm: domain object whose disk migration state is being restored
 * @ctxt: XPath context positioned inside the job element
 *
 * Restore per-disk NBD migration state from status XML: mark disks listed
 * with migrating='yes' and reload their migration source. If the current
 * async job is not an outgoing migration, such entries are stale and are
 * ignored with a warning.
 *
 * Returns 0 on success, -1 on failure.
 */
static int
qemuDomainObjPrivateXMLParseJobNBD(virDomainObjPtr vm,
                                   xmlXPathContextPtr ctxt)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    g_autofree xmlNodePtr *nodes = NULL;
    size_t i;
    int n;

    if ((n = virXPathNodeSet("./disk[@migrating='yes']", ctxt, &nodes)) < 0)
        return -1;

    if (n > 0) {
        if (priv->job.asyncJob != QEMU_ASYNC_JOB_MIGRATION_OUT) {
            VIR_WARN("Found disks marked for migration but we were not "
                     "migrating");
            /* setting n = 0 skips the loop below, discarding stale entries */
            n = 0;
        }
        for (i = 0; i < n; i++) {
            virDomainDiskDefPtr disk;
            g_autofree char *dst = NULL;

            /* silently skip entries whose target no longer exists */
            if ((dst = virXMLPropString(nodes[i], "dev")) &&
                (disk = virDomainDiskByTarget(vm->def, dst))) {
                QEMU_DOMAIN_DISK_PRIVATE(disk)->migrating = true;

                if (qemuDomainObjPrivateXMLParseJobNBDSource(nodes[i], ctxt,
                                                             disk,
                                                             priv->driver->xmlopt) < 0)
                    return -1;
            }
        }
    }

    return 0;
}
|
|
|
|
|
2020-07-16 11:48:34 +00:00
|
|
|
/* qemuDomainParseJobPrivate:
 * @ctxt: XPath context positioned inside the job element
 * @job: job object whose private data will be populated
 * @vm: domain the job belongs to
 *
 * Counterpart of qemuDomainFormatJobPrivate: parse qemu-specific job state
 * (NBD disk migration data and migration parameters) from status XML.
 *
 * Returns 0 on success, -1 on failure.
 */
static int
qemuDomainParseJobPrivate(xmlXPathContextPtr ctxt,
                          qemuDomainJobObjPtr job,
                          virDomainObjPtr vm)
{
    qemuDomainJobPrivatePtr priv = job->privateData;

    if (qemuDomainObjPrivateXMLParseJobNBD(vm, ctxt) < 0)
        return -1;

    if (qemuMigrationParamsParse(ctxt, &priv->migParams) < 0)
        return -1;

    return 0;
}
|
|
|
|
|
|
|
|
|
|
|
|
/* Callbacks hooking qemu-specific private data into the generic domain
 * job machinery (alloc/free/reset lifecycle plus status-XML round trip). */
static qemuDomainObjPrivateJobCallbacks qemuPrivateJobCallbacks = {
    .allocJobPrivate = qemuJobAllocPrivate,
    .freeJobPrivate = qemuJobFreePrivate,
    .resetJobPrivate = qemuJobResetPrivate,
    .formatJob = qemuDomainFormatJobPrivate,
    .parseJob = qemuDomainParseJobPrivate,
};
|
|
|
|
|
2019-09-20 09:03:08 +00:00
|
|
|
/**
 * qemuDomainObjFromDomain:
 * @domain: Domain pointer that has to be looked up
 *
 * This function looks up @domain and returns the appropriate virDomainObjPtr
 * that has to be released by calling virDomainObjEndAPI().
 *
 * Returns the domain object with incremented reference counter which is locked
 * on success, NULL otherwise.
 */
virDomainObjPtr
qemuDomainObjFromDomain(virDomainPtr domain)
{
    virDomainObjPtr vm;
    virQEMUDriverPtr driver = domain->conn->privateData;
    char uuidstr[VIR_UUID_STRING_BUFLEN];

    vm = virDomainObjListFindByUUID(driver->domains, domain->uuid);
    if (!vm) {
        /* include both UUID and name in the error to aid debugging */
        virUUIDFormat(domain->uuid, uuidstr);
        virReportError(VIR_ERR_NO_DOMAIN,
                       _("no domain with matching uuid '%s' (%s)"),
                       uuidstr, domain->name);
        return NULL;
    }

    return vm;
}
|
|
|
|
|
|
|
|
|
2015-11-12 12:43:29 +00:00
|
|
|
/* Context tracking a QEMU domain's log file, either written directly
 * or routed through the virtlogd log manager daemon. */
struct _qemuDomainLogContext {
    GObject parent;

    int writefd;         /* FD used to append to the log */
    int readfd;          /* Only used if manager == NULL */
    off_t pos;           /* Offset in the log at context creation */
    ino_t inode;         /* Only used if manager != NULL */
    char *path;          /* Path of the log file */
    virLogManagerPtr manager; /* Non-NULL when logging via virtlogd */
};
|
|
|
|
|
2020-03-16 12:10:24 +00:00
|
|
|
G_DEFINE_TYPE(qemuDomainLogContext, qemu_domain_log_context, G_TYPE_OBJECT);
|
2017-06-02 20:50:18 +00:00
|
|
|
static virClassPtr qemuDomainSaveCookieClass;
|
2017-04-03 08:24:36 +00:00
|
|
|
|
2020-03-16 12:10:24 +00:00
|
|
|
static void qemuDomainLogContextFinalize(GObject *obj);
|
2017-06-02 20:50:18 +00:00
|
|
|
static void qemuDomainSaveCookieDispose(void *obj);
|
2017-04-03 08:24:36 +00:00
|
|
|
|
2017-10-17 11:33:12 +00:00
|
|
|
|
2017-04-03 08:24:36 +00:00
|
|
|
/* One-time initializer registering the qemuDomainSaveCookie virObject
 * class; invoked lazily via VIR_ONCE_GLOBAL_INIT(qemuDomain). */
static int
qemuDomainOnceInit(void)
{
    if (!VIR_CLASS_NEW(qemuDomainSaveCookie, virClassForObject()))
        return -1;

    return 0;
}
|
|
|
|
|
2020-03-16 12:10:24 +00:00
|
|
|
/* GObject instance init; nothing to do — fields start zeroed and FDs are
 * filled in by the context constructor. */
static void qemu_domain_log_context_init(qemuDomainLogContext *logctxt G_GNUC_UNUSED)
{
}
|
|
|
|
|
|
|
|
/* GObject class init: install the finalizer that releases the log
 * context's FDs, path and log-manager connection. */
static void qemu_domain_log_context_class_init(qemuDomainLogContextClass *klass)
{
    GObjectClass *gobject_class = G_OBJECT_CLASS(klass);

    gobject_class->finalize = qemuDomainLogContextFinalize;
}
|
|
|
|
|
2019-01-20 17:23:29 +00:00
|
|
|
VIR_ONCE_GLOBAL_INIT(qemuDomain);
|
2017-04-03 08:24:36 +00:00
|
|
|
|
|
|
|
/* GObject finalizer for qemuDomainLogContext: close the log FDs, drop the
 * path string and the log-manager handle, then chain up to the parent. */
static void
qemuDomainLogContextFinalize(GObject *object)
{
    qemuDomainLogContextPtr ctxt = QEMU_DOMAIN_LOG_CONTEXT(object);
    VIR_DEBUG("ctxt=%p", ctxt);

    virLogManagerFree(ctxt->manager);
    VIR_FREE(ctxt->path);
    VIR_FORCE_CLOSE(ctxt->writefd);
    VIR_FORCE_CLOSE(ctxt->readfd);
    /* chain up so GObject releases the instance itself */
    G_OBJECT_CLASS(qemu_domain_log_context_parent_class)->finalize(object);
}
|
|
|
|
|
2016-03-29 22:22:46 +00:00
|
|
|
/* qemuDomainGetMasterKeyFilePath:
 * @libDir: Directory path to domain lib files
 *
 * Generate a path to the domain master key file for libDir
 * ("<libDir>/master-key.aes"). It's up to the caller to handle
 * checking if path exists.
 *
 * Returns path to memory containing the name of the file. It is up to the
 * caller to free; otherwise, NULL on failure (error reported).
 */
char *
qemuDomainGetMasterKeyFilePath(const char *libDir)
{
    if (!libDir) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("invalid path for master key file"));
        return NULL;
    }
    return virFileBuildPath(libDir, "master-key.aes", NULL);
}
|
|
|
|
|
|
|
|
|
|
|
|
/* qemuDomainWriteMasterKeyFile:
 * @driver: qemu driver data
 * @vm: Pointer to the vm object
 *
 * Write the domain's in-memory master key to its per-domain key file and
 * apply the security label so the qemu process can read it. A no-op if no
 * master key was generated (capability absent).
 *
 * Returns 0 on success, -1 on failure with error message indicating failure
 */
int
qemuDomainWriteMasterKeyFile(virQEMUDriverPtr driver,
                             virDomainObjPtr vm)
{
    g_autofree char *path = NULL;
    VIR_AUTOCLOSE fd = -1;
    qemuDomainObjPrivatePtr priv = vm->privateData;

    /* Only gets filled in if we have the capability */
    if (!priv->masterKey)
        return 0;

    if (!(path = qemuDomainGetMasterKeyFilePath(priv->libDir)))
        return -1;

    /* 0600: the key must not be readable by other users */
    if ((fd = open(path, O_WRONLY|O_TRUNC|O_CREAT, 0600)) < 0) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("failed to open domain master key file for write"));
        return -1;
    }

    if (safewrite(fd, priv->masterKey, priv->masterKeyLen) < 0) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("failed to write master key file for domain"));
        return -1;
    }

    /* relabel so the (possibly unprivileged) qemu process can read it */
    if (qemuSecurityDomainSetPathLabel(driver, vm, path, false) < 0)
        return -1;

    return 0;
}
|
|
|
|
|
|
|
|
|
2016-07-08 21:37:24 +00:00
|
|
|
/* Scrub and free the in-memory copy of the domain master key.
 * VIR_DISPOSE_N clears the buffer before freeing so the secret does not
 * linger on the heap. No-op if no key is present. */
static void
qemuDomainMasterKeyFree(qemuDomainObjPrivatePtr priv)
{
    if (!priv->masterKey)
        return;

    VIR_DISPOSE_N(priv->masterKey, priv->masterKeyLen);
}
|
|
|
|
|
2016-03-29 22:22:46 +00:00
|
|
|
/* qemuDomainMasterKeyReadFile:
 * @priv: pointer to domain private object
 *
 * Expected to be called during qemuProcessReconnect once the domain
 * libDir has been generated through qemuStateInitialize calling
 * virDomainObjListLoadAllConfigs which will restore the libDir path
 * to the domain private object.
 *
 * This function will get the path to the master key file and if it
 * exists, it will read the contents of the file saving it in priv->masterKey.
 *
 * Once the file exists, the validity checks may cause failures; however,
 * if the file doesn't exist or the capability doesn't exist, we just
 * return (mostly) quietly.
 *
 * Returns 0 on success or lack of capability
 *        -1 on failure with error message indicating failure
 */
int
qemuDomainMasterKeyReadFile(qemuDomainObjPrivatePtr priv)
{
    g_autofree char *path = NULL;
    int fd = -1;
    uint8_t *masterKey = NULL;
    ssize_t masterKeyLen = 0;

    /* If we don't have the capability, then do nothing. */
    if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_OBJECT_SECRET))
        return 0;

    if (!(path = qemuDomainGetMasterKeyFilePath(priv->libDir)))
        return -1;

    if (!virFileExists(path)) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       _("domain master key file doesn't exist in %s"),
                       priv->libDir);
        goto error;
    }

    if ((fd = open(path, O_RDONLY)) < 0) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("failed to open domain master key file for read"));
        goto error;
    }

    /* read into an oversized buffer so a wrong-sized file is detected
     * by the exact-length check below rather than silently truncated */
    masterKey = g_new0(uint8_t, 1024);

    if ((masterKeyLen = saferead(fd, masterKey, 1024)) < 0) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("unable to read domain master key file"));
        goto error;
    }

    if (masterKeyLen != QEMU_DOMAIN_MASTER_KEY_LEN) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       _("invalid master key read, size=%zd"), masterKeyLen);
        goto error;
    }

    /* shrink the buffer to the actual key length */
    masterKey = g_renew(uint8_t, masterKey, masterKeyLen);

    priv->masterKey = masterKey;
    priv->masterKeyLen = masterKeyLen;

    VIR_FORCE_CLOSE(fd);

    return 0;

 error:
    /* scrub any partially-read key material before freeing */
    if (masterKeyLen > 0)
        memset(masterKey, 0, masterKeyLen);
    VIR_FREE(masterKey);

    VIR_FORCE_CLOSE(fd);

    return -1;
}
|
|
|
|
|
|
|
|
|
|
|
|
/* qemuDomainMasterKeyRemove:
|
|
|
|
* @priv: Pointer to the domain private object
|
|
|
|
*
|
|
|
|
* Remove the traces of the master key, clear the heap, clear the file,
|
|
|
|
* delete the file.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
qemuDomainMasterKeyRemove(qemuDomainObjPrivatePtr priv)
|
|
|
|
{
|
2020-01-09 18:33:44 +00:00
|
|
|
g_autofree char *path = NULL;
|
2016-03-29 22:22:46 +00:00
|
|
|
|
|
|
|
if (!priv->masterKey)
|
|
|
|
return;
|
|
|
|
|
|
|
|
/* Clear the contents */
|
2016-07-08 21:37:24 +00:00
|
|
|
qemuDomainMasterKeyFree(priv);
|
2016-03-29 22:22:46 +00:00
|
|
|
|
|
|
|
/* Delete the master key file */
|
|
|
|
path = qemuDomainGetMasterKeyFilePath(priv->libDir);
|
|
|
|
unlink(path);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/* qemuDomainMasterKeyCreate:
 * @vm: Pointer to the domain object
 *
 * As long as the underlying qemu has the secret capability,
 * generate and store 'raw' in a file a random 32-byte key to
 * be used as a secret shared with qemu to share sensitive data.
 *
 * Returns: 0 on success, -1 w/ error message on failure
 */
int
qemuDomainMasterKeyCreate(virDomainObjPtr vm)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;

    /* If we don't have the capability, then do nothing. */
    if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_OBJECT_SECRET))
        return 0;

    priv->masterKey = g_new0(uint8_t, QEMU_DOMAIN_MASTER_KEY_LEN);
    priv->masterKeyLen = QEMU_DOMAIN_MASTER_KEY_LEN;

    if (virRandomBytes(priv->masterKey, priv->masterKeyLen) < 0) {
        /* scrub-and-free so a partially-filled key never lingers */
        VIR_DISPOSE_N(priv->masterKey, priv->masterKeyLen);
        return -1;
    }

    return 0;
}
|
|
|
|
|
|
|
|
|
2016-04-06 12:56:52 +00:00
|
|
|
/* Clear a plain-text secret: free the username and scrub the secret
 * bytes (VIR_DISPOSE_N zeroes before freeing). */
static void
qemuDomainSecretPlainClear(qemuDomainSecretPlainPtr secret)
{
    VIR_FREE(secret->username);
    VIR_DISPOSE_N(secret->secret, secret->secretlen);
}
|
|
|
|
|
|
|
|
|
2016-05-02 10:43:32 +00:00
|
|
|
/* Clear an AES-encrypted secret.
 * @keepAlias: when true, preserve the qemu object alias so the secret
 * object can still be referenced/detached later. */
static void
qemuDomainSecretAESClear(qemuDomainSecretAESPtr secret,
                         bool keepAlias)
{
    if (!keepAlias)
        VIR_FREE(secret->alias);

    VIR_FREE(secret->username);
    VIR_FREE(secret->iv);
    VIR_FREE(secret->ciphertext);
}
|
|
|
|
|
|
|
|
|
2018-05-28 13:17:01 +00:00
|
|
|
/* Dispatch secret clearing based on the secinfo type.
 * @keepAlias: forwarded to the AES variant (plain secrets have no alias).
 * Safe to call with NULL. */
static void
qemuDomainSecretInfoClear(qemuDomainSecretInfoPtr secinfo,
                          bool keepAlias)
{
    if (!secinfo)
        return;

    /* cast ensures the compiler warns if a new enum value lacks a case */
    switch ((qemuDomainSecretInfoType) secinfo->type) {
    case VIR_DOMAIN_SECRET_INFO_TYPE_PLAIN:
        qemuDomainSecretPlainClear(&secinfo->s.plain);
        break;

    case VIR_DOMAIN_SECRET_INFO_TYPE_AES:
        qemuDomainSecretAESClear(&secinfo->s.aes, keepAlias);
        break;

    case VIR_DOMAIN_SECRET_INFO_TYPE_LAST:
        break;
    }
}
|
|
|
|
|
|
|
|
|
|
|
|
/* Fully clear (including alias) and free a secret info object.
 * Safe to call with NULL (both helpers tolerate it). */
void
qemuDomainSecretInfoFree(qemuDomainSecretInfoPtr secinfo)
{
    qemuDomainSecretInfoClear(secinfo, false);
    g_free(secinfo);
}
|
|
|
|
|
|
|
|
|
2018-05-28 13:17:01 +00:00
|
|
|
/**
 * qemuDomainSecretInfoDestroy:
 * @secinfo: object to destroy
 *
 * Removes any data unnecessary for further use, but keeps alias allocated
 * so the qemu secret object can still be referenced by name.
 */
void
qemuDomainSecretInfoDestroy(qemuDomainSecretInfoPtr secinfo)
{
    qemuDomainSecretInfoClear(secinfo, true);
}
|
|
|
|
|
|
|
|
|
2015-05-13 09:20:36 +00:00
|
|
|
/* virObject class machinery for per-disk private data. */
static virClassPtr qemuDomainDiskPrivateClass;
static void qemuDomainDiskPrivateDispose(void *obj);

/* One-time registration of the qemuDomainDiskPrivate class. */
static int
qemuDomainDiskPrivateOnceInit(void)
{
    if (!VIR_CLASS_NEW(qemuDomainDiskPrivate, virClassForObject()))
        return -1;

    return 0;
}

VIR_ONCE_GLOBAL_INIT(qemuDomainDiskPrivate);

/* Allocate a new per-disk private data object (refcounted virObject). */
static virObjectPtr
qemuDomainDiskPrivateNew(void)
{
    qemuDomainDiskPrivatePtr priv;

    if (qemuDomainDiskPrivateInitialize() < 0)
        return NULL;

    if (!(priv = virObjectNew(qemuDomainDiskPrivateClass)))
        return NULL;

    return (virObjectPtr) priv;
}

/* Dispose callback: release the members owned by the disk private data. */
static void
qemuDomainDiskPrivateDispose(void *obj)
{
    qemuDomainDiskPrivatePtr priv = obj;

    virObjectUnref(priv->migrSource);
    VIR_FREE(priv->qomName);
    VIR_FREE(priv->nodeCopyOnRead);
    virObjectUnref(priv->blockjob);
}
|
2015-05-13 09:20:36 +00:00
|
|
|
|
2017-10-05 13:22:09 +00:00
|
|
|
/* virObject class machinery for per-storage-source private data. */
static virClassPtr qemuDomainStorageSourcePrivateClass;
static void qemuDomainStorageSourcePrivateDispose(void *obj);

/* One-time registration of the qemuDomainStorageSourcePrivate class. */
static int
qemuDomainStorageSourcePrivateOnceInit(void)
{
    if (!VIR_CLASS_NEW(qemuDomainStorageSourcePrivate, virClassForObject()))
        return -1;

    return 0;
}

VIR_ONCE_GLOBAL_INIT(qemuDomainStorageSourcePrivate);

/* Allocate a new storage source private data object. */
virObjectPtr
qemuDomainStorageSourcePrivateNew(void)
{
    qemuDomainStorageSourcePrivatePtr priv;

    if (qemuDomainStorageSourcePrivateInitialize() < 0)
        return NULL;

    if (!(priv = virObjectNew(qemuDomainStorageSourcePrivateClass)))
        return NULL;

    return (virObjectPtr) priv;
}

/* Dispose callback: free all secret-info members held by the source. */
static void
qemuDomainStorageSourcePrivateDispose(void *obj)
{
    qemuDomainStorageSourcePrivatePtr priv = obj;

    g_clear_pointer(&priv->secinfo, qemuDomainSecretInfoFree);
    g_clear_pointer(&priv->encinfo, qemuDomainSecretInfoFree);
    g_clear_pointer(&priv->httpcookie, qemuDomainSecretInfoFree);
    g_clear_pointer(&priv->tlsKeySecret, qemuDomainSecretInfoFree);
}

/* Return @src's private data, lazily allocating it on first use. */
qemuDomainStorageSourcePrivatePtr
qemuDomainStorageSourcePrivateFetch(virStorageSourcePtr src)
{
    if (!src->privateData)
        src->privateData = qemuDomainStorageSourcePrivateNew();

    return QEMU_DOMAIN_STORAGE_SOURCE_PRIVATE(src);
}
|
|
|
|
|
|
|
|
|
2016-06-29 13:40:09 +00:00
|
|
|
/* virObject class machinery for per-vCPU private data. */
static virClassPtr qemuDomainVcpuPrivateClass;
static void qemuDomainVcpuPrivateDispose(void *obj);

/* One-time registration of the qemuDomainVcpuPrivate class. */
static int
qemuDomainVcpuPrivateOnceInit(void)
{
    if (!VIR_CLASS_NEW(qemuDomainVcpuPrivate, virClassForObject()))
        return -1;

    return 0;
}

VIR_ONCE_GLOBAL_INIT(qemuDomainVcpuPrivate);

/* Allocate a new per-vCPU private data object. */
static virObjectPtr
qemuDomainVcpuPrivateNew(void)
{
    qemuDomainVcpuPrivatePtr priv;

    if (qemuDomainVcpuPrivateInitialize() < 0)
        return NULL;

    if (!(priv = virObjectNew(qemuDomainVcpuPrivateClass)))
        return NULL;

    return (virObjectPtr) priv;
}
|
|
|
|
|
|
|
|
|
|
|
|
static void
|
2016-07-31 12:05:04 +00:00
|
|
|
qemuDomainVcpuPrivateDispose(void *obj)
|
2016-06-29 13:40:09 +00:00
|
|
|
{
|
2016-07-31 12:05:04 +00:00
|
|
|
qemuDomainVcpuPrivatePtr priv = obj;
|
|
|
|
|
|
|
|
VIR_FREE(priv->type);
|
|
|
|
VIR_FREE(priv->alias);
|
2019-08-29 12:47:10 +00:00
|
|
|
virJSONValueFree(priv->props);
|
2016-06-29 13:40:09 +00:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2016-10-21 12:31:37 +00:00
|
|
|
/* virObject class machinery for character-device source private data. */
static virClassPtr qemuDomainChrSourcePrivateClass;
static void qemuDomainChrSourcePrivateDispose(void *obj);

/* One-time registration of the qemuDomainChrSourcePrivate class. */
static int
qemuDomainChrSourcePrivateOnceInit(void)
{
    if (!VIR_CLASS_NEW(qemuDomainChrSourcePrivate, virClassForObject()))
        return -1;

    return 0;
}

VIR_ONCE_GLOBAL_INIT(qemuDomainChrSourcePrivate);

/* Allocate a new chardev-source private data object. */
static virObjectPtr
qemuDomainChrSourcePrivateNew(void)
{
    qemuDomainChrSourcePrivatePtr priv;

    if (qemuDomainChrSourcePrivateInitialize() < 0)
        return NULL;

    if (!(priv = virObjectNew(qemuDomainChrSourcePrivateClass)))
        return NULL;

    return (virObjectPtr) priv;
}

/* Dispose callback: free the associated secret info, if any. */
static void
qemuDomainChrSourcePrivateDispose(void *obj)
{
    qemuDomainChrSourcePrivatePtr priv = obj;

    g_clear_pointer(&priv->secinfo, qemuDomainSecretInfoFree);
}
|
|
|
|
|
|
|
|
|
2018-05-18 11:14:42 +00:00
|
|
|
/* virObject class machinery for vsock device private data. */
static virClassPtr qemuDomainVsockPrivateClass;
static void qemuDomainVsockPrivateDispose(void *obj);

/* One-time registration of the qemuDomainVsockPrivate class. */
static int
qemuDomainVsockPrivateOnceInit(void)
{
    if (!VIR_CLASS_NEW(qemuDomainVsockPrivate, virClassForObject()))
        return -1;

    return 0;
}

VIR_ONCE_GLOBAL_INIT(qemuDomainVsockPrivate);

/* Allocate a new vsock private data object; vhostfd starts at -1 so
 * "no FD open" is distinguishable from FD 0. */
static virObjectPtr
qemuDomainVsockPrivateNew(void)
{
    qemuDomainVsockPrivatePtr priv;

    if (qemuDomainVsockPrivateInitialize() < 0)
        return NULL;

    if (!(priv = virObjectNew(qemuDomainVsockPrivateClass)))
        return NULL;

    priv->vhostfd = -1;

    return (virObjectPtr) priv;
}

/* Dispose callback: close the vhost FD if still open.
 * NOTE(review): G_GNUC_UNUSED on @obj looks stale — the parameter is
 * clearly used below; harmless, but could be dropped. */
static void
qemuDomainVsockPrivateDispose(void *obj G_GNUC_UNUSED)
{
    qemuDomainVsockPrivatePtr priv = obj;

    VIR_FORCE_CLOSE(priv->vhostfd);
}
|
|
|
|
|
|
|
|
|
2019-01-10 14:50:11 +00:00
|
|
|
/* Class handle for the graphics device private data; registered by
 * qemuDomainGraphicsPrivateOnceInit() below. */
static virClassPtr qemuDomainGraphicsPrivateClass;
static void qemuDomainGraphicsPrivateDispose(void *obj);
|
|
|
|
|
|
|
|
static int
|
|
|
|
qemuDomainGraphicsPrivateOnceInit(void)
|
|
|
|
{
|
|
|
|
if (!VIR_CLASS_NEW(qemuDomainGraphicsPrivate, virClassForObject()))
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2019-01-20 17:23:29 +00:00
|
|
|
/* Generates qemuDomainGraphicsPrivateInitialize(), which runs
 * qemuDomainGraphicsPrivateOnceInit() at most once. */
VIR_ONCE_GLOBAL_INIT(qemuDomainGraphicsPrivate);
|
2019-01-10 14:50:11 +00:00
|
|
|
|
|
|
|
static virObjectPtr
|
|
|
|
qemuDomainGraphicsPrivateNew(void)
|
|
|
|
{
|
|
|
|
qemuDomainGraphicsPrivatePtr priv;
|
|
|
|
|
|
|
|
if (qemuDomainGraphicsPrivateInitialize() < 0)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
if (!(priv = virObjectNew(qemuDomainGraphicsPrivateClass)))
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
return (virObjectPtr) priv;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
qemuDomainGraphicsPrivateDispose(void *obj)
|
|
|
|
{
|
|
|
|
qemuDomainGraphicsPrivatePtr priv = obj;
|
|
|
|
|
|
|
|
VIR_FREE(priv->tlsAlias);
|
2020-03-06 13:44:43 +00:00
|
|
|
g_clear_pointer(&priv->secinfo, qemuDomainSecretInfoFree);
|
2019-01-10 14:50:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2019-08-08 14:55:02 +00:00
|
|
|
/* Class handle for the network interface private data; registered by
 * qemuDomainNetworkPrivateOnceInit() below. */
static virClassPtr qemuDomainNetworkPrivateClass;
static void qemuDomainNetworkPrivateDispose(void *obj);
|
|
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
qemuDomainNetworkPrivateOnceInit(void)
|
|
|
|
{
|
|
|
|
if (!VIR_CLASS_NEW(qemuDomainNetworkPrivate, virClassForObject()))
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/* Generates qemuDomainNetworkPrivateInitialize(), which runs
 * qemuDomainNetworkPrivateOnceInit() at most once. */
VIR_ONCE_GLOBAL_INIT(qemuDomainNetworkPrivate);
|
|
|
|
|
|
|
|
|
|
|
|
static virObjectPtr
|
|
|
|
qemuDomainNetworkPrivateNew(void)
|
|
|
|
{
|
|
|
|
qemuDomainNetworkPrivatePtr priv;
|
|
|
|
|
|
|
|
if (qemuDomainNetworkPrivateInitialize() < 0)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
if (!(priv = virObjectNew(qemuDomainNetworkPrivateClass)))
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
return (virObjectPtr) priv;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static void
|
2019-10-14 12:45:33 +00:00
|
|
|
qemuDomainNetworkPrivateDispose(void *obj G_GNUC_UNUSED)
|
2019-08-08 14:55:02 +00:00
|
|
|
{
|
2019-08-08 14:55:06 +00:00
|
|
|
qemuDomainNetworkPrivatePtr priv = obj;
|
|
|
|
|
|
|
|
qemuSlirpFree(priv->slirp);
|
2019-08-08 14:55:02 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2019-12-10 12:53:10 +00:00
|
|
|
/* Class handle for the filesystem device private data; registered by
 * qemuDomainFSPrivateOnceInit() below. */
static virClassPtr qemuDomainFSPrivateClass;
static void qemuDomainFSPrivateDispose(void *obj);
|
|
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
qemuDomainFSPrivateOnceInit(void)
|
|
|
|
{
|
|
|
|
if (!VIR_CLASS_NEW(qemuDomainFSPrivate, virClassForObject()))
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/* Generates qemuDomainFSPrivateInitialize(), which runs
 * qemuDomainFSPrivateOnceInit() at most once. */
VIR_ONCE_GLOBAL_INIT(qemuDomainFSPrivate);
|
|
|
|
|
|
|
|
|
|
|
|
static virObjectPtr
|
|
|
|
qemuDomainFSPrivateNew(void)
|
|
|
|
{
|
|
|
|
qemuDomainFSPrivatePtr priv;
|
|
|
|
|
|
|
|
if (qemuDomainFSPrivateInitialize() < 0)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
if (!(priv = virObjectNew(qemuDomainFSPrivateClass)))
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
return (virObjectPtr) priv;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static void
|
2019-11-01 11:34:52 +00:00
|
|
|
qemuDomainFSPrivateDispose(void *obj)
|
2019-12-10 12:53:10 +00:00
|
|
|
{
|
2019-11-01 11:34:52 +00:00
|
|
|
qemuDomainFSPrivatePtr priv = obj;
|
|
|
|
|
|
|
|
g_free(priv->vhostuser_fs_sock);
|
2019-12-10 12:53:10 +00:00
|
|
|
}
|
|
|
|
|
2019-09-23 10:44:36 +00:00
|
|
|
/* Class handle for the video device private data; registered by
 * qemuDomainVideoPrivateOnceInit() below. */
static virClassPtr qemuDomainVideoPrivateClass;
static void qemuDomainVideoPrivateDispose(void *obj);
|
|
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
qemuDomainVideoPrivateOnceInit(void)
|
|
|
|
{
|
|
|
|
if (!VIR_CLASS_NEW(qemuDomainVideoPrivate, virClassForObject()))
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Generates qemuDomainVideoPrivateInitialize(), which runs
 * qemuDomainVideoPrivateOnceInit() at most once. */
VIR_ONCE_GLOBAL_INIT(qemuDomainVideoPrivate);
|
|
|
|
|
|
|
|
|
|
|
|
static virObjectPtr
|
|
|
|
qemuDomainVideoPrivateNew(void)
|
|
|
|
{
|
|
|
|
qemuDomainVideoPrivatePtr priv;
|
|
|
|
|
|
|
|
if (qemuDomainVideoPrivateInitialize() < 0)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
if (!(priv = virObjectNew(qemuDomainVideoPrivateClass)))
|
|
|
|
return NULL;
|
|
|
|
|
2019-09-27 16:34:44 +00:00
|
|
|
priv->vhost_user_fd = -1;
|
|
|
|
|
2019-09-23 10:44:36 +00:00
|
|
|
return (virObjectPtr) priv;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static void
|
2019-09-23 10:44:37 +00:00
|
|
|
qemuDomainVideoPrivateDispose(void *obj)
|
2019-09-23 10:44:36 +00:00
|
|
|
{
|
2019-09-23 10:44:37 +00:00
|
|
|
qemuDomainVideoPrivatePtr priv = obj;
|
|
|
|
|
|
|
|
VIR_FORCE_CLOSE(priv->vhost_user_fd);
|
2019-09-23 10:44:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2016-04-06 19:00:59 +00:00
|
|
|
/* qemuDomainSecretPlainSetup:
|
|
|
|
* @secinfo: Pointer to secret info
|
2017-03-08 19:37:05 +00:00
|
|
|
* @usageType: The virSecretUsageType
|
2016-06-02 18:27:08 +00:00
|
|
|
* @username: username to use for authentication (may be NULL)
|
|
|
|
* @seclookupdef: Pointer to seclookupdef data
|
2016-04-06 19:00:59 +00:00
|
|
|
*
|
|
|
|
* Taking a secinfo, fill in the plaintext information
|
|
|
|
*
|
|
|
|
* Returns 0 on success, -1 on failure with error message
|
|
|
|
*/
|
|
|
|
static int
|
2018-02-09 16:14:41 +00:00
|
|
|
qemuDomainSecretPlainSetup(qemuDomainSecretInfoPtr secinfo,
|
2017-03-08 19:37:05 +00:00
|
|
|
virSecretUsageType usageType,
|
2016-06-02 18:27:08 +00:00
|
|
|
const char *username,
|
|
|
|
virSecretLookupTypeDefPtr seclookupdef)
|
2016-04-06 19:00:59 +00:00
|
|
|
{
|
2020-01-09 18:33:45 +00:00
|
|
|
g_autoptr(virConnect) conn = virGetConnectSecret();
|
2018-02-09 16:14:41 +00:00
|
|
|
int ret = -1;
|
|
|
|
|
|
|
|
if (!conn)
|
|
|
|
return -1;
|
|
|
|
|
2016-05-02 11:20:32 +00:00
|
|
|
secinfo->type = VIR_DOMAIN_SECRET_INFO_TYPE_PLAIN;
|
2019-10-20 11:49:46 +00:00
|
|
|
secinfo->s.plain.username = g_strdup(username);
|
2016-04-06 19:00:59 +00:00
|
|
|
|
2018-02-09 16:14:41 +00:00
|
|
|
ret = virSecretGetSecretString(conn, seclookupdef, usageType,
|
|
|
|
&secinfo->s.plain.secret,
|
|
|
|
&secinfo->s.plain.secretlen);
|
|
|
|
|
|
|
|
return ret;
|
2016-04-06 19:00:59 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
qemu: Utilize qemu secret objects for RBD auth/secret
https://bugzilla.redhat.com/show_bug.cgi?id=1182074
If they're available and we need to pass secrets to qemu, then use the
qemu domain secret object in order to pass the secrets for RBD volumes
instead of passing the base64 encoded secret on the command line.
The goal is to make AES secrets the default and have no user interaction
required in order to allow using the AES mechanism. If the mechanism
is not available, then fall back to the current plain mechanism using
a base64 encoded secret.
New APIs:
qemu_domain.c:
qemuDomainGetSecretAESAlias:
Generate/return the secret object alias for an AES Secret Info type.
This will be called from qemuDomainSecretAESSetup.
qemuDomainSecretAESSetup: (private)
This API handles the details of the generation of the AES secret
and saves the pieces that need to be passed to qemu in order for
the secret to be decrypted. The encrypted secret based upon the
domain master key, an initialization vector (16 byte random value),
and the stored secret. Finally, the requirement from qemu is the IV
and encrypted secret are to be base64 encoded.
qemu_command.c:
qemuBuildSecretInfoProps: (private)
Generate/return a JSON properties object for the AES secret to
be used by both the command building and eventually the hotplug
code in order to add the secret object. Code was designed so that
in the future perhaps hotplug could use it if it made sense.
qemuBuildObjectSecretCommandLine (private)
Generate and add to the command line the -object secret for the
secret. This will be required for the subsequent RBD reference
to the object.
qemuBuildDiskSecinfoCommandLine (private)
Handle adding the AES secret object.
Adjustments:
qemu_domain.c:
The qemuDomainSecretSetup was altered to call either the AES or Plain
Setup functions based upon whether AES secrets are possible (we have
the encryption API) or not, we have secrets, and of course if the
protocol source is RBD.
qemu_command.c:
Adjust the qemuBuildRBDSecinfoURI API's in order to generate the
specific command options for an AES secret, such as:
-object secret,id=$alias,keyid=$masterKey,data=$base64encodedencrypted,
format=base64
-drive file=rbd:pool/image:id=myname:auth_supported=cephx\;none:\
mon_host=mon1.example.org\:6321,password-secret=$alias,...
where the 'id=' value is the secret object alias generated by
concatenating the disk alias and "-aesKey0". The 'keyid= $masterKey'
is the master key shared with qemu, and the -drive syntax will
reference that alias as the 'password-secret'. For the -drive
syntax, the 'id=myname' is kept to define the username, while the
'key=$base64 encoded secret' is removed.
While according to the syntax described for qemu commit '60390a21'
or as seen in the email archive:
https://lists.gnu.org/archive/html/qemu-devel/2016-01/msg04083.html
it is possible to pass a plaintext password via a file, the qemu
commit 'ac1d8878' describes the more feature rich 'keyid=' option
based upon the shared masterKey.
Add tests for checking/comparing output.
NB: For hotplug, since the hotplug code doesn't add command line
arguments, passing the encoded secret directly to the monitor
will suffice.
2016-04-11 15:26:14 +00:00
|
|
|
/* qemuDomainSecretAESSetup:
|
|
|
|
* @priv: pointer to domain private object
|
2020-03-16 09:37:26 +00:00
|
|
|
* @alias: alias of the secret
|
|
|
|
* @username: username to use (may be NULL)
|
|
|
|
* @secret: secret data
|
|
|
|
* @secretlen: length of @secret
|
qemu: Utilize qemu secret objects for RBD auth/secret
https://bugzilla.redhat.com/show_bug.cgi?id=1182074
If they're available and we need to pass secrets to qemu, then use the
qemu domain secret object in order to pass the secrets for RBD volumes
instead of passing the base64 encoded secret on the command line.
The goal is to make AES secrets the default and have no user interaction
required in order to allow using the AES mechanism. If the mechanism
is not available, then fall back to the current plain mechanism using
a base64 encoded secret.
New APIs:
qemu_domain.c:
qemuDomainGetSecretAESAlias:
Generate/return the secret object alias for an AES Secret Info type.
This will be called from qemuDomainSecretAESSetup.
qemuDomainSecretAESSetup: (private)
This API handles the details of the generation of the AES secret
and saves the pieces that need to be passed to qemu in order for
the secret to be decrypted. The encrypted secret based upon the
domain master key, an initialization vector (16 byte random value),
and the stored secret. Finally, the requirement from qemu is the IV
and encrypted secret are to be base64 encoded.
qemu_command.c:
qemuBuildSecretInfoProps: (private)
Generate/return a JSON properties object for the AES secret to
be used by both the command building and eventually the hotplug
code in order to add the secret object. Code was designed so that
in the future perhaps hotplug could use it if it made sense.
qemuBuildObjectSecretCommandLine (private)
Generate and add to the command line the -object secret for the
secret. This will be required for the subsequent RBD reference
to the object.
qemuBuildDiskSecinfoCommandLine (private)
Handle adding the AES secret object.
Adjustments:
qemu_domain.c:
The qemuDomainSecretSetup was altered to call either the AES or Plain
Setup functions based upon whether AES secrets are possible (we have
the encryption API) or not, we have secrets, and of course if the
protocol source is RBD.
qemu_command.c:
Adjust the qemuBuildRBDSecinfoURI API's in order to generate the
specific command options for an AES secret, such as:
-object secret,id=$alias,keyid=$masterKey,data=$base64encodedencrypted,
format=base64
-drive file=rbd:pool/image:id=myname:auth_supported=cephx\;none:\
mon_host=mon1.example.org\:6321,password-secret=$alias,...
where the 'id=' value is the secret object alias generated by
concatenating the disk alias and "-aesKey0". The 'keyid= $masterKey'
is the master key shared with qemu, and the -drive syntax will
reference that alias as the 'password-secret'. For the -drive
syntax, the 'id=myname' is kept to define the username, while the
'key=$base64 encoded secret' is removed.
While according to the syntax described for qemu commit '60390a21'
or as seen in the email archive:
https://lists.gnu.org/archive/html/qemu-devel/2016-01/msg04083.html
it is possible to pass a plaintext password via a file, the qemu
commit 'ac1d8878' describes the more feature rich 'keyid=' option
based upon the shared masterKey.
Add tests for checking/comparing output.
NB: For hotplug, since the hotplug code doesn't add command line
arguments, passing the encoded secret directly to the monitor
will suffice.
2016-04-11 15:26:14 +00:00
|
|
|
*
|
2020-03-16 09:37:26 +00:00
|
|
|
* Encrypts @secret for use with qemu.
|
qemu: Utilize qemu secret objects for RBD auth/secret
https://bugzilla.redhat.com/show_bug.cgi?id=1182074
If they're available and we need to pass secrets to qemu, then use the
qemu domain secret object in order to pass the secrets for RBD volumes
instead of passing the base64 encoded secret on the command line.
The goal is to make AES secrets the default and have no user interaction
required in order to allow using the AES mechanism. If the mechanism
is not available, then fall back to the current plain mechanism using
a base64 encoded secret.
New APIs:
qemu_domain.c:
qemuDomainGetSecretAESAlias:
Generate/return the secret object alias for an AES Secret Info type.
This will be called from qemuDomainSecretAESSetup.
qemuDomainSecretAESSetup: (private)
This API handles the details of the generation of the AES secret
and saves the pieces that need to be passed to qemu in order for
the secret to be decrypted. The encrypted secret based upon the
domain master key, an initialization vector (16 byte random value),
and the stored secret. Finally, the requirement from qemu is the IV
and encrypted secret are to be base64 encoded.
qemu_command.c:
qemuBuildSecretInfoProps: (private)
Generate/return a JSON properties object for the AES secret to
be used by both the command building and eventually the hotplug
code in order to add the secret object. Code was designed so that
in the future perhaps hotplug could use it if it made sense.
qemuBuildObjectSecretCommandLine (private)
Generate and add to the command line the -object secret for the
secret. This will be required for the subsequent RBD reference
to the object.
qemuBuildDiskSecinfoCommandLine (private)
Handle adding the AES secret object.
Adjustments:
qemu_domain.c:
The qemuDomainSecretSetup was altered to call either the AES or Plain
Setup functions based upon whether AES secrets are possible (we have
the encryption API) or not, we have secrets, and of course if the
protocol source is RBD.
qemu_command.c:
Adjust the qemuBuildRBDSecinfoURI API's in order to generate the
specific command options for an AES secret, such as:
-object secret,id=$alias,keyid=$masterKey,data=$base64encodedencrypted,
format=base64
-drive file=rbd:pool/image:id=myname:auth_supported=cephx\;none:\
mon_host=mon1.example.org\:6321,password-secret=$alias,...
where the 'id=' value is the secret object alias generated by
concatenating the disk alias and "-aesKey0". The 'keyid= $masterKey'
is the master key shared with qemu, and the -drive syntax will
reference that alias as the 'password-secret'. For the -drive
syntax, the 'id=myname' is kept to define the username, while the
'key=$base64 encoded secret' is removed.
While according to the syntax described for qemu commit '60390a21'
or as seen in the email archive:
https://lists.gnu.org/archive/html/qemu-devel/2016-01/msg04083.html
it is possible to pass a plaintext password via a file, the qemu
commit 'ac1d8878' describes the more feature rich 'keyid=' option
based upon the shared masterKey.
Add tests for checking/comparing output.
NB: For hotplug, since the hotplug code doesn't add command line
arguments, passing the encoded secret directly to the monitor
will suffice.
2016-04-11 15:26:14 +00:00
|
|
|
*
|
2020-03-16 09:23:24 +00:00
|
|
|
* Returns qemuDomainSecretInfoPtr filled with the necessary information.
|
qemu: Utilize qemu secret objects for RBD auth/secret
https://bugzilla.redhat.com/show_bug.cgi?id=1182074
If they're available and we need to pass secrets to qemu, then use the
qemu domain secret object in order to pass the secrets for RBD volumes
instead of passing the base64 encoded secret on the command line.
The goal is to make AES secrets the default and have no user interaction
required in order to allow using the AES mechanism. If the mechanism
is not available, then fall back to the current plain mechanism using
a base64 encoded secret.
New APIs:
qemu_domain.c:
qemuDomainGetSecretAESAlias:
Generate/return the secret object alias for an AES Secret Info type.
This will be called from qemuDomainSecretAESSetup.
qemuDomainSecretAESSetup: (private)
This API handles the details of the generation of the AES secret
and saves the pieces that need to be passed to qemu in order for
the secret to be decrypted. The encrypted secret based upon the
domain master key, an initialization vector (16 byte random value),
and the stored secret. Finally, the requirement from qemu is the IV
and encrypted secret are to be base64 encoded.
qemu_command.c:
qemuBuildSecretInfoProps: (private)
Generate/return a JSON properties object for the AES secret to
be used by both the command building and eventually the hotplug
code in order to add the secret object. Code was designed so that
in the future perhaps hotplug could use it if it made sense.
qemuBuildObjectSecretCommandLine (private)
Generate and add to the command line the -object secret for the
secret. This will be required for the subsequent RBD reference
to the object.
qemuBuildDiskSecinfoCommandLine (private)
Handle adding the AES secret object.
Adjustments:
qemu_domain.c:
The qemuDomainSecretSetup was altered to call either the AES or Plain
Setup functions based upon whether AES secrets are possible (we have
the encryption API) or not, we have secrets, and of course if the
protocol source is RBD.
qemu_command.c:
Adjust the qemuBuildRBDSecinfoURI API's in order to generate the
specific command options for an AES secret, such as:
-object secret,id=$alias,keyid=$masterKey,data=$base64encodedencrypted,
format=base64
-drive file=rbd:pool/image:id=myname:auth_supported=cephx\;none:\
mon_host=mon1.example.org\:6321,password-secret=$alias,...
where the 'id=' value is the secret object alias generated by
concatenating the disk alias and "-aesKey0". The 'keyid= $masterKey'
is the master key shared with qemu, and the -drive syntax will
reference that alias as the 'password-secret'. For the -drive
syntax, the 'id=myname' is kept to define the username, while the
'key=$base64 encoded secret' is removed.
While according to the syntax described for qemu commit '60390a21'
or as seen in the email archive:
https://lists.gnu.org/archive/html/qemu-devel/2016-01/msg04083.html
it is possible to pass a plaintext password via a file, the qemu
commit 'ac1d8878' describes the more feature rich 'keyid=' option
based upon the shared masterKey.
Add tests for checking/comparing output.
NB: For hotplug, since the hotplug code doesn't add command line
arguments, passing the encoded secret directly to the monitor
will suffice.
2016-04-11 15:26:14 +00:00
|
|
|
*/
|
2020-03-16 09:23:24 +00:00
|
|
|
static qemuDomainSecretInfoPtr
|
2018-02-09 16:14:41 +00:00
|
|
|
qemuDomainSecretAESSetup(qemuDomainObjPrivatePtr priv,
|
2020-03-16 09:37:26 +00:00
|
|
|
const char *alias,
|
2016-06-02 18:27:08 +00:00
|
|
|
const char *username,
|
2020-03-16 09:37:26 +00:00
|
|
|
uint8_t *secret,
|
|
|
|
size_t secretlen)
|
qemu: Utilize qemu secret objects for RBD auth/secret
https://bugzilla.redhat.com/show_bug.cgi?id=1182074
If they're available and we need to pass secrets to qemu, then use the
qemu domain secret object in order to pass the secrets for RBD volumes
instead of passing the base64 encoded secret on the command line.
The goal is to make AES secrets the default and have no user interaction
required in order to allow using the AES mechanism. If the mechanism
is not available, then fall back to the current plain mechanism using
a base64 encoded secret.
New APIs:
qemu_domain.c:
qemuDomainGetSecretAESAlias:
Generate/return the secret object alias for an AES Secret Info type.
This will be called from qemuDomainSecretAESSetup.
qemuDomainSecretAESSetup: (private)
This API handles the details of the generation of the AES secret
and saves the pieces that need to be passed to qemu in order for
the secret to be decrypted. The encrypted secret based upon the
domain master key, an initialization vector (16 byte random value),
and the stored secret. Finally, the requirement from qemu is the IV
and encrypted secret are to be base64 encoded.
qemu_command.c:
qemuBuildSecretInfoProps: (private)
Generate/return a JSON properties object for the AES secret to
be used by both the command building and eventually the hotplug
code in order to add the secret object. Code was designed so that
in the future perhaps hotplug could use it if it made sense.
qemuBuildObjectSecretCommandLine (private)
Generate and add to the command line the -object secret for the
secret. This will be required for the subsequent RBD reference
to the object.
qemuBuildDiskSecinfoCommandLine (private)
Handle adding the AES secret object.
Adjustments:
qemu_domain.c:
The qemuDomainSecretSetup was altered to call either the AES or Plain
Setup functions based upon whether AES secrets are possible (we have
the encryption API) or not, we have secrets, and of course if the
protocol source is RBD.
qemu_command.c:
Adjust the qemuBuildRBDSecinfoURI API's in order to generate the
specific command options for an AES secret, such as:
-object secret,id=$alias,keyid=$masterKey,data=$base64encodedencrypted,
format=base64
-drive file=rbd:pool/image:id=myname:auth_supported=cephx\;none:\
mon_host=mon1.example.org\:6321,password-secret=$alias,...
where the 'id=' value is the secret object alias generated by
concatenating the disk alias and "-aesKey0". The 'keyid= $masterKey'
is the master key shared with qemu, and the -drive syntax will
reference that alias as the 'password-secret'. For the -drive
syntax, the 'id=myname' is kept to define the username, while the
'key=$base64 encoded secret' is removed.
While according to the syntax described for qemu commit '60390a21'
or as seen in the email archive:
https://lists.gnu.org/archive/html/qemu-devel/2016-01/msg04083.html
it is possible to pass a plaintext password via a file, the qemu
commit 'ac1d8878' describes the more feature rich 'keyid=' option
based upon the shared masterKey.
Add tests for checking/comparing output.
NB: For hotplug, since the hotplug code doesn't add command line
arguments, passing the encoded secret directly to the monitor
will suffice.
2016-04-11 15:26:14 +00:00
|
|
|
{
|
2020-03-16 09:23:24 +00:00
|
|
|
g_autoptr(qemuDomainSecretInfo) secinfo = NULL;
|
2020-03-16 09:13:38 +00:00
|
|
|
g_autofree uint8_t *raw_iv = NULL;
|
qemu: Utilize qemu secret objects for RBD auth/secret
https://bugzilla.redhat.com/show_bug.cgi?id=1182074
If they're available and we need to pass secrets to qemu, then use the
qemu domain secret object in order to pass the secrets for RBD volumes
instead of passing the base64 encoded secret on the command line.
The goal is to make AES secrets the default and have no user interaction
required in order to allow using the AES mechanism. If the mechanism
is not available, then fall back to the current plain mechanism using
a base64 encoded secret.
New APIs:
qemu_domain.c:
qemuDomainGetSecretAESAlias:
Generate/return the secret object alias for an AES Secret Info type.
This will be called from qemuDomainSecretAESSetup.
qemuDomainSecretAESSetup: (private)
This API handles the details of the generation of the AES secret
and saves the pieces that need to be passed to qemu in order for
the secret to be decrypted. The encrypted secret based upon the
domain master key, an initialization vector (16 byte random value),
and the stored secret. Finally, the requirement from qemu is the IV
and encrypted secret are to be base64 encoded.
qemu_command.c:
qemuBuildSecretInfoProps: (private)
Generate/return a JSON properties object for the AES secret to
be used by both the command building and eventually the hotplug
code in order to add the secret object. Code was designed so that
in the future perhaps hotplug could use it if it made sense.
qemuBuildObjectSecretCommandLine (private)
Generate and add to the command line the -object secret for the
secret. This will be required for the subsequent RBD reference
to the object.
qemuBuildDiskSecinfoCommandLine (private)
Handle adding the AES secret object.
Adjustments:
qemu_domain.c:
The qemuDomainSecretSetup was altered to call either the AES or Plain
Setup functions based upon whether AES secrets are possible (we have
the encryption API) or not, we have secrets, and of course if the
protocol source is RBD.
qemu_command.c:
Adjust the qemuBuildRBDSecinfoURI API's in order to generate the
specific command options for an AES secret, such as:
-object secret,id=$alias,keyid=$masterKey,data=$base64encodedencrypted,
format=base64
-drive file=rbd:pool/image:id=myname:auth_supported=cephx\;none:\
mon_host=mon1.example.org\:6321,password-secret=$alias,...
where the 'id=' value is the secret object alias generated by
concatenating the disk alias and "-aesKey0". The 'keyid= $masterKey'
is the master key shared with qemu, and the -drive syntax will
reference that alias as the 'password-secret'. For the -drive
syntax, the 'id=myname' is kept to define the username, while the
'key=$base64 encoded secret' is removed.
While according to the syntax described for qemu commit '60390a21'
or as seen in the email archive:
https://lists.gnu.org/archive/html/qemu-devel/2016-01/msg04083.html
it is possible to pass a plaintext password via a file, the qemu
commit 'ac1d8878' describes the more feature rich 'keyid=' option
based upon the shared masterKey.
Add tests for checking/comparing output.
NB: For hotplug, since the hotplug code doesn't add command line
arguments, passing the encoded secret directly to the monitor
will suffice.
2016-04-11 15:26:14 +00:00
|
|
|
size_t ivlen = QEMU_DOMAIN_AES_IV_LEN;
|
2020-03-16 09:13:38 +00:00
|
|
|
g_autofree uint8_t *ciphertext = NULL;
|
qemu: Utilize qemu secret objects for RBD auth/secret
https://bugzilla.redhat.com/show_bug.cgi?id=1182074
If they're available and we need to pass secrets to qemu, then use the
qemu domain secret object in order to pass the secrets for RBD volumes
instead of passing the base64 encoded secret on the command line.
The goal is to make AES secrets the default and have no user interaction
required in order to allow using the AES mechanism. If the mechanism
is not available, then fall back to the current plain mechanism using
a base64 encoded secret.
New APIs:
qemu_domain.c:
qemuDomainGetSecretAESAlias:
Generate/return the secret object alias for an AES Secret Info type.
This will be called from qemuDomainSecretAESSetup.
qemuDomainSecretAESSetup: (private)
This API handles the details of the generation of the AES secret
and saves the pieces that need to be passed to qemu in order for
the secret to be decrypted. The encrypted secret based upon the
domain master key, an initialization vector (16 byte random value),
and the stored secret. Finally, the requirement from qemu is the IV
and encrypted secret are to be base64 encoded.
qemu_command.c:
qemuBuildSecretInfoProps: (private)
Generate/return a JSON properties object for the AES secret to
be used by both the command building and eventually the hotplug
code in order to add the secret object. Code was designed so that
in the future perhaps hotplug could use it if it made sense.
qemuBuildObjectSecretCommandLine (private)
Generate and add to the command line the -object secret for the
secret. This will be required for the subsequent RBD reference
to the object.
qemuBuildDiskSecinfoCommandLine (private)
Handle adding the AES secret object.
Adjustments:
qemu_domain.c:
The qemuDomainSecretSetup was altered to call either the AES or Plain
Setup functions based upon whether AES secrets are possible (we have
the encryption API) or not, we have secrets, and of course if the
protocol source is RBD.
qemu_command.c:
Adjust the qemuBuildRBDSecinfoURI API's in order to generate the
specific command options for an AES secret, such as:
-object secret,id=$alias,keyid=$masterKey,data=$base64encodedencrypted,
format=base64
-drive file=rbd:pool/image:id=myname:auth_supported=cephx\;none:\
mon_host=mon1.example.org\:6321,password-secret=$alias,...
where the 'id=' value is the secret object alias generated by
concatenating the disk alias and "-aesKey0". The 'keyid= $masterKey'
is the master key shared with qemu, and the -drive syntax will
reference that alias as the 'password-secret'. For the -drive
syntax, the 'id=myname' is kept to define the username, while the
'key=$base64 encoded secret' is removed.
While according to the syntax described for qemu commit '60390a21'
or as seen in the email archive:
https://lists.gnu.org/archive/html/qemu-devel/2016-01/msg04083.html
it is possible to pass a plaintext password via a file, the qemu
commit 'ac1d8878' describes the more feature rich 'keyid=' option
based upon the shared masterKey.
Add tests for checking/comparing output.
NB: For hotplug, since the hotplug code doesn't add command line
arguments, passing the encoded secret directly to the monitor
will suffice.
2016-04-11 15:26:14 +00:00
|
|
|
size_t ciphertextlen = 0;
|
|
|
|
|
2020-03-16 09:23:24 +00:00
|
|
|
if (!qemuDomainSupportsEncryptedSecret(priv)) {
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
|
|
|
|
_("encrypted secrets are not supported"));
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
secinfo = g_new0(qemuDomainSecretInfo, 1);
|
2018-02-09 16:14:41 +00:00
|
|
|
|
qemu: Utilize qemu secret objects for RBD auth/secret
https://bugzilla.redhat.com/show_bug.cgi?id=1182074
If they're available and we need to pass secrets to qemu, then use the
qemu domain secret object in order to pass the secrets for RBD volumes
instead of passing the base64 encoded secret on the command line.
The goal is to make AES secrets the default and have no user interaction
required in order to allow using the AES mechanism. If the mechanism
is not available, then fall back to the current plain mechanism using
a base64 encoded secret.
New APIs:
qemu_domain.c:
qemuDomainGetSecretAESAlias:
Generate/return the secret object alias for an AES Secret Info type.
This will be called from qemuDomainSecretAESSetup.
qemuDomainSecretAESSetup: (private)
This API handles the details of the generation of the AES secret
and saves the pieces that need to be passed to qemu in order for
the secret to be decrypted. The encrypted secret based upon the
domain master key, an initialization vector (16 byte random value),
and the stored secret. Finally, the requirement from qemu is the IV
and encrypted secret are to be base64 encoded.
qemu_command.c:
qemuBuildSecretInfoProps: (private)
Generate/return a JSON properties object for the AES secret to
be used by both the command building and eventually the hotplug
code in order to add the secret object. Code was designed so that
in the future perhaps hotplug could use it if it made sense.
qemuBuildObjectSecretCommandLine (private)
Generate and add to the command line the -object secret for the
secret. This will be required for the subsequent RBD reference
to the object.
qemuBuildDiskSecinfoCommandLine (private)
Handle adding the AES secret object.
Adjustments:
qemu_domain.c:
The qemuDomainSecretSetup was altered to call either the AES or Plain
Setup functions based upon whether AES secrets are possible (we have
the encryption API) or not, we have secrets, and of course if the
protocol source is RBD.
qemu_command.c:
Adjust the qemuBuildRBDSecinfoURI API's in order to generate the
specific command options for an AES secret, such as:
-object secret,id=$alias,keyid=$masterKey,data=$base64encodedencrypted,
format=base64
-drive file=rbd:pool/image:id=myname:auth_supported=cephx\;none:\
mon_host=mon1.example.org\:6321,password-secret=$alias,...
where the 'id=' value is the secret object alias generated by
concatenating the disk alias and "-aesKey0". The 'keyid= $masterKey'
is the master key shared with qemu, and the -drive syntax will
reference that alias as the 'password-secret'. For the -drive
syntax, the 'id=myname' is kept to define the username, while the
'key=$base64 encoded secret' is removed.
While according to the syntax described for qemu commit '60390a21'
or as seen in the email archive:
https://lists.gnu.org/archive/html/qemu-devel/2016-01/msg04083.html
it is possible to pass a plaintext password via a file, the qemu
commit 'ac1d8878' describes the more feature rich 'keyid=' option
based upon the shared masterKey.
Add tests for checking/comparing output.
NB: For hotplug, since the hotplug code doesn't add command line
arguments, passing the encoded secret directly to the monitor
will suffice.
2016-04-11 15:26:14 +00:00
|
|
|
secinfo->type = VIR_DOMAIN_SECRET_INFO_TYPE_AES;
|
2020-03-16 09:37:26 +00:00
|
|
|
secinfo->s.aes.alias = g_strdup(alias);
|
2019-10-20 11:49:46 +00:00
|
|
|
secinfo->s.aes.username = g_strdup(username);
|
qemu: Utilize qemu secret objects for RBD auth/secret
https://bugzilla.redhat.com/show_bug.cgi?id=1182074
If they're available and we need to pass secrets to qemu, then use the
qemu domain secret object in order to pass the secrets for RBD volumes
instead of passing the base64 encoded secret on the command line.
The goal is to make AES secrets the default and have no user interaction
required in order to allow using the AES mechanism. If the mechanism
is not available, then fall back to the current plain mechanism using
a base64 encoded secret.
New APIs:
qemu_domain.c:
qemuDomainGetSecretAESAlias:
Generate/return the secret object alias for an AES Secret Info type.
This will be called from qemuDomainSecretAESSetup.
qemuDomainSecretAESSetup: (private)
This API handles the details of the generation of the AES secret
and saves the pieces that need to be passed to qemu in order for
the secret to be decrypted. The encrypted secret based upon the
domain master key, an initialization vector (16 byte random value),
and the stored secret. Finally, the requirement from qemu is the IV
and encrypted secret are to be base64 encoded.
qemu_command.c:
qemuBuildSecretInfoProps: (private)
Generate/return a JSON properties object for the AES secret to
be used by both the command building and eventually the hotplug
code in order to add the secret object. Code was designed so that
in the future perhaps hotplug could use it if it made sense.
qemuBuildObjectSecretCommandLine (private)
Generate and add to the command line the -object secret for the
secret. This will be required for the subsequent RBD reference
to the object.
qemuBuildDiskSecinfoCommandLine (private)
Handle adding the AES secret object.
Adjustments:
qemu_domain.c:
The qemuDomainSecretSetup was altered to call either the AES or Plain
Setup functions based upon whether AES secrets are possible (we have
the encryption API) or not, we have secrets, and of course if the
protocol source is RBD.
qemu_command.c:
Adjust the qemuBuildRBDSecinfoURI API's in order to generate the
specific command options for an AES secret, such as:
-object secret,id=$alias,keyid=$masterKey,data=$base64encodedencrypted,
format=base64
-drive file=rbd:pool/image:id=myname:auth_supported=cephx\;none:\
mon_host=mon1.example.org\:6321,password-secret=$alias,...
where the 'id=' value is the secret object alias generated by
concatenating the disk alias and "-aesKey0". The 'keyid= $masterKey'
is the master key shared with qemu, and the -drive syntax will
reference that alias as the 'password-secret'. For the -drive
syntax, the 'id=myname' is kept to define the username, while the
'key=$base64 encoded secret' is removed.
While according to the syntax described for qemu commit '60390a21'
or as seen in the email archive:
https://lists.gnu.org/archive/html/qemu-devel/2016-01/msg04083.html
it is possible to pass a plaintext password via a file, the qemu
commit 'ac1d8878' describes the more feature rich 'keyid=' option
based upon the shared masterKey.
Add tests for checking/comparing output.
NB: For hotplug, since the hotplug code doesn't add command line
arguments, passing the encoded secret directly to the monitor
will suffice.
2016-04-11 15:26:14 +00:00
|
|
|
|
2020-03-16 09:13:38 +00:00
|
|
|
raw_iv = g_new0(uint8_t, ivlen);
|
2018-05-29 05:46:32 +00:00
|
|
|
|
qemu: Utilize qemu secret objects for RBD auth/secret
https://bugzilla.redhat.com/show_bug.cgi?id=1182074
If they're available and we need to pass secrets to qemu, then use the
qemu domain secret object in order to pass the secrets for RBD volumes
instead of passing the base64 encoded secret on the command line.
The goal is to make AES secrets the default and have no user interaction
required in order to allow using the AES mechanism. If the mechanism
is not available, then fall back to the current plain mechanism using
a base64 encoded secret.
New APIs:
qemu_domain.c:
qemuDomainGetSecretAESAlias:
Generate/return the secret object alias for an AES Secret Info type.
This will be called from qemuDomainSecretAESSetup.
qemuDomainSecretAESSetup: (private)
This API handles the details of the generation of the AES secret
and saves the pieces that need to be passed to qemu in order for
the secret to be decrypted. The encrypted secret based upon the
domain master key, an initialization vector (16 byte random value),
and the stored secret. Finally, the requirement from qemu is the IV
and encrypted secret are to be base64 encoded.
qemu_command.c:
qemuBuildSecretInfoProps: (private)
Generate/return a JSON properties object for the AES secret to
be used by both the command building and eventually the hotplug
code in order to add the secret object. Code was designed so that
in the future perhaps hotplug could use it if it made sense.
qemuBuildObjectSecretCommandLine (private)
Generate and add to the command line the -object secret for the
secret. This will be required for the subsequent RBD reference
to the object.
qemuBuildDiskSecinfoCommandLine (private)
Handle adding the AES secret object.
Adjustments:
qemu_domain.c:
The qemuDomainSecretSetup was altered to call either the AES or Plain
Setup functions based upon whether AES secrets are possible (we have
the encryption API) or not, we have secrets, and of course if the
protocol source is RBD.
qemu_command.c:
Adjust the qemuBuildRBDSecinfoURI API's in order to generate the
specific command options for an AES secret, such as:
-object secret,id=$alias,keyid=$masterKey,data=$base64encodedencrypted,
format=base64
-drive file=rbd:pool/image:id=myname:auth_supported=cephx\;none:\
mon_host=mon1.example.org\:6321,password-secret=$alias,...
where the 'id=' value is the secret object alias generated by
concatenating the disk alias and "-aesKey0". The 'keyid= $masterKey'
is the master key shared with qemu, and the -drive syntax will
reference that alias as the 'password-secret'. For the -drive
syntax, the 'id=myname' is kept to define the username, while the
'key=$base64 encoded secret' is removed.
While according to the syntax described for qemu commit '60390a21'
or as seen in the email archive:
https://lists.gnu.org/archive/html/qemu-devel/2016-01/msg04083.html
it is possible to pass a plaintext password via a file, the qemu
commit 'ac1d8878' describes the more feature rich 'keyid=' option
based upon the shared masterKey.
Add tests for checking/comparing output.
NB: For hotplug, since the hotplug code doesn't add command line
arguments, passing the encoded secret directly to the monitor
will suffice.
2016-04-11 15:26:14 +00:00
|
|
|
/* Create a random initialization vector */
|
2018-05-29 08:01:38 +00:00
|
|
|
if (virRandomBytes(raw_iv, ivlen) < 0)
|
2020-03-16 09:23:24 +00:00
|
|
|
return NULL;
|
qemu: Utilize qemu secret objects for RBD auth/secret
https://bugzilla.redhat.com/show_bug.cgi?id=1182074
If they're available and we need to pass secrets to qemu, then use the
qemu domain secret object in order to pass the secrets for RBD volumes
instead of passing the base64 encoded secret on the command line.
The goal is to make AES secrets the default and have no user interaction
required in order to allow using the AES mechanism. If the mechanism
is not available, then fall back to the current plain mechanism using
a base64 encoded secret.
New APIs:
qemu_domain.c:
qemuDomainGetSecretAESAlias:
Generate/return the secret object alias for an AES Secret Info type.
This will be called from qemuDomainSecretAESSetup.
qemuDomainSecretAESSetup: (private)
This API handles the details of the generation of the AES secret
and saves the pieces that need to be passed to qemu in order for
the secret to be decrypted. The encrypted secret based upon the
domain master key, an initialization vector (16 byte random value),
and the stored secret. Finally, the requirement from qemu is the IV
and encrypted secret are to be base64 encoded.
qemu_command.c:
qemuBuildSecretInfoProps: (private)
Generate/return a JSON properties object for the AES secret to
be used by both the command building and eventually the hotplug
code in order to add the secret object. Code was designed so that
in the future perhaps hotplug could use it if it made sense.
qemuBuildObjectSecretCommandLine (private)
Generate and add to the command line the -object secret for the
secret. This will be required for the subsequent RBD reference
to the object.
qemuBuildDiskSecinfoCommandLine (private)
Handle adding the AES secret object.
Adjustments:
qemu_domain.c:
The qemuDomainSecretSetup was altered to call either the AES or Plain
Setup functions based upon whether AES secrets are possible (we have
the encryption API) or not, we have secrets, and of course if the
protocol source is RBD.
qemu_command.c:
Adjust the qemuBuildRBDSecinfoURI API's in order to generate the
specific command options for an AES secret, such as:
-object secret,id=$alias,keyid=$masterKey,data=$base64encodedencrypted,
format=base64
-drive file=rbd:pool/image:id=myname:auth_supported=cephx\;none:\
mon_host=mon1.example.org\:6321,password-secret=$alias,...
where the 'id=' value is the secret object alias generated by
concatenating the disk alias and "-aesKey0". The 'keyid= $masterKey'
is the master key shared with qemu, and the -drive syntax will
reference that alias as the 'password-secret'. For the -drive
syntax, the 'id=myname' is kept to define the username, while the
'key=$base64 encoded secret' is removed.
While according to the syntax described for qemu commit '60390a21'
or as seen in the email archive:
https://lists.gnu.org/archive/html/qemu-devel/2016-01/msg04083.html
it is possible to pass a plaintext password via a file, the qemu
commit 'ac1d8878' describes the more feature rich 'keyid=' option
based upon the shared masterKey.
Add tests for checking/comparing output.
NB: For hotplug, since the hotplug code doesn't add command line
arguments, passing the encoded secret directly to the monitor
will suffice.
2016-04-11 15:26:14 +00:00
|
|
|
|
|
|
|
/* Encode the IV and save that since qemu will need it */
|
2019-09-16 12:29:20 +00:00
|
|
|
secinfo->s.aes.iv = g_base64_encode(raw_iv, ivlen);
|
qemu: Utilize qemu secret objects for RBD auth/secret
https://bugzilla.redhat.com/show_bug.cgi?id=1182074
If they're available and we need to pass secrets to qemu, then use the
qemu domain secret object in order to pass the secrets for RBD volumes
instead of passing the base64 encoded secret on the command line.
The goal is to make AES secrets the default and have no user interaction
required in order to allow using the AES mechanism. If the mechanism
is not available, then fall back to the current plain mechanism using
a base64 encoded secret.
New APIs:
qemu_domain.c:
qemuDomainGetSecretAESAlias:
Generate/return the secret object alias for an AES Secret Info type.
This will be called from qemuDomainSecretAESSetup.
qemuDomainSecretAESSetup: (private)
This API handles the details of the generation of the AES secret
and saves the pieces that need to be passed to qemu in order for
the secret to be decrypted. The encrypted secret based upon the
domain master key, an initialization vector (16 byte random value),
and the stored secret. Finally, the requirement from qemu is the IV
and encrypted secret are to be base64 encoded.
qemu_command.c:
qemuBuildSecretInfoProps: (private)
Generate/return a JSON properties object for the AES secret to
be used by both the command building and eventually the hotplug
code in order to add the secret object. Code was designed so that
in the future perhaps hotplug could use it if it made sense.
qemuBuildObjectSecretCommandLine (private)
Generate and add to the command line the -object secret for the
secret. This will be required for the subsequent RBD reference
to the object.
qemuBuildDiskSecinfoCommandLine (private)
Handle adding the AES secret object.
Adjustments:
qemu_domain.c:
The qemuDomainSecretSetup was altered to call either the AES or Plain
Setup functions based upon whether AES secrets are possible (we have
the encryption API) or not, we have secrets, and of course if the
protocol source is RBD.
qemu_command.c:
Adjust the qemuBuildRBDSecinfoURI API's in order to generate the
specific command options for an AES secret, such as:
-object secret,id=$alias,keyid=$masterKey,data=$base64encodedencrypted,
format=base64
-drive file=rbd:pool/image:id=myname:auth_supported=cephx\;none:\
mon_host=mon1.example.org\:6321,password-secret=$alias,...
where the 'id=' value is the secret object alias generated by
concatenating the disk alias and "-aesKey0". The 'keyid= $masterKey'
is the master key shared with qemu, and the -drive syntax will
reference that alias as the 'password-secret'. For the -drive
syntax, the 'id=myname' is kept to define the username, while the
'key=$base64 encoded secret' is removed.
While according to the syntax described for qemu commit '60390a21'
or as seen in the email archive:
https://lists.gnu.org/archive/html/qemu-devel/2016-01/msg04083.html
it is possible to pass a plaintext password via a file, the qemu
commit 'ac1d8878' describes the more feature rich 'keyid=' option
based upon the shared masterKey.
Add tests for checking/comparing output.
NB: For hotplug, since the hotplug code doesn't add command line
arguments, passing the encoded secret directly to the monitor
will suffice.
2016-04-11 15:26:14 +00:00
|
|
|
|
|
|
|
if (virCryptoEncryptData(VIR_CRYPTO_CIPHER_AES256CBC,
|
|
|
|
priv->masterKey, QEMU_DOMAIN_MASTER_KEY_LEN,
|
|
|
|
raw_iv, ivlen, secret, secretlen,
|
|
|
|
&ciphertext, &ciphertextlen) < 0)
|
2020-03-16 09:37:26 +00:00
|
|
|
return NULL;
|
qemu: Utilize qemu secret objects for RBD auth/secret
https://bugzilla.redhat.com/show_bug.cgi?id=1182074
If they're available and we need to pass secrets to qemu, then use the
qemu domain secret object in order to pass the secrets for RBD volumes
instead of passing the base64 encoded secret on the command line.
The goal is to make AES secrets the default and have no user interaction
required in order to allow using the AES mechanism. If the mechanism
is not available, then fall back to the current plain mechanism using
a base64 encoded secret.
New APIs:
qemu_domain.c:
qemuDomainGetSecretAESAlias:
Generate/return the secret object alias for an AES Secret Info type.
This will be called from qemuDomainSecretAESSetup.
qemuDomainSecretAESSetup: (private)
This API handles the details of the generation of the AES secret
and saves the pieces that need to be passed to qemu in order for
the secret to be decrypted. The encrypted secret based upon the
domain master key, an initialization vector (16 byte random value),
and the stored secret. Finally, the requirement from qemu is the IV
and encrypted secret are to be base64 encoded.
qemu_command.c:
qemuBuildSecretInfoProps: (private)
Generate/return a JSON properties object for the AES secret to
be used by both the command building and eventually the hotplug
code in order to add the secret object. Code was designed so that
in the future perhaps hotplug could use it if it made sense.
qemuBuildObjectSecretCommandLine (private)
Generate and add to the command line the -object secret for the
secret. This will be required for the subsequent RBD reference
to the object.
qemuBuildDiskSecinfoCommandLine (private)
Handle adding the AES secret object.
Adjustments:
qemu_domain.c:
The qemuDomainSecretSetup was altered to call either the AES or Plain
Setup functions based upon whether AES secrets are possible (we have
the encryption API) or not, we have secrets, and of course if the
protocol source is RBD.
qemu_command.c:
Adjust the qemuBuildRBDSecinfoURI API's in order to generate the
specific command options for an AES secret, such as:
-object secret,id=$alias,keyid=$masterKey,data=$base64encodedencrypted,
format=base64
-drive file=rbd:pool/image:id=myname:auth_supported=cephx\;none:\
mon_host=mon1.example.org\:6321,password-secret=$alias,...
where the 'id=' value is the secret object alias generated by
concatenating the disk alias and "-aesKey0". The 'keyid= $masterKey'
is the master key shared with qemu, and the -drive syntax will
reference that alias as the 'password-secret'. For the -drive
syntax, the 'id=myname' is kept to define the username, while the
'key=$base64 encoded secret' is removed.
While according to the syntax described for qemu commit '60390a21'
or as seen in the email archive:
https://lists.gnu.org/archive/html/qemu-devel/2016-01/msg04083.html
it is possible to pass a plaintext password via a file, the qemu
commit 'ac1d8878' describes the more feature rich 'keyid=' option
based upon the shared masterKey.
Add tests for checking/comparing output.
NB: For hotplug, since the hotplug code doesn't add command line
arguments, passing the encoded secret directly to the monitor
will suffice.
2016-04-11 15:26:14 +00:00
|
|
|
|
|
|
|
/* Now encode the ciphertext and store to be passed to qemu */
|
2019-09-16 12:29:20 +00:00
|
|
|
secinfo->s.aes.ciphertext = g_base64_encode(ciphertext,
|
|
|
|
ciphertextlen);
|
qemu: Utilize qemu secret objects for RBD auth/secret
https://bugzilla.redhat.com/show_bug.cgi?id=1182074
If they're available and we need to pass secrets to qemu, then use the
qemu domain secret object in order to pass the secrets for RBD volumes
instead of passing the base64 encoded secret on the command line.
The goal is to make AES secrets the default and have no user interaction
required in order to allow using the AES mechanism. If the mechanism
is not available, then fall back to the current plain mechanism using
a base64 encoded secret.
New APIs:
qemu_domain.c:
qemuDomainGetSecretAESAlias:
Generate/return the secret object alias for an AES Secret Info type.
This will be called from qemuDomainSecretAESSetup.
qemuDomainSecretAESSetup: (private)
This API handles the details of the generation of the AES secret
and saves the pieces that need to be passed to qemu in order for
the secret to be decrypted. The encrypted secret based upon the
domain master key, an initialization vector (16 byte random value),
and the stored secret. Finally, the requirement from qemu is the IV
and encrypted secret are to be base64 encoded.
qemu_command.c:
qemuBuildSecretInfoProps: (private)
Generate/return a JSON properties object for the AES secret to
be used by both the command building and eventually the hotplug
code in order to add the secret object. Code was designed so that
in the future perhaps hotplug could use it if it made sense.
qemuBuildObjectSecretCommandLine (private)
Generate and add to the command line the -object secret for the
secret. This will be required for the subsequent RBD reference
to the object.
qemuBuildDiskSecinfoCommandLine (private)
Handle adding the AES secret object.
Adjustments:
qemu_domain.c:
The qemuDomainSecretSetup was altered to call either the AES or Plain
Setup functions based upon whether AES secrets are possible (we have
the encryption API) or not, we have secrets, and of course if the
protocol source is RBD.
qemu_command.c:
Adjust the qemuBuildRBDSecinfoURI API's in order to generate the
specific command options for an AES secret, such as:
-object secret,id=$alias,keyid=$masterKey,data=$base64encodedencrypted,
format=base64
-drive file=rbd:pool/image:id=myname:auth_supported=cephx\;none:\
mon_host=mon1.example.org\:6321,password-secret=$alias,...
where the 'id=' value is the secret object alias generated by
concatenating the disk alias and "-aesKey0". The 'keyid= $masterKey'
is the master key shared with qemu, and the -drive syntax will
reference that alias as the 'password-secret'. For the -drive
syntax, the 'id=myname' is kept to define the username, while the
'key=$base64 encoded secret' is removed.
While according to the syntax described for qemu commit '60390a21'
or as seen in the email archive:
https://lists.gnu.org/archive/html/qemu-devel/2016-01/msg04083.html
it is possible to pass a plaintext password via a file, the qemu
commit 'ac1d8878' describes the more feature rich 'keyid=' option
based upon the shared masterKey.
Add tests for checking/comparing output.
NB: For hotplug, since the hotplug code doesn't add command line
arguments, passing the encoded secret directly to the monitor
will suffice.
2016-04-11 15:26:14 +00:00
|
|
|
|
2020-03-16 09:23:24 +00:00
|
|
|
return g_steal_pointer(&secinfo);
|
2020-03-16 09:37:26 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
* qemuDomainSecretAESSetupFromSecret:
|
|
|
|
* @priv: pointer to domain private object
|
|
|
|
* @srcalias: Alias of the disk/hostdev used to generate the secret alias
|
2020-03-09 05:56:04 +00:00
|
|
|
* @secretuse: specific usage for the secret (may be NULL if main object is using it)
|
2020-03-16 09:37:26 +00:00
|
|
|
* @usageType: The virSecretUsageType
|
|
|
|
* @username: username to use for authentication (may be NULL)
|
|
|
|
* @seclookupdef: Pointer to seclookupdef data
|
|
|
|
*
|
|
|
|
* Looks up a secret in the secret driver based on @usageType and @seclookupdef
|
2020-03-09 05:56:04 +00:00
|
|
|
* and builds qemuDomainSecretInfoPtr from it. @secretuse describes the usage of the
|
|
|
|
* secret in case if @srcalias requires more secrets for various usage cases.
|
2020-03-16 09:37:26 +00:00
|
|
|
*/
|
|
|
|
static qemuDomainSecretInfoPtr
|
|
|
|
qemuDomainSecretAESSetupFromSecret(qemuDomainObjPrivatePtr priv,
|
|
|
|
const char *srcalias,
|
2020-03-09 05:56:04 +00:00
|
|
|
const char *secretuse,
|
2020-03-16 09:37:26 +00:00
|
|
|
virSecretUsageType usageType,
|
|
|
|
const char *username,
|
2020-03-09 05:56:04 +00:00
|
|
|
virSecretLookupTypeDefPtr seclookupdef)
|
2020-03-16 09:37:26 +00:00
|
|
|
{
|
|
|
|
g_autoptr(virConnect) conn = virGetConnectSecret();
|
|
|
|
qemuDomainSecretInfoPtr secinfo;
|
2020-03-09 05:56:04 +00:00
|
|
|
g_autofree char *alias = qemuAliasForSecret(srcalias, secretuse);
|
2020-03-16 09:37:26 +00:00
|
|
|
uint8_t *secret = NULL;
|
|
|
|
size_t secretlen = 0;
|
|
|
|
|
|
|
|
if (!conn)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
if (virSecretGetSecretString(conn, seclookupdef, usageType,
|
|
|
|
&secret, &secretlen) < 0)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
secinfo = qemuDomainSecretAESSetup(priv, alias, username, secret, secretlen);
|
qemu: Utilize qemu secret objects for RBD auth/secret
https://bugzilla.redhat.com/show_bug.cgi?id=1182074
If they're available and we need to pass secrets to qemu, then use the
qemu domain secret object in order to pass the secrets for RBD volumes
instead of passing the base64 encoded secret on the command line.
The goal is to make AES secrets the default and have no user interaction
required in order to allow using the AES mechanism. If the mechanism
is not available, then fall back to the current plain mechanism using
a base64 encoded secret.
New APIs:
qemu_domain.c:
qemuDomainGetSecretAESAlias:
Generate/return the secret object alias for an AES Secret Info type.
This will be called from qemuDomainSecretAESSetup.
qemuDomainSecretAESSetup: (private)
This API handles the details of the generation of the AES secret
and saves the pieces that need to be passed to qemu in order for
the secret to be decrypted. The encrypted secret based upon the
domain master key, an initialization vector (16 byte random value),
and the stored secret. Finally, the requirement from qemu is the IV
and encrypted secret are to be base64 encoded.
qemu_command.c:
qemuBuildSecretInfoProps: (private)
Generate/return a JSON properties object for the AES secret to
be used by both the command building and eventually the hotplug
code in order to add the secret object. Code was designed so that
in the future perhaps hotplug could use it if it made sense.
qemuBuildObjectSecretCommandLine (private)
Generate and add to the command line the -object secret for the
secret. This will be required for the subsequent RBD reference
to the object.
qemuBuildDiskSecinfoCommandLine (private)
Handle adding the AES secret object.
Adjustments:
qemu_domain.c:
The qemuDomainSecretSetup was altered to call either the AES or Plain
Setup functions based upon whether AES secrets are possible (we have
the encryption API) or not, we have secrets, and of course if the
protocol source is RBD.
qemu_command.c:
Adjust the qemuBuildRBDSecinfoURI API's in order to generate the
specific command options for an AES secret, such as:
-object secret,id=$alias,keyid=$masterKey,data=$base64encodedencrypted,
format=base64
-drive file=rbd:pool/image:id=myname:auth_supported=cephx\;none:\
mon_host=mon1.example.org\:6321,password-secret=$alias,...
where the 'id=' value is the secret object alias generated by
concatenating the disk alias and "-aesKey0". The 'keyid= $masterKey'
is the master key shared with qemu, and the -drive syntax will
reference that alias as the 'password-secret'. For the -drive
syntax, the 'id=myname' is kept to define the username, while the
'key=$base64 encoded secret' is removed.
While according to the syntax described for qemu commit '60390a21'
or as seen in the email archive:
https://lists.gnu.org/archive/html/qemu-devel/2016-01/msg04083.html
it is possible to pass a plaintext password via a file, the qemu
commit 'ac1d8878' describes the more feature rich 'keyid=' option
based upon the shared masterKey.
Add tests for checking/comparing output.
NB: For hotplug, since the hotplug code doesn't add command line
arguments, passing the encoded secret directly to the monitor
will suffice.
2016-04-11 15:26:14 +00:00
|
|
|
|
|
|
|
VIR_DISPOSE_N(secret, secretlen);
|
2020-03-16 09:37:26 +00:00
|
|
|
|
|
|
|
return secinfo;
|
qemu: Utilize qemu secret objects for RBD auth/secret
https://bugzilla.redhat.com/show_bug.cgi?id=1182074
If they're available and we need to pass secrets to qemu, then use the
qemu domain secret object in order to pass the secrets for RBD volumes
instead of passing the base64 encoded secret on the command line.
The goal is to make AES secrets the default and have no user interaction
required in order to allow using the AES mechanism. If the mechanism
is not available, then fall back to the current plain mechanism using
a base64 encoded secret.
New APIs:
qemu_domain.c:
qemuDomainGetSecretAESAlias:
Generate/return the secret object alias for an AES Secret Info type.
This will be called from qemuDomainSecretAESSetup.
qemuDomainSecretAESSetup: (private)
This API handles the details of the generation of the AES secret
and saves the pieces that need to be passed to qemu in order for
the secret to be decrypted. The encrypted secret based upon the
domain master key, an initialization vector (16 byte random value),
and the stored secret. Finally, the requirement from qemu is the IV
and encrypted secret are to be base64 encoded.
qemu_command.c:
qemuBuildSecretInfoProps: (private)
Generate/return a JSON properties object for the AES secret to
be used by both the command building and eventually the hotplug
code in order to add the secret object. Code was designed so that
in the future perhaps hotplug could use it if it made sense.
qemuBuildObjectSecretCommandLine (private)
Generate and add to the command line the -object secret for the
secret. This will be required for the subsequent RBD reference
to the object.
qemuBuildDiskSecinfoCommandLine (private)
Handle adding the AES secret object.
Adjustments:
qemu_domain.c:
The qemuDomainSecretSetup was altered to call either the AES or Plain
Setup functions based upon whether AES secrets are possible (we have
the encryption API) or not, we have secrets, and of course if the
protocol source is RBD.
qemu_command.c:
Adjust the qemuBuildRBDSecinfoURI API's in order to generate the
specific command options for an AES secret, such as:
-object secret,id=$alias,keyid=$masterKey,data=$base64encodedencrypted,
format=base64
-drive file=rbd:pool/image:id=myname:auth_supported=cephx\;none:\
mon_host=mon1.example.org\:6321,password-secret=$alias,...
where the 'id=' value is the secret object alias generated by
concatenating the disk alias and "-aesKey0". The 'keyid= $masterKey'
is the master key shared with qemu, and the -drive syntax will
reference that alias as the 'password-secret'. For the -drive
syntax, the 'id=myname' is kept to define the username, while the
'key=$base64 encoded secret' is removed.
While according to the syntax described for qemu commit '60390a21'
or as seen in the email archive:
https://lists.gnu.org/archive/html/qemu-devel/2016-01/msg04083.html
it is possible to pass a plaintext password via a file, the qemu
commit 'ac1d8878' describes the more feature rich 'keyid=' option
based upon the shared masterKey.
Add tests for checking/comparing output.
NB: For hotplug, since the hotplug code doesn't add command line
arguments, passing the encoded secret directly to the monitor
will suffice.
2016-04-11 15:26:14 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2018-05-22 14:36:20 +00:00
|
|
|
/**
|
|
|
|
* qemuDomainSupportsEncryptedSecret:
|
|
|
|
* @priv: qemu domain private data
|
|
|
|
*
|
|
|
|
* Returns true if libvirt can use encrypted 'secret' objects with VM which
|
|
|
|
* @priv belongs to.
|
|
|
|
*/
|
|
|
|
bool
|
|
|
|
qemuDomainSupportsEncryptedSecret(qemuDomainObjPrivatePtr priv)
|
|
|
|
{
|
|
|
|
return virCryptoHaveCipher(VIR_CRYPTO_CIPHER_AES256CBC) &&
|
|
|
|
virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_OBJECT_SECRET) &&
|
|
|
|
priv->masterKey;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2018-05-22 11:08:19 +00:00
|
|
|
/* qemuDomainSecretInfoNewPlain:
|
2017-02-20 20:04:58 +00:00
|
|
|
* @usageType: Secret usage type
|
2018-05-22 15:34:11 +00:00
|
|
|
* @username: username
|
|
|
|
* @lookupDef: lookup def describing secret
|
2017-02-20 20:04:58 +00:00
|
|
|
*
|
2018-05-22 11:08:19 +00:00
|
|
|
* Helper function to create a secinfo to be used for secinfo consumers. This
|
2018-05-22 15:34:11 +00:00
|
|
|
* sets up a 'plain' (unencrypted) secret for legacy consumers.
|
2017-02-20 20:04:58 +00:00
|
|
|
*
|
|
|
|
* Returns @secinfo on success, NULL on failure. Caller is responsible
|
|
|
|
* to eventually free @secinfo.
|
|
|
|
*/
|
|
|
|
static qemuDomainSecretInfoPtr
|
2018-05-22 15:34:11 +00:00
|
|
|
qemuDomainSecretInfoNewPlain(virSecretUsageType usageType,
|
2018-05-22 11:08:19 +00:00
|
|
|
const char *username,
|
2018-05-22 15:34:11 +00:00
|
|
|
virSecretLookupTypeDefPtr lookupDef)
|
2017-02-20 20:04:58 +00:00
|
|
|
{
|
|
|
|
qemuDomainSecretInfoPtr secinfo = NULL;
|
|
|
|
|
2020-10-05 10:26:10 +00:00
|
|
|
secinfo = g_new0(qemuDomainSecretInfo, 1);
|
2017-02-20 20:04:58 +00:00
|
|
|
|
2018-05-22 15:34:11 +00:00
|
|
|
if (qemuDomainSecretPlainSetup(secinfo, usageType, username, lookupDef) < 0) {
|
2020-03-06 13:44:43 +00:00
|
|
|
g_clear_pointer(&secinfo, qemuDomainSecretInfoFree);
|
2018-05-22 15:34:11 +00:00
|
|
|
return NULL;
|
2017-02-20 20:04:58 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return secinfo;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2017-03-01 19:14:40 +00:00
|
|
|
/**
|
|
|
|
* qemuDomainSecretInfoTLSNew:
|
|
|
|
* @priv: pointer to domain private object
|
|
|
|
* @srcAlias: Alias base to use for TLS object
|
|
|
|
* @secretUUID: Provide a secretUUID value to look up/create the secretInfo
|
|
|
|
*
|
|
|
|
* Using the passed @secretUUID, generate a seclookupdef that can be used
|
|
|
|
* to generate the returned qemuDomainSecretInfoPtr for a TLS based secret.
|
|
|
|
*
|
|
|
|
* Returns qemuDomainSecretInfoPtr or NULL on error.
|
|
|
|
*/
|
2017-03-03 12:22:16 +00:00
|
|
|
qemuDomainSecretInfoPtr
|
2018-02-09 16:14:41 +00:00
|
|
|
qemuDomainSecretInfoTLSNew(qemuDomainObjPrivatePtr priv,
|
2017-03-01 19:14:40 +00:00
|
|
|
const char *srcAlias,
|
|
|
|
const char *secretUUID)
|
|
|
|
{
|
|
|
|
virSecretLookupTypeDef seclookupdef = {0};
|
|
|
|
|
|
|
|
if (virUUIDParse(secretUUID, seclookupdef.u.uuid) < 0) {
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
|
|
|
|
_("malformed TLS secret uuid '%s' provided"),
|
|
|
|
secretUUID);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
seclookupdef.type = VIR_SECRET_LOOKUP_TYPE_UUID;
|
|
|
|
|
2020-03-09 05:56:04 +00:00
|
|
|
return qemuDomainSecretAESSetupFromSecret(priv, srcAlias, NULL,
|
2020-03-16 09:42:36 +00:00
|
|
|
VIR_SECRET_USAGE_TYPE_TLS,
|
2020-03-09 05:56:04 +00:00
|
|
|
NULL, &seclookupdef);
|
2017-03-01 19:14:40 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2016-04-06 19:00:59 +00:00
|
|
|
void
|
|
|
|
qemuDomainSecretDiskDestroy(virDomainDiskDefPtr disk)
|
|
|
|
{
|
2018-05-28 13:31:42 +00:00
|
|
|
qemuDomainStorageSourcePrivatePtr srcPriv;
|
|
|
|
virStorageSourcePtr n;
|
2016-04-06 19:00:59 +00:00
|
|
|
|
2018-05-28 13:31:42 +00:00
|
|
|
for (n = disk->src; virStorageSourceIsBacking(n); n = n->backingStore) {
|
|
|
|
if ((srcPriv = QEMU_DOMAIN_STORAGE_SOURCE_PRIVATE(n))) {
|
|
|
|
qemuDomainSecretInfoDestroy(srcPriv->secinfo);
|
|
|
|
qemuDomainSecretInfoDestroy(srcPriv->encinfo);
|
2020-06-29 13:10:42 +00:00
|
|
|
qemuDomainSecretInfoDestroy(srcPriv->tlsKeySecret);
|
2018-05-28 13:31:42 +00:00
|
|
|
}
|
|
|
|
}
|
2016-04-06 19:00:59 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2016-06-24 19:47:09 +00:00
|
|
|
bool
|
2018-05-22 07:18:34 +00:00
|
|
|
qemuDomainStorageSourceHasAuth(virStorageSourcePtr src)
|
2016-06-24 19:47:09 +00:00
|
|
|
{
|
|
|
|
if (!virStorageSourceIsEmpty(src) &&
|
|
|
|
virStorageSourceGetActualType(src) == VIR_STORAGE_TYPE_NETWORK &&
|
|
|
|
src->auth &&
|
|
|
|
(src->protocol == VIR_STORAGE_NET_PROTOCOL_ISCSI ||
|
|
|
|
src->protocol == VIR_STORAGE_NET_PROTOCOL_RBD))
|
|
|
|
return true;
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2020-06-29 14:00:03 +00:00
|
|
|
static bool
|
2016-12-22 12:12:49 +00:00
|
|
|
qemuDomainDiskHasEncryptionSecret(virStorageSourcePtr src)
|
|
|
|
{
|
|
|
|
if (!virStorageSourceIsEmpty(src) && src->encryption &&
|
|
|
|
src->encryption->format == VIR_STORAGE_ENCRYPTION_FORMAT_LUKS &&
|
|
|
|
src->encryption->nsecrets > 0)
|
|
|
|
return true;
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2020-03-09 08:04:33 +00:00
|
|
|
/* qemuDomainSecretStorageSourcePrepareCookies:
 * @priv: pointer to domain private object
 * @src: storage source whose HTTP cookies should be wrapped
 * @aliasprotocol: alias prefix of the protocol layer, used to derive the
 *                 'httpcookie' secret object alias
 *
 * Wraps the HTTP cookie string of @src in an encrypted 'secret' object so
 * that the cookies are not exposed in plain text.
 *
 * NOTE(review): assumes the cookie string returned by
 * qemuBlockStorageSourceGetCookieString() is non-NULL — strlen(NULL) would
 * crash. The only visible caller gates this on src->ncookies != 0; confirm
 * that guarantees a non-NULL string.
 */
static qemuDomainSecretInfoPtr
qemuDomainSecretStorageSourcePrepareCookies(qemuDomainObjPrivatePtr priv,
                                            virStorageSourcePtr src,
                                            const char *aliasprotocol)
{
    g_autofree char *secretalias = qemuAliasForSecret(aliasprotocol, "httpcookie");
    g_autofree char *cookies = qemuBlockStorageSourceGetCookieString(src);

    return qemuDomainSecretAESSetup(priv, secretalias, NULL,
                                    (uint8_t *) cookies, strlen(cookies));
}
|
|
|
|
|
|
|
|
|
2017-10-20 11:50:23 +00:00
|
|
|
/**
 * qemuDomainSecretStorageSourcePrepare:
 * @priv: domain private object
 * @src: storage source struct to setup
 * @aliasprotocol: prefix of the alias for secret holding authentication data
 * @aliasformat: prefix of the alias for secret holding encryption password
 *
 * Prepares data necessary for encryption and authentication of @src. The two
 * alias prefixes are provided since in the backing chain authentication belongs
 * to the storage protocol data whereas encryption is relevant to the format
 * driver in qemu. The two will have different node names.
 *
 * Returns 0 on success; -1 on error while reporting an libvirt error.
 */
static int
qemuDomainSecretStorageSourcePrepare(qemuDomainObjPrivatePtr priv,
                                     virStorageSourcePtr src,
                                     const char *aliasprotocol,
                                     const char *aliasformat)
{
    qemuDomainStorageSourcePrivatePtr srcPriv;
    bool iscsiHasPS = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_ISCSI_PASSWORD_SECRET);
    bool hasAuth = qemuDomainStorageSourceHasAuth(src);
    bool hasEnc = qemuDomainDiskHasEncryptionSecret(src);

    /* nothing to do when the source needs no auth, encryption or cookies */
    if (!hasAuth && !hasEnc && src->ncookies == 0)
        return 0;

    if (!(src->privateData = qemuDomainStorageSourcePrivateNew()))
        return -1;

    srcPriv = QEMU_DOMAIN_STORAGE_SOURCE_PRIVATE(src);

    if (hasAuth) {
        virSecretUsageType usageType = VIR_SECRET_USAGE_TYPE_ISCSI;

        if (src->protocol == VIR_STORAGE_NET_PROTOCOL_RBD)
            usageType = VIR_SECRET_USAGE_TYPE_CEPH;

        /* fall back to a plain-text secret when qemu can't consume an
         * encrypted one, or when iSCSI lacks 'password-secret' support */
        if (!qemuDomainSupportsEncryptedSecret(priv) ||
            (src->protocol == VIR_STORAGE_NET_PROTOCOL_ISCSI && !iscsiHasPS)) {
            srcPriv->secinfo = qemuDomainSecretInfoNewPlain(usageType,
                                                            src->auth->username,
                                                            &src->auth->seclookupdef);
        } else {
            srcPriv->secinfo = qemuDomainSecretAESSetupFromSecret(priv, aliasprotocol,
                                                                  "auth",
                                                                  usageType,
                                                                  src->auth->username,
                                                                  &src->auth->seclookupdef);
        }

        if (!srcPriv->secinfo)
            return -1;
    }

    if (hasEnc) {
        /* LUKS passphrase secret; only the first secret is used */
        if (!(srcPriv->encinfo = qemuDomainSecretAESSetupFromSecret(priv, aliasformat,
                                                                    "encryption",
                                                                    VIR_SECRET_USAGE_TYPE_VOLUME,
                                                                    NULL,
                                                                    &src->encryption->secrets[0]->seclookupdef)))
            return -1;
    }

    /* HTTP cookies are passed as a secret object only with -blockdev */
    if (src->ncookies &&
        virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV) &&
        !(srcPriv->httpcookie = qemuDomainSecretStorageSourcePrepareCookies(priv,
                                                                            src,
                                                                            aliasprotocol)))
        return -1;

    return 0;
}
|
|
|
|
|
|
|
|
|
2016-04-06 14:41:33 +00:00
|
|
|
void
|
|
|
|
qemuDomainSecretHostdevDestroy(virDomainHostdevDefPtr hostdev)
|
|
|
|
{
|
2017-09-23 11:03:10 +00:00
|
|
|
qemuDomainStorageSourcePrivatePtr srcPriv;
|
2016-04-06 14:41:33 +00:00
|
|
|
|
2017-09-23 11:03:10 +00:00
|
|
|
if (virHostdevIsSCSIDevice(hostdev)) {
|
|
|
|
virDomainHostdevSubsysSCSIPtr scsisrc = &hostdev->source.subsys.u.scsi;
|
|
|
|
virDomainHostdevSubsysSCSIiSCSIPtr iscsisrc = &scsisrc->u.iscsi;
|
2016-04-06 14:41:33 +00:00
|
|
|
|
2017-09-23 11:03:10 +00:00
|
|
|
if (scsisrc->protocol == VIR_DOMAIN_HOSTDEV_SCSI_PROTOCOL_TYPE_ISCSI) {
|
|
|
|
srcPriv = QEMU_DOMAIN_STORAGE_SOURCE_PRIVATE(iscsisrc->src);
|
2020-07-10 12:54:09 +00:00
|
|
|
if (srcPriv)
|
|
|
|
qemuDomainSecretInfoDestroy(srcPriv->secinfo);
|
2017-09-23 11:03:10 +00:00
|
|
|
}
|
|
|
|
}
|
2016-04-06 14:41:33 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2016-10-21 23:02:35 +00:00
|
|
|
void
|
|
|
|
qemuDomainSecretChardevDestroy(virDomainChrSourceDefPtr dev)
|
|
|
|
{
|
|
|
|
qemuDomainChrSourcePrivatePtr chrSourcePriv =
|
|
|
|
QEMU_DOMAIN_CHR_SOURCE_PRIVATE(dev);
|
|
|
|
|
|
|
|
if (!chrSourcePriv || !chrSourcePriv->secinfo)
|
|
|
|
return;
|
|
|
|
|
2020-03-06 13:44:43 +00:00
|
|
|
g_clear_pointer(&chrSourcePriv->secinfo, qemuDomainSecretInfoFree);
|
2016-10-21 23:02:35 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/* qemuDomainSecretChardevPrepare:
|
|
|
|
* @cfg: Pointer to driver config object
|
|
|
|
* @priv: pointer to domain private object
|
|
|
|
* @chrAlias: Alias of the chr device
|
|
|
|
* @dev: Pointer to a char source definition
|
|
|
|
*
|
|
|
|
* For a TCP character device, generate a qemuDomainSecretInfo to be used
|
|
|
|
* by the command line code to generate the secret for the tls-creds to use.
|
|
|
|
*
|
|
|
|
* Returns 0 on success, -1 on failure
|
|
|
|
*/
|
|
|
|
int
|
2018-02-09 16:14:41 +00:00
|
|
|
qemuDomainSecretChardevPrepare(virQEMUDriverConfigPtr cfg,
|
2016-10-21 23:02:35 +00:00
|
|
|
qemuDomainObjPrivatePtr priv,
|
|
|
|
const char *chrAlias,
|
|
|
|
virDomainChrSourceDefPtr dev)
|
|
|
|
{
|
2020-01-09 18:33:44 +00:00
|
|
|
g_autofree char *charAlias = NULL;
|
2016-10-21 23:02:35 +00:00
|
|
|
|
|
|
|
if (dev->type != VIR_DOMAIN_CHR_TYPE_TCP)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (dev->data.tcp.haveTLS == VIR_TRISTATE_BOOL_YES &&
|
|
|
|
cfg->chardevTLSx509secretUUID) {
|
|
|
|
qemuDomainChrSourcePrivatePtr chrSourcePriv =
|
|
|
|
QEMU_DOMAIN_CHR_SOURCE_PRIVATE(dev);
|
|
|
|
|
|
|
|
if (!(charAlias = qemuAliasChardevFromDevAlias(chrAlias)))
|
2017-02-20 20:04:58 +00:00
|
|
|
return -1;
|
2016-10-21 23:02:35 +00:00
|
|
|
|
2017-03-01 19:14:40 +00:00
|
|
|
chrSourcePriv->secinfo =
|
2018-02-09 16:14:41 +00:00
|
|
|
qemuDomainSecretInfoTLSNew(priv, charAlias,
|
2017-03-01 19:14:40 +00:00
|
|
|
cfg->chardevTLSx509secretUUID);
|
|
|
|
|
|
|
|
if (!chrSourcePriv->secinfo)
|
|
|
|
return -1;
|
2016-10-21 23:02:35 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2019-01-14 13:24:27 +00:00
|
|
|
static void
|
|
|
|
qemuDomainSecretGraphicsDestroy(virDomainGraphicsDefPtr graphics)
|
|
|
|
{
|
|
|
|
qemuDomainGraphicsPrivatePtr gfxPriv = QEMU_DOMAIN_GRAPHICS_PRIVATE(graphics);
|
|
|
|
|
|
|
|
if (!gfxPriv)
|
|
|
|
return;
|
|
|
|
|
|
|
|
VIR_FREE(gfxPriv->tlsAlias);
|
2020-03-06 13:44:43 +00:00
|
|
|
g_clear_pointer(&gfxPriv->secinfo, qemuDomainSecretInfoFree);
|
2019-01-14 13:24:27 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
qemuDomainSecretGraphicsPrepare(virQEMUDriverConfigPtr cfg,
|
|
|
|
qemuDomainObjPrivatePtr priv,
|
|
|
|
virDomainGraphicsDefPtr graphics)
|
|
|
|
{
|
|
|
|
virQEMUCapsPtr qemuCaps = priv->qemuCaps;
|
|
|
|
qemuDomainGraphicsPrivatePtr gfxPriv = QEMU_DOMAIN_GRAPHICS_PRIVATE(graphics);
|
|
|
|
|
|
|
|
if (graphics->type != VIR_DOMAIN_GRAPHICS_TYPE_VNC)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (!virQEMUCapsGet(qemuCaps, QEMU_CAPS_OBJECT_TLS_CREDS_X509))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (!cfg->vncTLS)
|
|
|
|
return 0;
|
|
|
|
|
2019-10-20 11:49:46 +00:00
|
|
|
gfxPriv->tlsAlias = g_strdup("vnc-tls-creds0");
|
2019-01-14 13:24:27 +00:00
|
|
|
|
2019-01-14 13:20:01 +00:00
|
|
|
if (cfg->vncTLSx509secretUUID) {
|
|
|
|
gfxPriv->secinfo = qemuDomainSecretInfoTLSNew(priv, gfxPriv->tlsAlias,
|
|
|
|
cfg->vncTLSx509secretUUID);
|
|
|
|
if (!gfxPriv->secinfo)
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2019-01-14 13:24:27 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2016-04-06 19:00:59 +00:00
|
|
|
/* qemuDomainSecretDestroy:
 * @vm: Domain object
 *
 * Removes all unnecessary data which was needed to generate 'secret' objects.
 * Iterates over every device type that may carry a secret info (disks,
 * hostdevs, all chardev-backed devices, and graphics) and destroys it.
 */
void
qemuDomainSecretDestroy(virDomainObjPtr vm)
{
    size_t i;

    for (i = 0; i < vm->def->ndisks; i++)
        qemuDomainSecretDiskDestroy(vm->def->disks[i]);

    for (i = 0; i < vm->def->nhostdevs; i++)
        qemuDomainSecretHostdevDestroy(vm->def->hostdevs[i]);

    for (i = 0; i < vm->def->nserials; i++)
        qemuDomainSecretChardevDestroy(vm->def->serials[i]->source);

    for (i = 0; i < vm->def->nparallels; i++)
        qemuDomainSecretChardevDestroy(vm->def->parallels[i]->source);

    for (i = 0; i < vm->def->nchannels; i++)
        qemuDomainSecretChardevDestroy(vm->def->channels[i]->source);

    for (i = 0; i < vm->def->nconsoles; i++)
        qemuDomainSecretChardevDestroy(vm->def->consoles[i]->source);

    /* only passthrough smartcards are backed by a chardev */
    for (i = 0; i < vm->def->nsmartcards; i++) {
        if (vm->def->smartcards[i]->type ==
            VIR_DOMAIN_SMARTCARD_TYPE_PASSTHROUGH)
            qemuDomainSecretChardevDestroy(vm->def->smartcards[i]->data.passthru);
    }

    /* only EGD-backed RNGs are backed by a chardev */
    for (i = 0; i < vm->def->nrngs; i++) {
        if (vm->def->rngs[i]->backend == VIR_DOMAIN_RNG_BACKEND_EGD)
            qemuDomainSecretChardevDestroy(vm->def->rngs[i]->source.chardev);
    }

    for (i = 0; i < vm->def->nredirdevs; i++)
        qemuDomainSecretChardevDestroy(vm->def->redirdevs[i]->source);

    for (i = 0; i < vm->def->ngraphics; i++)
        qemuDomainSecretGraphicsDestroy(vm->def->graphics[i]);
}
|
|
|
|
|
|
|
|
|
|
|
|
/* qemuDomainSecretPrepare:
 * @driver: Pointer to driver object
 * @vm: Domain object
 *
 * For any objects that may require an auth/secret setup, create a
 * qemuDomainSecretInfo and save it in the appropriate place within
 * the private structures. This will be used by command line build
 * code in order to pass the secret along to qemu in order to provide
 * the necessary authentication data.
 *
 * Returns 0 on success, -1 on failure with error message set
 */
int
qemuDomainSecretPrepare(virQEMUDriverPtr driver,
                        virDomainObjPtr vm)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
    size_t i;

    /* disk and hostdev secrets are prepared when preparing internal data */

    for (i = 0; i < vm->def->nserials; i++) {
        if (qemuDomainSecretChardevPrepare(cfg, priv,
                                           vm->def->serials[i]->info.alias,
                                           vm->def->serials[i]->source) < 0)
            return -1;
    }

    for (i = 0; i < vm->def->nparallels; i++) {
        if (qemuDomainSecretChardevPrepare(cfg, priv,
                                           vm->def->parallels[i]->info.alias,
                                           vm->def->parallels[i]->source) < 0)
            return -1;
    }

    for (i = 0; i < vm->def->nchannels; i++) {
        if (qemuDomainSecretChardevPrepare(cfg, priv,
                                           vm->def->channels[i]->info.alias,
                                           vm->def->channels[i]->source) < 0)
            return -1;
    }

    for (i = 0; i < vm->def->nconsoles; i++) {
        if (qemuDomainSecretChardevPrepare(cfg, priv,
                                           vm->def->consoles[i]->info.alias,
                                           vm->def->consoles[i]->source) < 0)
            return -1;
    }

    /* only passthrough smartcards have a chardev backing */
    for (i = 0; i < vm->def->nsmartcards; i++)
        if (vm->def->smartcards[i]->type ==
            VIR_DOMAIN_SMARTCARD_TYPE_PASSTHROUGH &&
            qemuDomainSecretChardevPrepare(cfg, priv,
                                           vm->def->smartcards[i]->info.alias,
                                           vm->def->smartcards[i]->data.passthru) < 0)
            return -1;

    /* only EGD-backed RNGs have a chardev backing */
    for (i = 0; i < vm->def->nrngs; i++) {
        if (vm->def->rngs[i]->backend == VIR_DOMAIN_RNG_BACKEND_EGD &&
            qemuDomainSecretChardevPrepare(cfg, priv,
                                           vm->def->rngs[i]->info.alias,
                                           vm->def->rngs[i]->source.chardev) < 0)
            return -1;
    }

    for (i = 0; i < vm->def->nredirdevs; i++) {
        if (qemuDomainSecretChardevPrepare(cfg, priv,
                                           vm->def->redirdevs[i]->info.alias,
                                           vm->def->redirdevs[i]->source) < 0)
            return -1;
    }

    for (i = 0; i < vm->def->ngraphics; i++) {
        if (qemuDomainSecretGraphicsPrepare(cfg, priv, vm->def->graphics[i]) < 0)
            return -1;
    }

    return 0;
}
|
|
|
|
|
|
|
|
|
2016-02-26 08:15:55 +00:00
|
|
|
/* This is the old way of setting up per-domain directories */
|
2019-10-22 13:26:14 +00:00
|
|
|
static void
|
2016-02-26 08:15:55 +00:00
|
|
|
qemuDomainSetPrivatePathsOld(virQEMUDriverPtr driver,
|
|
|
|
virDomainObjPtr vm)
|
|
|
|
{
|
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
2020-01-09 18:33:45 +00:00
|
|
|
g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
|
2016-02-26 08:15:55 +00:00
|
|
|
|
2019-10-22 13:26:14 +00:00
|
|
|
if (!priv->libDir)
|
|
|
|
priv->libDir = g_strdup_printf("%s/domain-%s", cfg->libDir, vm->def->name);
|
2016-02-26 08:15:55 +00:00
|
|
|
|
2019-10-22 13:26:14 +00:00
|
|
|
if (!priv->channelTargetDir)
|
|
|
|
priv->channelTargetDir = g_strdup_printf("%s/domain-%s",
|
|
|
|
cfg->channelTargetDir, vm->def->name);
|
2016-02-26 08:15:55 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
int
|
2016-04-03 19:59:46 +00:00
|
|
|
qemuDomainSetPrivatePaths(virQEMUDriverPtr driver,
|
|
|
|
virDomainObjPtr vm)
|
2016-02-26 08:15:55 +00:00
|
|
|
{
|
2020-01-09 18:33:45 +00:00
|
|
|
g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
|
2016-04-03 19:59:46 +00:00
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
2020-01-09 18:33:44 +00:00
|
|
|
g_autofree char *domname = virDomainDefGetShortName(vm->def);
|
2016-02-26 08:15:55 +00:00
|
|
|
|
2016-04-26 06:43:40 +00:00
|
|
|
if (!domname)
|
2020-01-09 18:33:46 +00:00
|
|
|
return -1;
|
2016-04-26 06:43:40 +00:00
|
|
|
|
2019-10-22 13:26:14 +00:00
|
|
|
if (!priv->libDir)
|
|
|
|
priv->libDir = g_strdup_printf("%s/domain-%s", cfg->libDir, domname);
|
2016-02-26 08:15:55 +00:00
|
|
|
|
2019-10-22 13:26:14 +00:00
|
|
|
if (!priv->channelTargetDir)
|
|
|
|
priv->channelTargetDir = g_strdup_printf("%s/domain-%s",
|
|
|
|
cfg->channelTargetDir, domname);
|
2016-02-26 08:15:55 +00:00
|
|
|
|
2020-01-09 18:33:46 +00:00
|
|
|
return 0;
|
2016-02-26 08:15:55 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2020-02-12 12:26:11 +00:00
|
|
|
int
|
|
|
|
qemuDomainObjStartWorker(virDomainObjPtr dom)
|
|
|
|
{
|
|
|
|
qemuDomainObjPrivatePtr priv = dom->privateData;
|
|
|
|
|
|
|
|
if (!priv->eventThread) {
|
|
|
|
g_autofree char *threadName = g_strdup_printf("vm-%s", dom->def->name);
|
|
|
|
if (!(priv->eventThread = virEventThreadNew(threadName)))
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/* qemuDomainObjStopWorker:
 * @dom: domain object (must be locked by the caller)
 *
 * Stop and release the per-VM event loop thread, if one is running. The VM
 * lock is temporarily dropped around the final unref; see the comment below.
 */
void
qemuDomainObjStopWorker(virDomainObjPtr dom)
{
    qemuDomainObjPrivatePtr priv = dom->privateData;
    virEventThread *eventThread;

    if (!priv->eventThread)
        return;

    /*
     * We are dropping the only reference here so that the event loop thread
     * is going to be exited synchronously. In order to avoid deadlocks we
     * need to unlock the VM so that any handler being called can finish
     * execution and thus even loop thread be finished too.
     */
    eventThread = g_steal_pointer(&priv->eventThread);
    virObjectUnlock(dom);
    g_object_unref(eventThread);
    virObjectLock(dom);
}
|
|
|
|
|
|
|
|
|
2013-07-04 10:02:00 +00:00
|
|
|
static void *
|
2017-07-21 13:46:56 +00:00
|
|
|
qemuDomainObjPrivateAlloc(void *opaque)
|
2010-12-16 15:23:41 +00:00
|
|
|
{
|
|
|
|
qemuDomainObjPrivatePtr priv;
|
|
|
|
|
2020-10-05 10:26:10 +00:00
|
|
|
priv = g_new0(qemuDomainObjPrivate, 1);
|
2010-12-16 15:23:41 +00:00
|
|
|
|
2020-07-16 11:48:34 +00:00
|
|
|
if (qemuDomainObjInitJob(&priv->job, &qemuPrivateJobCallbacks) < 0) {
|
2013-07-04 10:02:00 +00:00
|
|
|
virReportSystemError(errno, "%s",
|
|
|
|
_("Unable to init qemu driver mutexes"));
|
2011-11-23 14:51:28 +00:00
|
|
|
goto error;
|
2013-07-04 10:02:00 +00:00
|
|
|
}
|
2011-05-13 10:11:47 +00:00
|
|
|
|
2013-01-02 15:38:52 +00:00
|
|
|
if (!(priv->devs = virChrdevAlloc()))
|
2011-10-06 10:24:47 +00:00
|
|
|
goto error;
|
|
|
|
|
2020-10-21 11:12:08 +00:00
|
|
|
if (!(priv->blockjobs = virHashNew(virObjectFreeHashData)))
|
2018-11-29 11:50:09 +00:00
|
|
|
goto error;
|
|
|
|
|
Add API to change qemu agent response timeout
Some layered products such as oVirt have requested a way to avoid being
blocked by guest agent commands when querying a loaded vm. For example,
many guest agent commands are polled periodically to monitor changes,
and rather than blocking the calling process, they'd prefer to simply
time out when an agent query is taking too long.
This patch adds a way for the user to specify a custom agent timeout
that is applied to all agent commands.
One special case to note here is the 'guest-sync' command. 'guest-sync'
is issued internally prior to calling any other command. (For example,
when libvirt wants to call 'guest-get-fsinfo', we first call
'guest-sync' and then call 'guest-get-fsinfo').
Previously, the 'guest-sync' command used a 5-second timeout
(VIR_DOMAIN_QEMU_AGENT_COMMAND_DEFAULT), whereas the actual command that
followed always blocked indefinitely
(VIR_DOMAIN_QEMU_AGENT_COMMAND_BLOCK). As part of this patch, if a
custom timeout is specified that is shorter than
5 seconds, this new timeout is also used for 'guest-sync'. If there is
no custom timeout or if the custom timeout is longer than 5 seconds, we
will continue to use the 5-second timeout.
Signed-off-by: Jonathon Jongsma <jjongsma@redhat.com>
Signed-off-by: Michal Privoznik <mprivozn@redhat.com>
Reviewed-by: Michal Privoznik <mprivozn@redhat.com>
2019-11-13 22:06:09 +00:00
|
|
|
/* agent commands block by default, user can choose different behavior */
|
|
|
|
priv->agentTimeout = VIR_DOMAIN_AGENT_RESPONSE_TIMEOUT_BLOCK;
|
2012-08-03 16:34:06 +00:00
|
|
|
priv->migMaxBandwidth = QEMU_DOMAIN_MIG_BANDWIDTH_MAX;
|
2017-07-21 13:46:56 +00:00
|
|
|
priv->driver = opaque;
|
2011-08-26 18:10:22 +00:00
|
|
|
|
2010-12-16 15:23:41 +00:00
|
|
|
return priv;
|
2011-11-23 14:51:28 +00:00
|
|
|
|
2014-03-25 06:49:44 +00:00
|
|
|
error:
|
2011-11-23 14:51:28 +00:00
|
|
|
VIR_FREE(priv);
|
|
|
|
return NULL;
|
2010-12-16 15:23:41 +00:00
|
|
|
}
|
|
|
|
|
2017-08-23 12:19:36 +00:00
|
|
|
/**
 * qemuDomainObjPrivateDataClear:
 * @priv: domain private data
 *
 * Clears private data entries, which are not necessary or stale if the VM is
 * not running. Frees the owned allocations and resets scalar flags back to
 * their "inactive" defaults so the structure can be reused on the next start.
 */
void
qemuDomainObjPrivateDataClear(qemuDomainObjPrivatePtr priv)
{
    /* device alias list collected from the running qemu process */
    g_strfreev(priv->qemuDevices);
    priv->qemuDevices = NULL;

    virCgroupFree(priv->cgroup);
    priv->cgroup = NULL;

    virPerfFree(priv->perf);
    priv->perf = NULL;

    VIR_FREE(priv->machineName);

    /* capabilities are re-probed on the next start */
    virObjectUnref(priv->qemuCaps);
    priv->qemuCaps = NULL;

    VIR_FREE(priv->pidfile);

    /* per-domain directories (libDir, channelTargetDir) are re-created */
    VIR_FREE(priv->libDir);
    VIR_FREE(priv->channelTargetDir);

    priv->memPrealloc = false;

    /* remove automatic pinning data */
    virBitmapFree(priv->autoNodeset);
    priv->autoNodeset = NULL;
    virBitmapFree(priv->autoCpuset);
    priv->autoCpuset = NULL;

    /* remove address data */
    virDomainPCIAddressSetFree(priv->pciaddrs);
    priv->pciaddrs = NULL;
    virDomainUSBAddressSetFree(priv->usbaddrs);
    priv->usbaddrs = NULL;

    /* CPU definition the domain was originally started with */
    virCPUDefFree(priv->origCPU);
    priv->origCPU = NULL;

    /* clear previously used namespaces */
    virBitmapFree(priv->namespaces);
    priv->namespaces = NULL;

    priv->rememberOwner = false;

    priv->reconnectBlockjobs = VIR_TRISTATE_BOOL_ABSENT;
    priv->allowReboot = VIR_TRISTATE_BOOL_ABSENT;

    virBitmapFree(priv->migrationCaps);
    priv->migrationCaps = NULL;

    /* drop all tracked block jobs; the hash itself is kept for reuse */
    virHashRemoveAll(priv->blockjobs);

    virObjectUnref(priv->pflash0);
    priv->pflash0 = NULL;
    virObjectUnref(priv->pflash1);
    priv->pflash1 = NULL;

    virDomainBackupDefFree(priv->backup);
    priv->backup = NULL;

    /* reset node name allocator */
    qemuDomainStorageIdReset(priv);

    priv->dbusDaemonRunning = false;

    g_strfreev(priv->dbusVMStateIds);
    priv->dbusVMStateIds = NULL;

    priv->dbusVMState = false;

    priv->inhibitDiskTransientDelete = false;
}
|
|
|
|
|
|
|
|
|
|
|
|
/**
 * qemuDomainObjPrivateFree:
 * @data: domain private data (qemuDomainObjPrivatePtr)
 *
 * Destructor for the qemu private data attached to a virDomainObj.
 * First clears the runtime-only state via qemuDomainObjPrivateDataClear,
 * then releases everything that outlives a single run of the VM.
 */
static void
qemuDomainObjPrivateFree(void *data)
{
    qemuDomainObjPrivatePtr priv = data;

    qemuDomainObjPrivateDataClear(priv);

    virObjectUnref(priv->monConfig);
    qemuDomainObjClearJob(&priv->job);
    VIR_FREE(priv->lockState);
    VIR_FREE(priv->origname);

    virChrdevFree(priv->devs);

    /* This should never be non-NULL if we get here, but just in case... */
    if (priv->mon) {
        VIR_ERROR(_("Unexpected QEMU monitor still active during domain deletion"));
        qemuMonitorClose(priv->mon);
    }
    if (priv->agent) {
        VIR_ERROR(_("Unexpected QEMU agent still active during domain deletion"));
        qemuAgentClose(priv->agent);
    }
    VIR_FREE(priv->cleanupCallbacks);

    g_clear_pointer(&priv->migSecinfo, qemuDomainSecretInfoFree);
    qemuDomainMasterKeyFree(priv);

    /* DataClear only emptied the hash; now release the table itself */
    virHashFree(priv->blockjobs);

    /* This should never be non-NULL if we get here, but just in case... */
    if (priv->eventThread) {
        VIR_ERROR(_("Unexpected event thread still active during domain deletion"));
        g_object_unref(priv->eventThread);
    }

    VIR_FREE(priv);
}
|
|
|
|
|
|
|
|
|
2018-05-24 11:55:16 +00:00
|
|
|
static int
|
|
|
|
qemuStorageSourcePrivateDataAssignSecinfo(qemuDomainSecretInfoPtr *secinfo,
|
|
|
|
char **alias)
|
|
|
|
{
|
|
|
|
if (!*alias)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (!*secinfo) {
|
2020-10-05 10:26:10 +00:00
|
|
|
*secinfo = g_new0(qemuDomainSecretInfo, 1);
|
2018-05-24 11:55:16 +00:00
|
|
|
(*secinfo)->type = VIR_DOMAIN_SECRET_INFO_TYPE_AES;
|
|
|
|
}
|
|
|
|
|
|
|
|
if ((*secinfo)->type == VIR_DOMAIN_SECRET_INFO_TYPE_AES)
|
2019-10-16 11:43:18 +00:00
|
|
|
(*secinfo)->s.aes.alias = g_steal_pointer(&*alias);
|
2018-05-24 11:55:16 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2018-03-01 14:13:26 +00:00
|
|
|
/**
 * qemuStorageSourcePrivateDataParse:
 * @ctxt: XPath context positioned at the private data element of a source
 * @src: storage source to populate
 *
 * Parses the qemu-private status XML of a storage source: blockdev node
 * names, the TLS object alias, persistent reservation manager alias, and
 * the secret object aliases (auth/encryption/httpcookie/tlskey). The
 * private data object is allocated lazily — only when at least one secret
 * alias is present.
 *
 * Returns 0 on success, -1 on failure.
 */
static int
qemuStorageSourcePrivateDataParse(xmlXPathContextPtr ctxt,
                                  virStorageSourcePtr src)
{
    qemuDomainStorageSourcePrivatePtr priv;
    g_autofree char *authalias = NULL;
    g_autofree char *encalias = NULL;
    g_autofree char *httpcookiealias = NULL;
    g_autofree char *tlskeyalias = NULL;

    src->nodestorage = virXPathString("string(./nodenames/nodename[@type='storage']/@name)", ctxt);
    src->nodeformat = virXPathString("string(./nodenames/nodename[@type='format']/@name)", ctxt);
    src->tlsAlias = virXPathString("string(./objects/TLSx509/@alias)", ctxt);

    /* storage-slice node name only exists when a slice is configured */
    if (src->sliceStorage)
        src->sliceStorage->nodename = virXPathString("string(./nodenames/nodename[@type='slice-storage']/@name)", ctxt);

    if (src->pr)
        src->pr->mgralias = virXPathString("string(./reservations/@mgralias)", ctxt);

    authalias = virXPathString("string(./objects/secret[@type='auth']/@alias)", ctxt);
    encalias = virXPathString("string(./objects/secret[@type='encryption']/@alias)", ctxt);
    httpcookiealias = virXPathString("string(./objects/secret[@type='httpcookie']/@alias)", ctxt);
    tlskeyalias = virXPathString("string(./objects/secret[@type='tlskey']/@alias)", ctxt);

    if (authalias || encalias || httpcookiealias || tlskeyalias) {
        if (!src->privateData &&
            !(src->privateData = qemuDomainStorageSourcePrivateNew()))
            return -1;

        priv = QEMU_DOMAIN_STORAGE_SOURCE_PRIVATE(src);

        /* each helper steals the alias string on success */
        if (qemuStorageSourcePrivateDataAssignSecinfo(&priv->secinfo, &authalias) < 0)
            return -1;

        if (qemuStorageSourcePrivateDataAssignSecinfo(&priv->encinfo, &encalias) < 0)
            return -1;

        if (qemuStorageSourcePrivateDataAssignSecinfo(&priv->httpcookie, &httpcookiealias) < 0)
            return -1;

        if (qemuStorageSourcePrivateDataAssignSecinfo(&priv->tlsKeySecret, &tlskeyalias) < 0)
            return -1;
    }

    if (virStorageSourcePrivateDataParseRelPath(ctxt, src) < 0)
        return -1;

    return 0;
}
|
|
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
qemuStorageSourcePrivateDataFormatSecinfo(virBufferPtr buf,
|
|
|
|
qemuDomainSecretInfoPtr secinfo,
|
|
|
|
const char *type)
|
|
|
|
{
|
|
|
|
if (!secinfo ||
|
|
|
|
secinfo->type != VIR_DOMAIN_SECRET_INFO_TYPE_AES ||
|
|
|
|
!secinfo->s.aes.alias)
|
|
|
|
return;
|
|
|
|
|
|
|
|
virBufferAsprintf(buf, "<secret type='%s' alias='%s'/>\n",
|
|
|
|
type, secinfo->s.aes.alias);
|
2018-03-01 14:13:26 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/**
 * qemuStorageSourcePrivateDataFormat:
 * @src: storage source to format
 * @buf: output buffer
 *
 * Formats the qemu-private status XML for a storage source: the
 * <nodenames> element (storage/format/slice-storage node names), the
 * persistent reservations element, the relative path data, and the
 * <objects> element holding secret and TLS object aliases.
 *
 * Returns 0 on success, -1 on failure.
 */
static int
qemuStorageSourcePrivateDataFormat(virStorageSourcePtr src,
                                   virBufferPtr buf)
{
    /* child buffer for the <objects> element */
    g_auto(virBuffer) tmp = VIR_BUFFER_INIT_CHILD(buf);
    qemuDomainStorageSourcePrivatePtr srcPriv = QEMU_DOMAIN_STORAGE_SOURCE_PRIVATE(src);
    g_auto(virBuffer) nodenamesChildBuf = VIR_BUFFER_INIT_CHILD(buf);

    /* virBufferEscapeString is a no-op when the value is NULL */
    virBufferEscapeString(&nodenamesChildBuf, "<nodename type='storage' name='%s'/>\n", src->nodestorage);
    virBufferEscapeString(&nodenamesChildBuf, "<nodename type='format' name='%s'/>\n", src->nodeformat);

    if (src->sliceStorage)
        virBufferEscapeString(&nodenamesChildBuf, "<nodename type='slice-storage' name='%s'/>\n",
                              src->sliceStorage->nodename);

    /* element is omitted entirely when the child buffer stayed empty */
    virXMLFormatElement(buf, "nodenames", NULL, &nodenamesChildBuf);

    if (src->pr)
        virBufferAsprintf(buf, "<reservations mgralias='%s'/>\n", src->pr->mgralias);

    if (virStorageSourcePrivateDataFormatRelPath(src, buf) < 0)
        return -1;

    if (srcPriv) {
        qemuStorageSourcePrivateDataFormatSecinfo(&tmp, srcPriv->secinfo, "auth");
        qemuStorageSourcePrivateDataFormatSecinfo(&tmp, srcPriv->encinfo, "encryption");
        qemuStorageSourcePrivateDataFormatSecinfo(&tmp, srcPriv->httpcookie, "httpcookie");
        qemuStorageSourcePrivateDataFormatSecinfo(&tmp, srcPriv->tlsKeySecret, "tlskey");
    }

    if (src->tlsAlias)
        virBufferAsprintf(&tmp, "<TLSx509 alias='%s'/>\n", src->tlsAlias);

    virXMLFormatElement(buf, "objects", NULL, &tmp);

    return 0;
}
|
|
|
|
|
|
|
|
|
2018-06-15 07:12:01 +00:00
|
|
|
static int
|
|
|
|
qemuDomainDiskPrivateParse(xmlXPathContextPtr ctxt,
|
|
|
|
virDomainDiskDefPtr disk)
|
|
|
|
{
|
|
|
|
qemuDomainDiskPrivatePtr priv = QEMU_DOMAIN_DISK_PRIVATE(disk);
|
|
|
|
|
|
|
|
priv->qomName = virXPathString("string(./qom/@name)", ctxt);
|
2018-08-21 12:45:57 +00:00
|
|
|
priv->nodeCopyOnRead = virXPathString("string(./nodenames/nodename[@type='copyOnRead']/@name)", ctxt);
|
2018-06-15 07:12:01 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
qemuDomainDiskPrivateFormat(virDomainDiskDefPtr disk,
|
|
|
|
virBufferPtr buf)
|
|
|
|
{
|
|
|
|
qemuDomainDiskPrivatePtr priv = QEMU_DOMAIN_DISK_PRIVATE(disk);
|
|
|
|
|
|
|
|
virBufferEscapeString(buf, "<qom name='%s'/>\n", priv->qomName);
|
|
|
|
|
2018-08-21 12:45:57 +00:00
|
|
|
if (priv->nodeCopyOnRead) {
|
|
|
|
virBufferAddLit(buf, "<nodenames>\n");
|
|
|
|
virBufferAdjustIndent(buf, 2);
|
|
|
|
virBufferEscapeString(buf, "<nodename type='copyOnRead' name='%s'/>\n",
|
|
|
|
priv->nodeCopyOnRead);
|
|
|
|
virBufferAdjustIndent(buf, -2);
|
|
|
|
virBufferAddLit(buf, "</nodenames>\n");
|
|
|
|
}
|
|
|
|
|
2018-06-15 07:12:01 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2016-06-30 08:35:12 +00:00
|
|
|
static void
|
|
|
|
qemuDomainObjPrivateXMLFormatVcpus(virBufferPtr buf,
|
2016-07-01 12:56:14 +00:00
|
|
|
virDomainDefPtr def)
|
2016-06-30 08:35:12 +00:00
|
|
|
{
|
|
|
|
size_t i;
|
2016-07-01 12:56:14 +00:00
|
|
|
size_t maxvcpus = virDomainDefGetVcpusMax(def);
|
|
|
|
virDomainVcpuDefPtr vcpu;
|
|
|
|
pid_t tid;
|
2016-06-30 08:35:12 +00:00
|
|
|
|
|
|
|
virBufferAddLit(buf, "<vcpus>\n");
|
|
|
|
virBufferAdjustIndent(buf, 2);
|
|
|
|
|
2016-07-01 12:56:14 +00:00
|
|
|
for (i = 0; i < maxvcpus; i++) {
|
|
|
|
vcpu = virDomainDefGetVcpu(def, i);
|
|
|
|
tid = QEMU_DOMAIN_VCPU_PRIVATE(vcpu)->tid;
|
|
|
|
|
|
|
|
if (!vcpu->online || tid == 0)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
virBufferAsprintf(buf, "<vcpu id='%zu' pid='%d'/>\n", i, tid);
|
|
|
|
}
|
2016-06-30 08:35:12 +00:00
|
|
|
|
|
|
|
virBufferAdjustIndent(buf, -2);
|
|
|
|
virBufferAddLit(buf, "</vcpus>\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2017-07-12 07:24:07 +00:00
|
|
|
static int
|
2017-09-26 14:36:48 +00:00
|
|
|
qemuDomainObjPrivateXMLFormatAutomaticPlacement(virBufferPtr buf,
|
2017-09-26 15:10:51 +00:00
|
|
|
qemuDomainObjPrivatePtr priv)
|
2017-07-12 07:24:07 +00:00
|
|
|
{
|
2020-01-09 18:33:44 +00:00
|
|
|
g_autofree char *nodeset = NULL;
|
|
|
|
g_autofree char *cpuset = NULL;
|
2017-07-12 07:24:07 +00:00
|
|
|
|
2017-07-12 12:10:34 +00:00
|
|
|
if (!priv->autoNodeset && !priv->autoCpuset)
|
2017-07-12 07:24:07 +00:00
|
|
|
return 0;
|
|
|
|
|
2017-07-12 12:10:34 +00:00
|
|
|
if (priv->autoNodeset &&
|
|
|
|
!((nodeset = virBitmapFormat(priv->autoNodeset))))
|
2020-01-09 18:33:46 +00:00
|
|
|
return -1;
|
2017-07-12 07:24:07 +00:00
|
|
|
|
2017-07-12 12:10:34 +00:00
|
|
|
if (priv->autoCpuset &&
|
|
|
|
!((cpuset = virBitmapFormat(priv->autoCpuset))))
|
2020-01-09 18:33:46 +00:00
|
|
|
return -1;
|
2017-07-12 12:10:34 +00:00
|
|
|
|
|
|
|
virBufferAddLit(buf, "<numad");
|
|
|
|
virBufferEscapeString(buf, " nodeset='%s'", nodeset);
|
|
|
|
virBufferEscapeString(buf, " cpuset='%s'", cpuset);
|
|
|
|
virBufferAddLit(buf, "/>\n");
|
2017-07-12 07:24:07 +00:00
|
|
|
|
2020-01-09 18:33:46 +00:00
|
|
|
return 0;
|
2017-07-12 07:24:07 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2019-03-19 06:54:12 +00:00
|
|
|
/* Context passed as the opaque pointer to the blockjob hash iterator
 * (qemuDomainObjPrivateXMLFormatBlockjobIterator). */
typedef struct qemuDomainPrivateBlockJobFormatData {
    virDomainXMLOptionPtr xmlopt; /* XML options used to format disk sources */
    virBufferPtr buf;             /* output buffer receiving <blockjob> elements */
} qemuDomainPrivateBlockJobFormatData;
|
|
|
|
|
|
|
|
|
|
|
|
/**
 * qemuDomainObjPrivateXMLFormatBlockjobFormatSource:
 * @buf: output buffer
 * @element: name of the wrapper element to emit
 * @src: storage source to format
 * @xmlopt: XML options for formatting the source
 * @chain: if true, the full backing chain of @src is formatted too
 *
 * Formats one storage source of a block job as <@element type='...'
 * format='...'> with the <source> (and optionally backing chain) inside.
 *
 * Returns 0 on success, -1 on failure.
 */
static int
qemuDomainObjPrivateXMLFormatBlockjobFormatSource(virBufferPtr buf,
                                                  const char *element,
                                                  virStorageSourcePtr src,
                                                  virDomainXMLOptionPtr xmlopt,
                                                  bool chain)
{
    g_auto(virBuffer) attrBuf = VIR_BUFFER_INITIALIZER;
    g_auto(virBuffer) childBuf = VIR_BUFFER_INIT_CHILD(buf);
    /* status XML: include runtime-only information */
    unsigned int xmlflags = VIR_DOMAIN_DEF_FORMAT_STATUS;

    virBufferAsprintf(&attrBuf, " type='%s' format='%s'",
                      virStorageTypeToString(src->type),
                      virStorageFileFormatTypeToString(src->format));

    if (virDomainDiskSourceFormat(&childBuf, src, "source", 0, true, xmlflags,
                                  false, false, xmlopt) < 0)
        return -1;

    if (chain &&
        virDomainDiskBackingStoreFormat(&childBuf, src, xmlopt, xmlflags) < 0)
        return -1;

    virXMLFormatElement(buf, element, &attrBuf, &childBuf);

    return 0;
}
|
|
|
|
|
|
|
|
|
2020-02-25 06:14:13 +00:00
|
|
|
/**
 * qemuDomainPrivateBlockJobFormatCommit:
 * @job: block commit job to format
 * @buf: output buffer
 *
 * Formats the commit-specific child elements of a <blockjob> element:
 * references to the base/top/topparent nodes of the chain and the flag
 * requesting deletion of committed images.
 */
static void
qemuDomainPrivateBlockJobFormatCommit(qemuBlockJobDataPtr job,
                                      virBufferPtr buf)
{
    /* NOTE(review): nothing is ever added to this buffer here, so the
     * <disabledBaseBitmaps> element below is never emitted — presumably
     * a leftover of a removed feature; confirm before relying on it. */
    g_auto(virBuffer) disabledBitmapsBuf = VIR_BUFFER_INIT_CHILD(buf);

    if (job->data.commit.base)
        virBufferAsprintf(buf, "<base node='%s'/>\n", job->data.commit.base->nodeformat);

    if (job->data.commit.top)
        virBufferAsprintf(buf, "<top node='%s'/>\n", job->data.commit.top->nodeformat);

    if (job->data.commit.topparent)
        virBufferAsprintf(buf, "<topparent node='%s'/>\n", job->data.commit.topparent->nodeformat);

    if (job->data.commit.deleteCommittedImages)
        virBufferAddLit(buf, "<deleteCommittedImages/>\n");

    virXMLFormatElement(buf, "disabledBaseBitmaps", NULL, &disabledBitmapsBuf);
}
|
|
|
|
|
|
|
|
|
2018-11-30 09:36:22 +00:00
|
|
|
/**
 * qemuDomainObjPrivateXMLFormatBlockjobIterator:
 * @payload: the qemuBlockJobData being formatted
 * @name: hash key (unused)
 * @opaque: qemuDomainPrivateBlockJobFormatData with the buffer and xmlopt
 *
 * Hash-table iterator formatting one <blockjob> status element per tracked
 * block job: common attributes (name, type, state, pending new state,
 * broken type, job flags), the associated disk or raw source chains, and
 * type-specific children for pull/commit/create/copy/backup jobs.
 *
 * Returns 0 on success, -1 on failure (aborts the iteration).
 */
static int
qemuDomainObjPrivateXMLFormatBlockjobIterator(void *payload,
                                              const char *name G_GNUC_UNUSED,
                                              void *opaque)
{
    struct qemuDomainPrivateBlockJobFormatData *data = opaque;
    g_auto(virBuffer) attrBuf = VIR_BUFFER_INITIALIZER;
    g_auto(virBuffer) childBuf = VIR_BUFFER_INIT_CHILD(data->buf);
    g_auto(virBuffer) chainsBuf = VIR_BUFFER_INIT_CHILD(&childBuf);
    qemuBlockJobDataPtr job = payload;
    const char *state = qemuBlockjobStateTypeToString(job->state);
    const char *newstate = NULL;

    /* -1 means no state transition is pending */
    if (job->newstate != -1)
        newstate = qemuBlockjobStateTypeToString(job->newstate);

    virBufferEscapeString(&attrBuf, " name='%s'", job->name);
    virBufferEscapeString(&attrBuf, " type='%s'", qemuBlockjobTypeToString(job->type));
    virBufferEscapeString(&attrBuf, " state='%s'", state);
    virBufferEscapeString(&attrBuf, " newstate='%s'", newstate);
    if (job->brokentype != QEMU_BLOCKJOB_TYPE_NONE)
        virBufferEscapeString(&attrBuf, " brokentype='%s'", qemuBlockjobTypeToString(job->brokentype));
    if (!job->jobflagsmissing)
        virBufferAsprintf(&attrBuf, " jobflags='0x%x'", job->jobflags);
    virBufferEscapeString(&childBuf, "<errmsg>%s</errmsg>", job->errmsg);

    if (job->disk) {
        /* job is associated with a disk definition: reference it by target */
        virBufferEscapeString(&childBuf, "<disk dst='%s'", job->disk->dst);
        if (job->mirrorChain)
            virBufferAddLit(&childBuf, " mirror='yes'");
        virBufferAddLit(&childBuf, "/>\n");
    } else {
        /* no disk: format the raw storage source chains instead */
        if (job->chain &&
            qemuDomainObjPrivateXMLFormatBlockjobFormatSource(&chainsBuf,
                                                              "disk",
                                                              job->chain,
                                                              data->xmlopt,
                                                              true) < 0)
            return -1;

        if (job->mirrorChain &&
            qemuDomainObjPrivateXMLFormatBlockjobFormatSource(&chainsBuf,
                                                              "mirror",
                                                              job->mirrorChain,
                                                              data->xmlopt,
                                                              true) < 0)
            return -1;

        virXMLFormatElement(&childBuf, "chains", NULL, &chainsBuf);
    }

    /* type-specific children; the cast makes the switch exhaustive so the
     * compiler flags newly added job types */
    switch ((qemuBlockJobType) job->type) {
    case QEMU_BLOCKJOB_TYPE_PULL:
        if (job->data.pull.base)
            virBufferAsprintf(&childBuf, "<base node='%s'/>\n", job->data.pull.base->nodeformat);
        break;

    case QEMU_BLOCKJOB_TYPE_COMMIT:
    case QEMU_BLOCKJOB_TYPE_ACTIVE_COMMIT:
        qemuDomainPrivateBlockJobFormatCommit(job, &childBuf);
        break;

    case QEMU_BLOCKJOB_TYPE_CREATE:
        if (job->data.create.storage)
            virBufferAddLit(&childBuf, "<create mode='storage'/>\n");

        if (job->data.create.src &&
            qemuDomainObjPrivateXMLFormatBlockjobFormatSource(&childBuf,
                                                              "src",
                                                              job->data.create.src,
                                                              data->xmlopt,
                                                              false) < 0)
            return -1;
        break;

    case QEMU_BLOCKJOB_TYPE_COPY:
        if (job->data.copy.shallownew)
            virBufferAddLit(&attrBuf, " shallownew='yes'");
        break;

    case QEMU_BLOCKJOB_TYPE_BACKUP:
        virBufferEscapeString(&childBuf, "<bitmap name='%s'/>\n", job->data.backup.bitmap);
        if (job->data.backup.store) {
            if (qemuDomainObjPrivateXMLFormatBlockjobFormatSource(&childBuf,
                                                                  "store",
                                                                  job->data.backup.store,
                                                                  data->xmlopt,
                                                                  false) < 0)
                return -1;
        }
        break;

    case QEMU_BLOCKJOB_TYPE_BROKEN:
    case QEMU_BLOCKJOB_TYPE_NONE:
    case QEMU_BLOCKJOB_TYPE_INTERNAL:
    case QEMU_BLOCKJOB_TYPE_LAST:
        break;
    }

    virXMLFormatElement(data->buf, "blockjob", &attrBuf, &childBuf);
    return 0;
}
|
|
|
|
|
|
|
|
|
2017-09-26 14:37:47 +00:00
|
|
|
/**
 * qemuDomainObjPrivateXMLFormatBlockjobs:
 * @buf: output buffer
 * @vm: domain object
 *
 * Formats the <blockjobs> status element: the 'active' attribute records
 * whether any block job is running, and — when -blockdev is in use — one
 * <blockjob> child per tracked job (sorted for stable output).
 *
 * Returns 0 on success, -1 on failure.
 */
static int
qemuDomainObjPrivateXMLFormatBlockjobs(virBufferPtr buf,
                                       virDomainObjPtr vm)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    g_auto(virBuffer) attrBuf = VIR_BUFFER_INITIALIZER;
    g_auto(virBuffer) childBuf = VIR_BUFFER_INIT_CHILD(buf);
    bool bj = qemuDomainHasBlockjob(vm, false);
    struct qemuDomainPrivateBlockJobFormatData iterdata = { priv->driver->xmlopt,
                                                            &childBuf };

    virBufferAsprintf(&attrBuf, " active='%s'",
                      virTristateBoolTypeToString(virTristateBoolFromBool(bj)));

    /* per-job details are only tracked in -blockdev mode */
    if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV) &&
        virHashForEachSorted(priv->blockjobs,
                             qemuDomainObjPrivateXMLFormatBlockjobIterator,
                             &iterdata) < 0)
        return -1;

    virXMLFormatElement(buf, "blockjobs", &attrBuf, &childBuf);
    return 0;
}
|
|
|
|
|
|
|
|
|
2019-09-18 09:27:05 +00:00
|
|
|
static int
|
|
|
|
qemuDomainObjPrivateXMLFormatBackups(virBufferPtr buf,
|
|
|
|
virDomainObjPtr vm)
|
|
|
|
{
|
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
|
|
|
g_auto(virBuffer) attrBuf = VIR_BUFFER_INITIALIZER;
|
|
|
|
g_auto(virBuffer) childBuf = VIR_BUFFER_INIT_CHILD(buf);
|
|
|
|
|
|
|
|
if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_INCREMENTAL_BACKUP))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (priv->backup &&
|
|
|
|
virDomainBackupDefFormat(&childBuf, priv->backup, true) < 0)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
virXMLFormatElement(buf, "backups", &attrBuf, &childBuf);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2017-10-13 10:06:54 +00:00
|
|
|
void
|
2017-10-11 13:57:16 +00:00
|
|
|
qemuDomainObjPrivateXMLFormatAllowReboot(virBufferPtr buf,
|
|
|
|
virTristateBool allowReboot)
|
|
|
|
{
|
|
|
|
virBufferAsprintf(buf, "<allowReboot value='%s'/>\n",
|
|
|
|
virTristateBoolTypeToString(allowReboot));
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2018-04-19 08:00:36 +00:00
|
|
|
static void
|
|
|
|
qemuDomainObjPrivateXMLFormatPR(virBufferPtr buf,
|
|
|
|
qemuDomainObjPrivatePtr priv)
|
|
|
|
{
|
|
|
|
if (priv->prDaemonRunning)
|
|
|
|
virBufferAddLit(buf, "<prDaemon/>\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2019-08-08 14:55:06 +00:00
|
|
|
static bool
|
|
|
|
qemuDomainHasSlirp(virDomainObjPtr vm)
|
|
|
|
{
|
|
|
|
size_t i;
|
|
|
|
|
|
|
|
for (i = 0; i < vm->def->nnets; i++) {
|
|
|
|
virDomainNetDefPtr net = vm->def->nets[i];
|
|
|
|
|
|
|
|
if (QEMU_DOMAIN_NETWORK_PRIVATE(net)->slirp)
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2019-08-08 14:55:07 +00:00
|
|
|
static bool
|
|
|
|
qemuDomainGetSlirpHelperOk(virDomainObjPtr vm)
|
|
|
|
{
|
|
|
|
size_t i;
|
|
|
|
|
|
|
|
for (i = 0; i < vm->def->nnets; i++) {
|
|
|
|
virDomainNetDefPtr net = vm->def->nets[i];
|
|
|
|
|
|
|
|
/* if there is a builtin slirp, prevent slirp-helper */
|
|
|
|
if (net->type == VIR_DOMAIN_NET_TYPE_USER &&
|
|
|
|
!QEMU_DOMAIN_NETWORK_PRIVATE(net)->slirp)
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2019-08-08 14:55:06 +00:00
|
|
|
/**
 * qemuDomainObjPrivateXMLFormatSlirp:
 * @buf: output buffer
 * @vm: domain object
 *
 * Formats the <slirp> status element: one <helper> child per network
 * interface backed by a slirp-helper process, recording its alias, PID
 * and supported features. Emits nothing when no interface uses slirp.
 *
 * Returns 0 (formatting cannot fail).
 */
static int
qemuDomainObjPrivateXMLFormatSlirp(virBufferPtr buf,
                                   virDomainObjPtr vm)
{
    size_t i;

    if (!qemuDomainHasSlirp(vm))
        return 0;

    virBufferAddLit(buf, "<slirp>\n");
    virBufferAdjustIndent(buf, 2);

    for (i = 0; i < vm->def->nnets; i++) {
        virDomainNetDefPtr net = vm->def->nets[i];
        qemuSlirpPtr slirp = QEMU_DOMAIN_NETWORK_PRIVATE(net)->slirp;
        size_t j;

        if (!slirp)
            continue;

        virBufferAsprintf(buf, "<helper alias='%s' pid='%d'>\n",
                          net->info.alias, slirp->pid);

        virBufferAdjustIndent(buf, 2);
        /* record every feature the helper advertised */
        for (j = 0; j < QEMU_SLIRP_FEATURE_LAST; j++) {
            if (qemuSlirpHasFeature(slirp, j)) {
                virBufferAsprintf(buf, "<feature name='%s'/>\n",
                                  qemuSlirpFeatureTypeToString(j));
            }
        }
        virBufferAdjustIndent(buf, -2);
        virBufferAddLit(buf, "</helper>\n");
    }

    virBufferAdjustIndent(buf, -2);
    virBufferAddLit(buf, "</slirp>\n");

    return 0;
}
|
|
|
|
|
2013-03-05 15:17:24 +00:00
|
|
|
static int
|
2015-05-19 08:14:19 +00:00
|
|
|
qemuDomainObjPrivateXMLFormat(virBufferPtr buf,
|
|
|
|
virDomainObjPtr vm)
|
2010-12-16 15:23:41 +00:00
|
|
|
{
|
2015-05-19 08:14:19 +00:00
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
2010-12-16 15:23:41 +00:00
|
|
|
const char *monitorpath;
|
|
|
|
|
|
|
|
/* priv->monitor_chr is set only for qemu */
|
|
|
|
if (priv->monConfig) {
|
2011-01-07 23:36:25 +00:00
|
|
|
switch (priv->monConfig->type) {
|
2010-12-16 15:23:41 +00:00
|
|
|
case VIR_DOMAIN_CHR_TYPE_UNIX:
|
2011-01-07 23:36:25 +00:00
|
|
|
monitorpath = priv->monConfig->data.nix.path;
|
2010-12-16 15:23:41 +00:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
case VIR_DOMAIN_CHR_TYPE_PTY:
|
2011-01-07 23:36:25 +00:00
|
|
|
monitorpath = priv->monConfig->data.file.path;
|
2010-12-16 15:23:41 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2014-03-05 09:50:42 +00:00
|
|
|
virBufferEscapeString(buf, "<monitor path='%s'", monitorpath);
|
2011-04-30 16:34:49 +00:00
|
|
|
virBufferAsprintf(buf, " type='%s'/>\n",
|
2011-01-07 23:36:25 +00:00
|
|
|
virDomainChrTypeToString(priv->monConfig->type));
|
2010-12-16 15:23:41 +00:00
|
|
|
}
|
|
|
|
|
2020-02-25 09:55:09 +00:00
|
|
|
if (priv->dbusDaemonRunning)
|
|
|
|
virBufferAddLit(buf, "<dbusDaemon/>\n");
|
|
|
|
|
2020-02-25 09:55:11 +00:00
|
|
|
if (priv->dbusVMState)
|
|
|
|
virBufferAddLit(buf, "<dbusVMState/>\n");
|
|
|
|
|
2016-11-15 10:30:18 +00:00
|
|
|
if (priv->namespaces) {
|
|
|
|
ssize_t ns = -1;
|
|
|
|
|
|
|
|
virBufferAddLit(buf, "<namespaces>\n");
|
|
|
|
virBufferAdjustIndent(buf, 2);
|
|
|
|
while ((ns = virBitmapNextSetBit(priv->namespaces, ns)) >= 0)
|
|
|
|
virBufferAsprintf(buf, "<%s/>\n", qemuDomainNamespaceTypeToString(ns));
|
|
|
|
virBufferAdjustIndent(buf, -2);
|
|
|
|
virBufferAddLit(buf, "</namespaces>\n");
|
|
|
|
}
|
|
|
|
|
2016-07-01 12:56:14 +00:00
|
|
|
qemuDomainObjPrivateXMLFormatVcpus(buf, vm->def);
|
2010-12-16 15:23:41 +00:00
|
|
|
|
2013-02-01 13:48:58 +00:00
|
|
|
if (priv->qemuCaps) {
|
Convert 'int i' to 'size_t i' in src/qemu files
Convert the type of loop iterators named 'i', 'j', k',
'ii', 'jj', 'kk', to be 'size_t' instead of 'int' or
'unsigned int', also santizing 'ii', 'jj', 'kk' to use
the normal 'i', 'j', 'k' naming
Signed-off-by: Daniel P. Berrange <berrange@redhat.com>
2013-07-08 14:09:33 +00:00
|
|
|
size_t i;
|
2014-03-05 09:50:42 +00:00
|
|
|
virBufferAddLit(buf, "<qemuCaps>\n");
|
|
|
|
virBufferAdjustIndent(buf, 2);
|
2013-05-21 07:21:20 +00:00
|
|
|
for (i = 0; i < QEMU_CAPS_LAST; i++) {
|
2013-02-01 13:48:58 +00:00
|
|
|
if (virQEMUCapsGet(priv->qemuCaps, i)) {
|
2014-03-05 09:50:42 +00:00
|
|
|
virBufferAsprintf(buf, "<flag name='%s'/>\n",
|
2013-02-01 13:48:58 +00:00
|
|
|
virQEMUCapsTypeToString(i));
|
2011-05-04 11:55:38 +00:00
|
|
|
}
|
|
|
|
}
|
2014-03-05 09:50:42 +00:00
|
|
|
virBufferAdjustIndent(buf, -2);
|
|
|
|
virBufferAddLit(buf, "</qemuCaps>\n");
|
2011-05-04 11:55:38 +00:00
|
|
|
}
|
|
|
|
|
2010-10-26 14:04:46 +00:00
|
|
|
if (priv->lockState)
|
2014-03-05 09:50:42 +00:00
|
|
|
virBufferAsprintf(buf, "<lockstate>%s</lockstate>\n", priv->lockState);
|
2010-10-26 14:04:46 +00:00
|
|
|
|
2020-07-13 18:03:38 +00:00
|
|
|
if (qemuDomainObjPrivateXMLFormatJob(buf, vm) < 0)
|
2018-03-01 17:16:38 +00:00
|
|
|
return -1;
|
2011-06-06 08:28:38 +00:00
|
|
|
|
2011-09-28 10:10:13 +00:00
|
|
|
if (priv->fakeReboot)
|
2014-03-05 09:50:42 +00:00
|
|
|
virBufferAddLit(buf, "<fakereboot/>\n");
|
2011-09-28 10:10:13 +00:00
|
|
|
|
2013-07-19 13:08:29 +00:00
|
|
|
if (priv->qemuDevices && *priv->qemuDevices) {
|
|
|
|
char **tmp = priv->qemuDevices;
|
2014-03-05 09:50:42 +00:00
|
|
|
virBufferAddLit(buf, "<devices>\n");
|
|
|
|
virBufferAdjustIndent(buf, 2);
|
2013-07-19 13:08:29 +00:00
|
|
|
while (*tmp) {
|
2014-03-05 09:50:42 +00:00
|
|
|
virBufferAsprintf(buf, "<device alias='%s'/>\n", *tmp);
|
2013-07-19 13:08:29 +00:00
|
|
|
tmp++;
|
|
|
|
}
|
2014-03-05 09:50:42 +00:00
|
|
|
virBufferAdjustIndent(buf, -2);
|
|
|
|
virBufferAddLit(buf, "</devices>\n");
|
2013-07-19 13:08:29 +00:00
|
|
|
}
|
|
|
|
|
2017-09-26 14:36:48 +00:00
|
|
|
if (qemuDomainObjPrivateXMLFormatAutomaticPlacement(buf, priv) < 0)
|
2017-07-12 07:24:07 +00:00
|
|
|
return -1;
|
2015-07-24 14:06:33 +00:00
|
|
|
|
2016-02-26 08:15:55 +00:00
|
|
|
/* Various per-domain paths */
|
|
|
|
virBufferEscapeString(buf, "<libDir path='%s'/>\n", priv->libDir);
|
|
|
|
virBufferEscapeString(buf, "<channelTargetDir path='%s'/>\n",
|
|
|
|
priv->channelTargetDir);
|
|
|
|
|
2017-06-30 13:47:23 +00:00
|
|
|
virCPUDefFormatBufFull(buf, priv->origCPU, NULL);
|
2017-05-16 11:26:54 +00:00
|
|
|
|
2017-06-15 06:34:55 +00:00
|
|
|
if (priv->chardevStdioLogd)
|
2017-07-07 12:27:50 +00:00
|
|
|
virBufferAddLit(buf, "<chardevStdioLogd/>\n");
|
2017-06-15 06:34:55 +00:00
|
|
|
|
2018-11-13 11:50:41 +00:00
|
|
|
if (priv->rememberOwner)
|
|
|
|
virBufferAddLit(buf, "<rememberOwner/>\n");
|
|
|
|
|
2017-10-11 13:57:16 +00:00
|
|
|
qemuDomainObjPrivateXMLFormatAllowReboot(buf, priv->allowReboot);
|
|
|
|
|
2018-04-19 08:00:36 +00:00
|
|
|
qemuDomainObjPrivateXMLFormatPR(buf, priv);
|
|
|
|
|
2017-07-07 12:29:32 +00:00
|
|
|
if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV))
|
|
|
|
virBufferAsprintf(buf, "<nodename index='%llu'/>\n", priv->nodenameindex);
|
|
|
|
|
2018-11-05 10:48:16 +00:00
|
|
|
if (priv->memPrealloc)
|
|
|
|
virBufferAddLit(buf, "<memPrealloc/>\n");
|
|
|
|
|
2017-09-26 14:37:47 +00:00
|
|
|
if (qemuDomainObjPrivateXMLFormatBlockjobs(buf, vm) < 0)
|
|
|
|
return -1;
|
|
|
|
|
2019-08-08 14:55:06 +00:00
|
|
|
if (qemuDomainObjPrivateXMLFormatSlirp(buf, vm) < 0)
|
|
|
|
return -1;
|
|
|
|
|
Add API to change qemu agent response timeout
Some layered products such as oVirt have requested a way to avoid being
blocked by guest agent commands when querying a loaded vm. For example,
many guest agent commands are polled periodically to monitor changes,
and rather than blocking the calling process, they'd prefer to simply
time out when an agent query is taking too long.
This patch adds a way for the user to specify a custom agent timeout
that is applied to all agent commands.
One special case to note here is the 'guest-sync' command. 'guest-sync'
is issued internally prior to calling any other command. (For example,
when libvirt wants to call 'guest-get-fsinfo', we first call
'guest-sync' and then call 'guest-get-fsinfo').
Previously, the 'guest-sync' command used a 5-second timeout
(VIR_DOMAIN_QEMU_AGENT_COMMAND_DEFAULT), whereas the actual command that
followed always blocked indefinitely
(VIR_DOMAIN_QEMU_AGENT_COMMAND_BLOCK). As part of this patch, if a
custom timeout is specified that is shorter than
5 seconds, this new timeout is also used for 'guest-sync'. If there is
no custom timeout or if the custom timeout is longer than 5 seconds, we
will continue to use the 5-second timeout.
Signed-off-by: Jonathon Jongsma <jjongsma@redhat.com>
Signed-off-by: Michal Privoznik <mprivozn@redhat.com>
Reviewed-by: Michal Privoznik <mprivozn@redhat.com>
2019-11-13 22:06:09 +00:00
|
|
|
virBufferAsprintf(buf, "<agentTimeout>%i</agentTimeout>\n", priv->agentTimeout);
|
|
|
|
|
2019-09-18 09:27:05 +00:00
|
|
|
if (qemuDomainObjPrivateXMLFormatBackups(buf, vm) < 0)
|
|
|
|
return -1;
|
|
|
|
|
2010-12-16 15:23:41 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-06-30 08:35:12 +00:00
|
|
|
|
|
|
|
static int
|
|
|
|
qemuDomainObjPrivateXMLParseVcpu(xmlNodePtr node,
|
|
|
|
unsigned int idx,
|
2016-07-01 12:56:14 +00:00
|
|
|
virDomainDefPtr def)
|
2016-06-30 08:35:12 +00:00
|
|
|
{
|
2016-07-01 12:56:14 +00:00
|
|
|
virDomainVcpuDefPtr vcpu;
|
2020-01-09 18:33:44 +00:00
|
|
|
g_autofree char *idstr = NULL;
|
|
|
|
g_autofree char *pidstr = NULL;
|
2016-07-01 12:56:14 +00:00
|
|
|
unsigned int tmp;
|
2016-06-30 08:35:12 +00:00
|
|
|
|
2016-07-01 12:56:14 +00:00
|
|
|
idstr = virXMLPropString(node, "id");
|
|
|
|
|
2016-07-11 09:30:03 +00:00
|
|
|
if (idstr &&
|
|
|
|
(virStrToLong_uip(idstr, NULL, 10, &idx) < 0)) {
|
2016-07-01 12:56:14 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
2016-07-11 09:30:03 +00:00
|
|
|
_("cannot parse vcpu index '%s'"), idstr);
|
2020-01-09 18:33:46 +00:00
|
|
|
return -1;
|
2016-07-11 09:30:03 +00:00
|
|
|
}
|
|
|
|
if (!(vcpu = virDomainDefGetVcpu(def, idx))) {
|
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
_("invalid vcpu index '%u'"), idx);
|
2020-01-09 18:33:46 +00:00
|
|
|
return -1;
|
2016-06-30 13:06:46 +00:00
|
|
|
}
|
|
|
|
|
2016-06-30 08:35:12 +00:00
|
|
|
if (!(pidstr = virXMLPropString(node, "pid")))
|
2020-01-09 18:33:46 +00:00
|
|
|
return -1;
|
2016-06-30 08:35:12 +00:00
|
|
|
|
2016-07-01 12:56:14 +00:00
|
|
|
if (virStrToLong_uip(pidstr, NULL, 10, &tmp) < 0)
|
2020-01-09 18:33:46 +00:00
|
|
|
return -1;
|
2016-06-30 08:35:12 +00:00
|
|
|
|
2016-07-01 12:56:14 +00:00
|
|
|
QEMU_DOMAIN_VCPU_PRIVATE(vcpu)->tid = tmp;
|
|
|
|
|
2020-01-09 18:33:46 +00:00
|
|
|
return 0;
|
2016-06-30 08:35:12 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2017-07-12 07:24:07 +00:00
|
|
|
/**
 * qemuDomainObjPrivateXMLParseAutomaticPlacement:
 * @ctxt: XPath context positioned at the domain private data element
 * @priv: qemu domain private data to populate
 * @driver: qemu driver, used to obtain host NUMA capabilities
 *
 * Restores the automatic placement data (apparently produced by numad,
 * given the './numad' element) from the status XML into
 * @priv->autoNodeset and @priv->autoCpuset.
 *
 * Returns 0 on success (including when no placement data is present),
 * -1 on failure (error reported by the failing helper).
 */
static int
qemuDomainObjPrivateXMLParseAutomaticPlacement(xmlXPathContextPtr ctxt,
                                               qemuDomainObjPrivatePtr priv,
                                               virQEMUDriverPtr driver)
{
    g_autoptr(virCapsHostNUMA) caps = NULL;
    g_autofree char *nodeset = NULL;
    g_autofree char *cpuset = NULL;
    int nodesetSize = 0;
    size_t i;

    nodeset = virXPathString("string(./numad/@nodeset)", ctxt);
    cpuset = virXPathString("string(./numad/@cpuset)", ctxt);

    /* no placement data recorded - nothing to restore */
    if (!nodeset && !cpuset)
        return 0;

    if (!(caps = virQEMUDriverGetHostNUMACaps(driver)))
        return -1;

    /* Figure out how big the nodeset bitmap needs to be.
     * This is necessary because NUMA node IDs are not guaranteed to
     * start from 0 or be densely allocated */
    for (i = 0; i < caps->cells->len; i++) {
        virCapsHostNUMACellPtr cell =
            g_ptr_array_index(caps->cells, i);
        nodesetSize = MAX(nodesetSize, cell->num + 1);
    }

    if (nodeset &&
        virBitmapParse(nodeset, &priv->autoNodeset, nodesetSize) < 0)
        return -1;

    if (cpuset) {
        if (virBitmapParse(cpuset, &priv->autoCpuset, VIR_DOMAIN_CPUMASK_LEN) < 0)
            return -1;
    } else {
        /* autoNodeset is present in this case, since otherwise we wouldn't
         * reach this code */
        if (!(priv->autoCpuset = virCapabilitiesHostNUMAGetCpus(caps,
                                                                priv->autoNodeset)))
            return -1;
    }

    return 0;
}
|
|
|
|
|
|
|
|
|
2019-03-19 06:54:12 +00:00
|
|
|
/**
 * qemuDomainObjPrivateXMLParseBlockjobChain:
 * @node: XML node holding one serialized storage source
 * @ctxt: XPath context; the current node is saved and restored automatically
 *        via VIR_XPATH_NODE_AUTORESTORE
 * @xmlopt: XML parser configuration
 *
 * Parses one storage source, including its backing chain, from block job
 * status XML. The 'type' and 'format' attributes and the <source> child
 * (with its 'index' attribute) are all mandatory.
 *
 * Returns a new virStorageSource on success, NULL on failure (error
 * reported).
 */
static virStorageSourcePtr
qemuDomainObjPrivateXMLParseBlockjobChain(xmlNodePtr node,
                                          xmlXPathContextPtr ctxt,
                                          virDomainXMLOptionPtr xmlopt)

{
    VIR_XPATH_NODE_AUTORESTORE(ctxt)
    g_autofree char *format = NULL;
    g_autofree char *type = NULL;
    g_autofree char *index = NULL;
    g_autoptr(virStorageSource) src = NULL;
    xmlNodePtr sourceNode;
    /* status XML parsing mode - accepts private/internal data */
    unsigned int xmlflags = VIR_DOMAIN_DEF_PARSE_STATUS;

    ctxt->node = node;

    if (!(type = virXMLPropString(ctxt->node, "type")) ||
        !(format = virXMLPropString(ctxt->node, "format")) ||
        !(index = virXPathString("string(./source/@index)", ctxt)) ||
        !(sourceNode = virXPathNode("./source", ctxt))) {
        virReportError(VIR_ERR_XML_ERROR, "%s",
                       _("missing job chain data"));
        return NULL;
    }

    if (!(src = virDomainStorageSourceParseBase(type, format, index)))
        return NULL;

    if (virDomainStorageSourceParse(sourceNode, ctxt, src, xmlflags, xmlopt) < 0)
        return NULL;

    /* also restore the backing chain hanging off this source */
    if (virDomainDiskBackingStoreParse(ctxt, src, xmlflags, xmlopt) < 0)
        return NULL;

    return g_steal_pointer(&src);
}
|
|
|
|
|
|
|
|
|
2019-07-22 11:39:24 +00:00
|
|
|
static void
|
|
|
|
qemuDomainObjPrivateXMLParseBlockjobNodename(qemuBlockJobDataPtr job,
|
|
|
|
const char *xpath,
|
|
|
|
virStorageSourcePtr *src,
|
|
|
|
xmlXPathContextPtr ctxt)
|
|
|
|
{
|
2019-10-15 13:16:31 +00:00
|
|
|
g_autofree char *nodename = NULL;
|
2019-07-22 11:39:24 +00:00
|
|
|
|
|
|
|
*src = NULL;
|
|
|
|
|
|
|
|
if (!(nodename = virXPathString(xpath, ctxt)))
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (job->disk &&
|
2020-07-15 10:33:34 +00:00
|
|
|
(*src = virStorageSourceFindByNodeName(job->disk->src, nodename)))
|
2019-07-22 11:39:24 +00:00
|
|
|
return;
|
|
|
|
|
|
|
|
if (job->chain &&
|
2020-07-15 10:33:34 +00:00
|
|
|
(*src = virStorageSourceFindByNodeName(job->chain, nodename)))
|
2019-07-22 11:39:24 +00:00
|
|
|
return;
|
|
|
|
|
|
|
|
if (job->mirrorChain &&
|
2020-07-15 10:33:34 +00:00
|
|
|
(*src = virStorageSourceFindByNodeName(job->mirrorChain, nodename)))
|
2019-07-22 11:39:24 +00:00
|
|
|
return;
|
|
|
|
|
|
|
|
/* the node was in the XML but was not found in the job definitions */
|
|
|
|
VIR_DEBUG("marking block job '%s' as invalid: node name '%s' missing",
|
|
|
|
job->name, nodename);
|
|
|
|
job->invalidData = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2020-02-25 06:22:05 +00:00
|
|
|
static int
|
|
|
|
qemuDomainObjPrivateXMLParseBlockjobDataCommit(qemuBlockJobDataPtr job,
|
|
|
|
xmlXPathContextPtr ctxt)
|
|
|
|
{
|
|
|
|
if (job->type == QEMU_BLOCKJOB_TYPE_COMMIT) {
|
|
|
|
qemuDomainObjPrivateXMLParseBlockjobNodename(job,
|
|
|
|
"string(./topparent/@node)",
|
|
|
|
&job->data.commit.topparent,
|
|
|
|
ctxt);
|
|
|
|
|
|
|
|
if (!job->data.commit.topparent)
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
qemuDomainObjPrivateXMLParseBlockjobNodename(job,
|
|
|
|
"string(./top/@node)",
|
|
|
|
&job->data.commit.top,
|
|
|
|
ctxt);
|
|
|
|
qemuDomainObjPrivateXMLParseBlockjobNodename(job,
|
|
|
|
"string(./base/@node)",
|
|
|
|
&job->data.commit.base,
|
|
|
|
ctxt);
|
|
|
|
|
|
|
|
if (virXPathNode("./deleteCommittedImages", ctxt))
|
|
|
|
job->data.commit.deleteCommittedImages = true;
|
|
|
|
|
|
|
|
if (!job->data.commit.top ||
|
|
|
|
!job->data.commit.base)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2019-07-22 11:39:24 +00:00
|
|
|
/**
 * qemuDomainObjPrivateXMLParseBlockjobDataSpecific:
 * @job: partially parsed block job data; type is already set
 * @ctxt: XPath context positioned at the job element
 * @xmlopt: XML parser configuration
 *
 * Parses the per-job-type payload of a block job from the status XML.
 * Malformed payload data does not fail the parse; the job is instead
 * marked with 'invalidData' so it can be cleaned up later.
 */
static void
qemuDomainObjPrivateXMLParseBlockjobDataSpecific(qemuBlockJobDataPtr job,
                                                 xmlXPathContextPtr ctxt,
                                                 virDomainXMLOptionPtr xmlopt)
{
    g_autofree char *createmode = NULL;
    g_autofree char *shallownew = NULL;
    xmlNodePtr tmp;

    switch ((qemuBlockJobType) job->type) {
    case QEMU_BLOCKJOB_TYPE_PULL:
        qemuDomainObjPrivateXMLParseBlockjobNodename(job,
                                                     "string(./base/@node)",
                                                     &job->data.pull.base,
                                                     ctxt);
        /* base is not present if pulling everything */
        break;

    case QEMU_BLOCKJOB_TYPE_COMMIT:
    case QEMU_BLOCKJOB_TYPE_ACTIVE_COMMIT:
        if (qemuDomainObjPrivateXMLParseBlockjobDataCommit(job, ctxt) < 0)
            goto broken;

        break;

    case QEMU_BLOCKJOB_TYPE_CREATE:
        /* the image being created is carried in full in the <src> element */
        if (!(tmp = virXPathNode("./src", ctxt)) ||
            !(job->data.create.src = qemuDomainObjPrivateXMLParseBlockjobChain(tmp, ctxt, xmlopt)))
            goto broken;

        /* 'storage' is the only accepted create mode */
        if ((createmode = virXPathString("string(./create/@mode)", ctxt))) {
            if (STRNEQ(createmode, "storage"))
                goto broken;

            job->data.create.storage = true;
        }
        break;

    case QEMU_BLOCKJOB_TYPE_COPY:
        /* 'shallownew' is a boolean marker; only "yes" is valid */
        if ((shallownew = virXPathString("string(./@shallownew)", ctxt))) {
            if (STRNEQ(shallownew, "yes"))
                goto broken;

            job->data.copy.shallownew = true;
        }
        break;

    case QEMU_BLOCKJOB_TYPE_BACKUP:
        job->data.backup.bitmap = virXPathString("string(./bitmap/@name)", ctxt);

        if (!(tmp = virXPathNode("./store", ctxt)) ||
            !(job->data.backup.store = qemuDomainObjPrivateXMLParseBlockjobChain(tmp, ctxt, xmlopt)))
            goto broken;
        break;

    case QEMU_BLOCKJOB_TYPE_BROKEN:
    case QEMU_BLOCKJOB_TYPE_NONE:
    case QEMU_BLOCKJOB_TYPE_INTERNAL:
    case QEMU_BLOCKJOB_TYPE_LAST:
        /* no type-specific data for these */
        break;
    }

    return;

 broken:
    VIR_DEBUG("marking block job '%s' as invalid: malformed job data", job->name);
    job->invalidData = true;
}
|
|
|
|
|
|
|
|
|
2017-09-26 14:37:47 +00:00
|
|
|
/**
 * qemuDomainObjPrivateXMLParseBlockjobData:
 * @vm: domain object the status XML belongs to
 * @node: XML node of one <blockjob> element
 * @ctxt: XPath context; node position is saved/restored automatically
 * @xmlopt: XML parser configuration
 *
 * Parses the saved state of one block job and registers it with @vm.
 * Most malformed data is tolerated: the job is still registered but with
 * 'invalidData' set so that it can be cancelled/cleaned up on reconnect.
 *
 * Returns 0 on success (including a skipped nameless job), -1 on fatal
 * failure (allocation or registration).
 */
static int
qemuDomainObjPrivateXMLParseBlockjobData(virDomainObjPtr vm,
                                         xmlNodePtr node,
                                         xmlXPathContextPtr ctxt,
                                         virDomainXMLOptionPtr xmlopt)
{
    VIR_XPATH_NODE_AUTORESTORE(ctxt)
    virDomainDiskDefPtr disk = NULL;
    g_autoptr(qemuBlockJobData) job = NULL;
    g_autofree char *name = NULL;
    g_autofree char *typestr = NULL;
    g_autofree char *brokentypestr = NULL;
    int type;
    g_autofree char *statestr = NULL;
    int state = QEMU_BLOCKJOB_STATE_FAILED;
    g_autofree char *diskdst = NULL;
    g_autofree char *newstatestr = NULL;
    g_autofree char *mirror = NULL;
    int newstate = -1;
    bool invalidData = false;
    xmlNodePtr tmp;
    unsigned long jobflags = 0;

    ctxt->node = node;

    /* without a name the job cannot be tracked at all - skip it */
    if (!(name = virXPathString("string(./@name)", ctxt))) {
        VIR_WARN("malformed block job data for vm '%s'", vm->def->name);
        return 0;
    }

    /* if the job name is known we need to register such a job so that we can
     * clean it up */
    if (!(typestr = virXPathString("string(./@type)", ctxt)) ||
        (type = qemuBlockjobTypeFromString(typestr)) < 0) {
        type = QEMU_BLOCKJOB_TYPE_BROKEN;
        invalidData = true;
    }

    if (!(job = qemuBlockJobDataNew(type, name)))
        return -1;

    /* 'brokentype' records the original type of an already-broken job;
     * an unparsable value degrades to NONE rather than failing */
    if ((brokentypestr = virXPathString("string(./@brokentype)", ctxt)) &&
        (job->brokentype = qemuBlockjobTypeFromString(brokentypestr)) < 0)
        job->brokentype = QEMU_BLOCKJOB_TYPE_NONE;

    if (!(statestr = virXPathString("string(./@state)", ctxt)) ||
        (state = qemuBlockjobStateTypeFromString(statestr)) < 0)
        invalidData = true;

    /* 'newstate' is optional - only present if a transition was recorded */
    if ((newstatestr = virXPathString("string(./@newstate)", ctxt)) &&
        (newstate = qemuBlockjobStateTypeFromString(newstatestr)) < 0)
        invalidData = true;

    if ((diskdst = virXPathString("string(./disk/@dst)", ctxt)) &&
        !(disk = virDomainDiskByTarget(vm->def, diskdst)))
        invalidData = true;

    /* 'mirror' is a boolean marker; only "yes" is valid */
    if ((mirror = virXPathString("string(./disk/@mirror)", ctxt)) &&
        STRNEQ(mirror, "yes"))
        invalidData = true;

    if (virXPathULongHex("string(./@jobflags)", ctxt, &jobflags) != 0)
        job->jobflagsmissing = true;

    /* jobs not associated with a disk carry their own chain definitions */
    if (!disk && !invalidData) {
        if ((tmp = virXPathNode("./chains/disk", ctxt)) &&
            !(job->chain = qemuDomainObjPrivateXMLParseBlockjobChain(tmp, ctxt, xmlopt)))
            invalidData = true;

        if ((tmp = virXPathNode("./chains/mirror", ctxt)) &&
            !(job->mirrorChain = qemuDomainObjPrivateXMLParseBlockjobChain(tmp, ctxt, xmlopt)))
            invalidData = true;
    }

    /* a mirror marker requires a disk whose mirror chain we can reference */
    if (mirror) {
        if (disk)
            job->mirrorChain = virObjectRef(disk->mirror);
        else
            invalidData = true;
    }

    job->state = state;
    job->newstate = newstate;
    job->jobflags = jobflags;
    job->errmsg = virXPathString("string(./errmsg)", ctxt);
    job->invalidData = invalidData;
    job->disk = disk;

    /* per-job-type payload; may also mark the job as invalid */
    qemuDomainObjPrivateXMLParseBlockjobDataSpecific(job, ctxt, xmlopt);

    if (qemuBlockJobRegister(job, vm, disk, false) < 0)
        return -1;

    return 0;
}
|
|
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
qemuDomainObjPrivateXMLParseBlockjobs(virDomainObjPtr vm,
|
|
|
|
qemuDomainObjPrivatePtr priv,
|
2017-09-26 14:37:47 +00:00
|
|
|
xmlXPathContextPtr ctxt)
|
|
|
|
{
|
2019-10-15 13:16:31 +00:00
|
|
|
g_autofree xmlNodePtr *nodes = NULL;
|
2018-11-30 09:36:22 +00:00
|
|
|
ssize_t nnodes = 0;
|
2019-10-15 13:16:31 +00:00
|
|
|
g_autofree char *active = NULL;
|
2017-09-26 14:37:47 +00:00
|
|
|
int tmp;
|
2018-11-30 09:36:22 +00:00
|
|
|
size_t i;
|
2017-09-26 14:37:47 +00:00
|
|
|
|
|
|
|
if ((active = virXPathString("string(./blockjobs/@active)", ctxt)) &&
|
|
|
|
(tmp = virTristateBoolTypeFromString(active)) > 0)
|
|
|
|
priv->reconnectBlockjobs = tmp;
|
|
|
|
|
2018-11-30 09:36:22 +00:00
|
|
|
if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV)) {
|
|
|
|
if ((nnodes = virXPathNodeSet("./blockjobs/blockjob", ctxt, &nodes)) < 0)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
for (i = 0; i < nnodes; i++) {
|
2019-03-19 06:54:12 +00:00
|
|
|
if (qemuDomainObjPrivateXMLParseBlockjobData(vm, nodes[i], ctxt,
|
|
|
|
priv->driver->xmlopt) < 0)
|
2018-11-30 09:36:22 +00:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-09-26 14:37:47 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2019-09-18 09:27:05 +00:00
|
|
|
static int
|
|
|
|
qemuDomainObjPrivateXMLParseBackups(qemuDomainObjPrivatePtr priv,
|
|
|
|
xmlXPathContextPtr ctxt)
|
|
|
|
{
|
|
|
|
g_autofree xmlNodePtr *nodes = NULL;
|
|
|
|
ssize_t nnodes = 0;
|
|
|
|
|
|
|
|
if ((nnodes = virXPathNodeSet("./backups/domainbackup", ctxt, &nodes)) < 0)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
if (nnodes > 1) {
|
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
|
|
|
|
_("only one backup job is supported"));
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (nnodes == 0)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (!(priv->backup = virDomainBackupDefParseNode(ctxt->doc, nodes[0],
|
|
|
|
priv->driver->xmlopt,
|
|
|
|
VIR_DOMAIN_BACKUP_PARSE_INTERNAL)))
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2017-10-13 10:06:54 +00:00
|
|
|
int
|
2017-10-11 13:57:16 +00:00
|
|
|
qemuDomainObjPrivateXMLParseAllowReboot(xmlXPathContextPtr ctxt,
|
|
|
|
virTristateBool *allowReboot)
|
|
|
|
{
|
|
|
|
int val;
|
2020-01-09 18:33:44 +00:00
|
|
|
g_autofree char *valStr = NULL;
|
2017-10-11 13:57:16 +00:00
|
|
|
|
|
|
|
if ((valStr = virXPathString("string(./allowReboot/@value)", ctxt))) {
|
|
|
|
if ((val = virTristateBoolTypeFromString(valStr)) < 0) {
|
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
_("invalid allowReboot value '%s'"), valStr);
|
2020-01-09 18:33:46 +00:00
|
|
|
return -1;
|
2017-10-11 13:57:16 +00:00
|
|
|
}
|
|
|
|
*allowReboot = val;
|
|
|
|
}
|
|
|
|
|
2020-01-09 18:33:46 +00:00
|
|
|
return 0;
|
2017-10-11 13:57:16 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2018-04-19 08:00:36 +00:00
|
|
|
static void
|
|
|
|
qemuDomainObjPrivateXMLParsePR(xmlXPathContextPtr ctxt,
|
|
|
|
bool *prDaemonRunning)
|
|
|
|
{
|
|
|
|
*prDaemonRunning = virXPathBoolean("boolean(./prDaemon)", ctxt) > 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2019-08-08 14:55:06 +00:00
|
|
|
static int
|
|
|
|
qemuDomainObjPrivateXMLParseSlirpFeatures(xmlNodePtr featuresNode,
|
|
|
|
xmlXPathContextPtr ctxt,
|
|
|
|
qemuSlirpPtr slirp)
|
|
|
|
{
|
2020-07-28 19:47:48 +00:00
|
|
|
VIR_XPATH_NODE_AUTORESTORE(ctxt)
|
2019-10-15 13:16:31 +00:00
|
|
|
g_autofree xmlNodePtr *nodes = NULL;
|
2019-08-08 14:55:06 +00:00
|
|
|
size_t i;
|
|
|
|
int n;
|
|
|
|
|
|
|
|
ctxt->node = featuresNode;
|
|
|
|
|
|
|
|
if ((n = virXPathNodeSet("./feature", ctxt, &nodes)) < 0) {
|
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
"%s", _("failed to parse slirp-helper features"));
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < n; i++) {
|
2019-10-15 13:16:31 +00:00
|
|
|
g_autofree char *str = virXMLPropString(nodes[i], "name");
|
2019-08-08 14:55:06 +00:00
|
|
|
int feature;
|
|
|
|
|
|
|
|
if (!str)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
feature = qemuSlirpFeatureTypeFromString(str);
|
|
|
|
if (feature < 0) {
|
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
_("Unknown slirp feature %s"), str);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
qemuSlirpSetFeature(slirp, feature);
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2013-03-05 15:17:24 +00:00
|
|
|
static int
|
2015-05-19 08:14:19 +00:00
|
|
|
qemuDomainObjPrivateXMLParse(xmlXPathContextPtr ctxt,
|
2015-07-24 17:35:00 +00:00
|
|
|
virDomainObjPtr vm,
|
2015-07-24 14:06:33 +00:00
|
|
|
virDomainDefParserConfigPtr config)
|
2010-12-16 15:23:41 +00:00
|
|
|
{
|
2015-05-19 08:14:19 +00:00
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
2015-07-24 14:06:33 +00:00
|
|
|
virQEMUDriverPtr driver = config->priv;
|
2010-12-16 15:23:41 +00:00
|
|
|
char *monitorpath;
|
2020-01-09 18:33:44 +00:00
|
|
|
g_autofree char *tmp = NULL;
|
Convert 'int i' to 'size_t i' in src/qemu files
Convert the type of loop iterators named 'i', 'j', k',
'ii', 'jj', 'kk', to be 'size_t' instead of 'int' or
'unsigned int', also santizing 'ii', 'jj', 'kk' to use
the normal 'i', 'j', 'k' naming
Signed-off-by: Daniel P. Berrange <berrange@redhat.com>
2013-07-08 14:09:33 +00:00
|
|
|
int n;
|
|
|
|
size_t i;
|
2020-01-09 18:33:44 +00:00
|
|
|
g_autofree xmlNodePtr *nodes = NULL;
|
2016-11-15 10:30:18 +00:00
|
|
|
xmlNodePtr node = NULL;
|
2020-01-09 18:33:45 +00:00
|
|
|
g_autoptr(virQEMUCaps) qemuCaps = NULL;
|
2010-12-16 15:23:41 +00:00
|
|
|
|
2018-04-06 15:49:01 +00:00
|
|
|
if (!(priv->monConfig = virDomainChrSourceDefNew(NULL)))
|
2010-12-16 15:23:41 +00:00
|
|
|
goto error;
|
|
|
|
|
|
|
|
if (!(monitorpath =
|
|
|
|
virXPathString("string(./monitor[1]/@path)", ctxt))) {
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
"%s", _("no monitor path"));
|
2010-12-16 15:23:41 +00:00
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
|
|
|
tmp = virXPathString("string(./monitor[1]/@type)", ctxt);
|
|
|
|
if (tmp)
|
2011-01-07 23:36:25 +00:00
|
|
|
priv->monConfig->type = virDomainChrTypeFromString(tmp);
|
2010-12-16 15:23:41 +00:00
|
|
|
else
|
2011-01-07 23:36:25 +00:00
|
|
|
priv->monConfig->type = VIR_DOMAIN_CHR_TYPE_PTY;
|
2010-12-16 15:23:41 +00:00
|
|
|
VIR_FREE(tmp);
|
|
|
|
|
2011-01-07 23:36:25 +00:00
|
|
|
switch (priv->monConfig->type) {
|
2010-12-16 15:23:41 +00:00
|
|
|
case VIR_DOMAIN_CHR_TYPE_PTY:
|
2011-01-07 23:36:25 +00:00
|
|
|
priv->monConfig->data.file.path = monitorpath;
|
2010-12-16 15:23:41 +00:00
|
|
|
break;
|
|
|
|
case VIR_DOMAIN_CHR_TYPE_UNIX:
|
2011-01-07 23:36:25 +00:00
|
|
|
priv->monConfig->data.nix.path = monitorpath;
|
2010-12-16 15:23:41 +00:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
VIR_FREE(monitorpath);
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
_("unsupported monitor type '%s'"),
|
|
|
|
virDomainChrTypeToString(priv->monConfig->type));
|
2010-12-16 15:23:41 +00:00
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
Add API to change qemu agent response timeout
Some layered products such as oVirt have requested a way to avoid being
blocked by guest agent commands when querying a loaded vm. For example,
many guest agent commands are polled periodically to monitor changes,
and rather than blocking the calling process, they'd prefer to simply
time out when an agent query is taking too long.
This patch adds a way for the user to specify a custom agent timeout
that is applied to all agent commands.
One special case to note here is the 'guest-sync' command. 'guest-sync'
is issued internally prior to calling any other command. (For example,
when libvirt wants to call 'guest-get-fsinfo', we first call
'guest-sync' and then call 'guest-get-fsinfo').
Previously, the 'guest-sync' command used a 5-second timeout
(VIR_DOMAIN_QEMU_AGENT_COMMAND_DEFAULT), whereas the actual command that
followed always blocked indefinitely
(VIR_DOMAIN_QEMU_AGENT_COMMAND_BLOCK). As part of this patch, if a
custom timeout is specified that is shorter than
5 seconds, this new timeout is also used for 'guest-sync'. If there is
no custom timeout or if the custom timeout is longer than 5 seconds, we
will continue to use the 5-second timeout.
Signed-off-by: Jonathon Jongsma <jjongsma@redhat.com>
Signed-off-by: Michal Privoznik <mprivozn@redhat.com>
Reviewed-by: Michal Privoznik <mprivozn@redhat.com>
2019-11-13 22:06:09 +00:00
|
|
|
if (virXPathInt("string(./agentTimeout)", ctxt, &priv->agentTimeout) == -2) {
|
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
|
|
|
|
_("failed to parse agent timeout"));
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
2020-02-25 09:55:09 +00:00
|
|
|
priv->dbusDaemonRunning = virXPathBoolean("boolean(./dbusDaemon)", ctxt) > 0;
|
|
|
|
|
2020-02-25 09:55:11 +00:00
|
|
|
priv->dbusVMState = virXPathBoolean("boolean(./dbusVMState)", ctxt) > 0;
|
|
|
|
|
2016-11-15 10:30:18 +00:00
|
|
|
if ((node = virXPathNode("./namespaces", ctxt))) {
|
|
|
|
xmlNodePtr next;
|
|
|
|
|
|
|
|
for (next = node->children; next; next = next->next) {
|
2018-04-25 12:42:34 +00:00
|
|
|
int ns = qemuDomainNamespaceTypeFromString((const char *)next->name);
|
2016-11-15 10:30:18 +00:00
|
|
|
|
|
|
|
if (ns < 0) {
|
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
_("malformed namespace name: %s"),
|
|
|
|
next->name);
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (qemuDomainEnableNamespace(vm, ns) < 0)
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (priv->namespaces &&
|
|
|
|
virBitmapIsAllClear(priv->namespaces)) {
|
|
|
|
virBitmapFree(priv->namespaces);
|
|
|
|
priv->namespaces = NULL;
|
|
|
|
}
|
|
|
|
|
2018-11-13 11:50:41 +00:00
|
|
|
priv->rememberOwner = virXPathBoolean("count(./rememberOwner) > 0", ctxt);
|
|
|
|
|
2016-06-30 08:35:12 +00:00
|
|
|
if ((n = virXPathNodeSet("./vcpus/vcpu", ctxt, &nodes)) < 0)
|
2010-12-16 15:23:41 +00:00
|
|
|
goto error;
|
|
|
|
|
2016-06-30 08:35:12 +00:00
|
|
|
for (i = 0; i < n; i++) {
|
2016-07-01 12:56:14 +00:00
|
|
|
if (qemuDomainObjPrivateXMLParseVcpu(nodes[i], i, vm->def) < 0)
|
2016-06-30 08:35:12 +00:00
|
|
|
goto error;
|
2010-12-16 15:23:41 +00:00
|
|
|
}
|
2016-06-30 08:35:12 +00:00
|
|
|
VIR_FREE(nodes);
|
2014-09-03 13:06:52 +00:00
|
|
|
|
2011-05-04 11:55:38 +00:00
|
|
|
if ((n = virXPathNodeSet("./qemuCaps/flag", ctxt, &nodes)) < 0) {
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
"%s", _("failed to parse qemu capabilities flags"));
|
2011-05-04 11:55:38 +00:00
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
if (n > 0) {
|
2013-02-01 13:48:58 +00:00
|
|
|
if (!(qemuCaps = virQEMUCapsNew()))
|
2011-05-04 11:55:38 +00:00
|
|
|
goto error;
|
|
|
|
|
2013-05-21 07:21:20 +00:00
|
|
|
for (i = 0; i < n; i++) {
|
2020-01-09 18:33:44 +00:00
|
|
|
g_autofree char *str = virXMLPropString(nodes[i], "name");
|
2011-05-04 11:55:38 +00:00
|
|
|
if (str) {
|
2013-02-01 13:48:58 +00:00
|
|
|
int flag = virQEMUCapsTypeFromString(str);
|
2011-05-04 11:55:38 +00:00
|
|
|
if (flag < 0) {
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
_("Unknown qemu capabilities flag %s"), str);
|
2011-05-04 11:55:38 +00:00
|
|
|
goto error;
|
|
|
|
}
|
2013-02-01 13:48:58 +00:00
|
|
|
virQEMUCapsSet(qemuCaps, flag);
|
2011-05-04 11:55:38 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-10-16 11:43:18 +00:00
|
|
|
priv->qemuCaps = g_steal_pointer(&qemuCaps);
|
2011-05-04 11:55:38 +00:00
|
|
|
}
|
|
|
|
VIR_FREE(nodes);
|
|
|
|
|
2010-10-26 14:04:46 +00:00
|
|
|
priv->lockState = virXPathString("string(./lockstate)", ctxt);
|
2011-05-04 11:55:38 +00:00
|
|
|
|
2020-07-13 18:03:38 +00:00
|
|
|
if (qemuDomainObjPrivateXMLParseJob(vm, ctxt) < 0)
|
2015-05-19 15:28:25 +00:00
|
|
|
goto error;
|
|
|
|
|
2011-09-28 10:10:13 +00:00
|
|
|
priv->fakeReboot = virXPathBoolean("boolean(./fakereboot)", ctxt) == 1;
|
|
|
|
|
2013-07-19 13:08:29 +00:00
|
|
|
if ((n = virXPathNodeSet("./devices/device", ctxt, &nodes)) < 0) {
|
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
|
|
|
|
_("failed to parse qemu device list"));
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
if (n > 0) {
|
|
|
|
/* NULL-terminated list */
|
2020-10-05 10:26:10 +00:00
|
|
|
priv->qemuDevices = g_new0(char *, n + 1);
|
2013-07-19 13:08:29 +00:00
|
|
|
|
|
|
|
for (i = 0; i < n; i++) {
|
|
|
|
priv->qemuDevices[i] = virXMLPropString(nodes[i], "alias");
|
|
|
|
if (!priv->qemuDevices[i]) {
|
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
|
|
|
|
_("failed to parse qemu device list"));
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
VIR_FREE(nodes);
|
|
|
|
|
2019-08-08 14:55:06 +00:00
|
|
|
if ((n = virXPathNodeSet("./slirp/helper", ctxt, &nodes)) < 0) {
|
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
|
|
|
|
_("failed to parse slirp helper list"));
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
for (i = 0; i < n; i++) {
|
2019-10-15 13:16:31 +00:00
|
|
|
g_autofree char *alias = virXMLPropString(nodes[i], "alias");
|
|
|
|
g_autofree char *pid = virXMLPropString(nodes[i], "pid");
|
2019-10-15 12:47:50 +00:00
|
|
|
g_autoptr(qemuSlirp) slirp = qemuSlirpNew();
|
2019-08-08 14:55:06 +00:00
|
|
|
virDomainDeviceDef dev;
|
|
|
|
|
|
|
|
if (!alias || !pid || !slirp ||
|
|
|
|
virStrToLong_i(pid, NULL, 10, &slirp->pid) < 0) {
|
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
|
|
|
|
_("failed to parse slirp helper list"));
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (virDomainDefFindDevice(vm->def, alias, &dev, true) < 0 ||
|
|
|
|
dev.type != VIR_DOMAIN_DEVICE_NET)
|
|
|
|
goto error;
|
|
|
|
|
|
|
|
if (qemuDomainObjPrivateXMLParseSlirpFeatures(nodes[i], ctxt, slirp) < 0)
|
|
|
|
goto error;
|
|
|
|
|
2019-10-16 11:43:18 +00:00
|
|
|
QEMU_DOMAIN_NETWORK_PRIVATE(dev.data.net)->slirp = g_steal_pointer(&slirp);
|
2019-08-08 14:55:06 +00:00
|
|
|
}
|
|
|
|
VIR_FREE(nodes);
|
|
|
|
|
2017-07-12 07:24:07 +00:00
|
|
|
if (qemuDomainObjPrivateXMLParseAutomaticPlacement(ctxt, priv, driver) < 0)
|
2015-07-24 14:06:33 +00:00
|
|
|
goto error;
|
|
|
|
|
2016-02-26 08:15:55 +00:00
|
|
|
if ((tmp = virXPathString("string(./libDir/@path)", ctxt)))
|
|
|
|
priv->libDir = tmp;
|
|
|
|
if ((tmp = virXPathString("string(./channelTargetDir/@path)", ctxt)))
|
|
|
|
priv->channelTargetDir = tmp;
|
|
|
|
tmp = NULL;
|
|
|
|
|
2019-10-22 13:26:14 +00:00
|
|
|
qemuDomainSetPrivatePathsOld(driver, vm);
|
2016-02-26 08:15:55 +00:00
|
|
|
|
2020-10-07 08:54:55 +00:00
|
|
|
if (virCPUDefParseXML(ctxt, "./cpu", VIR_CPU_TYPE_GUEST, &priv->origCPU,
|
|
|
|
false) < 0)
|
2017-05-16 11:26:54 +00:00
|
|
|
goto error;
|
|
|
|
|
2017-06-15 06:34:55 +00:00
|
|
|
priv->chardevStdioLogd = virXPathBoolean("boolean(./chardevStdioLogd)",
|
|
|
|
ctxt) == 1;
|
|
|
|
|
2017-10-11 13:57:16 +00:00
|
|
|
qemuDomainObjPrivateXMLParseAllowReboot(ctxt, &priv->allowReboot);
|
|
|
|
|
2018-04-19 08:00:36 +00:00
|
|
|
qemuDomainObjPrivateXMLParsePR(ctxt, &priv->prDaemonRunning);
|
|
|
|
|
2018-11-30 09:36:22 +00:00
|
|
|
if (qemuDomainObjPrivateXMLParseBlockjobs(vm, priv, ctxt) < 0)
|
2017-09-26 14:37:47 +00:00
|
|
|
goto error;
|
|
|
|
|
2019-09-18 09:27:05 +00:00
|
|
|
if (qemuDomainObjPrivateXMLParseBackups(priv, ctxt) < 0)
|
|
|
|
goto error;
|
|
|
|
|
2017-07-07 12:29:32 +00:00
|
|
|
qemuDomainStorageIdReset(priv);
|
|
|
|
if (virXPathULongLong("string(./nodename/@index)", ctxt,
|
|
|
|
&priv->nodenameindex) == -2) {
|
|
|
|
virReportError(VIR_ERR_XML_ERROR, "%s",
|
|
|
|
_("failed to parse node name index"));
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
2018-11-05 10:48:16 +00:00
|
|
|
priv->memPrealloc = virXPathBoolean("boolean(./memPrealloc)", ctxt) == 1;
|
|
|
|
|
2010-12-16 15:23:41 +00:00
|
|
|
return 0;
|
|
|
|
|
2014-03-25 06:49:44 +00:00
|
|
|
error:
|
2016-11-15 10:30:18 +00:00
|
|
|
virBitmapFree(priv->namespaces);
|
|
|
|
priv->namespaces = NULL;
|
2019-02-20 08:51:07 +00:00
|
|
|
virObjectUnref(priv->monConfig);
|
2016-11-15 10:30:18 +00:00
|
|
|
priv->monConfig = NULL;
|
2020-08-02 17:36:03 +00:00
|
|
|
g_strfreev(priv->qemuDevices);
|
2013-07-19 13:08:29 +00:00
|
|
|
priv->qemuDevices = NULL;
|
2010-12-16 15:23:41 +00:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2018-05-28 09:36:47 +00:00
|
|
|
static void *
|
|
|
|
qemuDomainObjPrivateXMLGetParseOpaque(virDomainObjPtr vm)
|
|
|
|
{
|
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
|
|
|
|
|
|
|
return priv->qemuCaps;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2013-03-05 15:17:24 +00:00
|
|
|
/* Callback table wiring the generic domain-XML machinery to the QEMU
 * driver's private-data implementations: allocation/free of per-domain
 * private state, per-device private-data constructors, and the
 * parse/format hooks for the driver-private status XML. */
virDomainXMLPrivateDataCallbacks virQEMUDriverPrivateDataCallbacks = {
    .alloc = qemuDomainObjPrivateAlloc,
    .free = qemuDomainObjPrivateFree,
    /* per-device private-data constructors */
    .diskNew = qemuDomainDiskPrivateNew,
    .diskParse = qemuDomainDiskPrivateParse,
    .diskFormat = qemuDomainDiskPrivateFormat,
    .vcpuNew = qemuDomainVcpuPrivateNew,
    .chrSourceNew = qemuDomainChrSourcePrivateNew,
    .vsockNew = qemuDomainVsockPrivateNew,
    .graphicsNew = qemuDomainGraphicsPrivateNew,
    .networkNew = qemuDomainNetworkPrivateNew,
    .videoNew = qemuDomainVideoPrivateNew,
    .fsNew = qemuDomainFSPrivateNew,
    /* driver-private status XML handling */
    .parse = qemuDomainObjPrivateXMLParse,
    .format = qemuDomainObjPrivateXMLFormat,
    .getParseOpaque = qemuDomainObjPrivateXMLGetParseOpaque,
    /* storage-source private data (e.g. secret aliases) */
    .storageParse = qemuStorageSourcePrivateDataParse,
    .storageFormat = qemuStorageSourcePrivateDataFormat,
};
|
|
|
|
|
|
|
|
|
2019-06-17 13:07:00 +00:00
|
|
|
static void
|
|
|
|
qemuDomainXmlNsDefFree(qemuDomainXmlNsDefPtr def)
|
|
|
|
{
|
|
|
|
if (!def)
|
|
|
|
return;
|
|
|
|
|
2019-06-17 13:12:13 +00:00
|
|
|
virStringListFreeCount(def->args, def->num_args);
|
|
|
|
virStringListFreeCount(def->env_name, def->num_env);
|
|
|
|
virStringListFreeCount(def->env_value, def->num_env);
|
2019-06-17 15:17:56 +00:00
|
|
|
virStringListFreeCount(def->capsadd, def->ncapsadd);
|
|
|
|
virStringListFreeCount(def->capsdel, def->ncapsdel);
|
2019-06-17 13:12:13 +00:00
|
|
|
|
2019-06-17 13:07:00 +00:00
|
|
|
VIR_FREE(def);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2010-12-16 15:23:41 +00:00
|
|
|
/* virXMLNamespace 'free' callback: dispose of the opaque namespace data
 * attached to a domain definition. */
static void
qemuDomainDefNamespaceFree(void *nsdata)
{
    qemuDomainXmlNsDefFree(nsdata);
}
|
|
|
|
|
2019-06-17 13:41:50 +00:00
|
|
|
|
|
|
|
static int
|
|
|
|
qemuDomainDefNamespaceParseCommandlineArgs(qemuDomainXmlNsDefPtr nsdef,
|
|
|
|
xmlXPathContextPtr ctxt)
|
|
|
|
{
|
2019-10-15 13:16:31 +00:00
|
|
|
g_autofree xmlNodePtr *nodes = NULL;
|
2019-06-17 13:41:50 +00:00
|
|
|
ssize_t nnodes;
|
|
|
|
size_t i;
|
|
|
|
|
|
|
|
if ((nnodes = virXPathNodeSet("./qemu:commandline/qemu:arg", ctxt, &nodes)) < 0)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
if (nnodes == 0)
|
|
|
|
return 0;
|
|
|
|
|
2020-10-05 10:26:10 +00:00
|
|
|
nsdef->args = g_new0(char *, nnodes);
|
2019-06-17 13:41:50 +00:00
|
|
|
|
|
|
|
for (i = 0; i < nnodes; i++) {
|
|
|
|
if (!(nsdef->args[nsdef->num_args++] = virXMLPropString(nodes[i], "value"))) {
|
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
|
|
|
|
_("No qemu command-line argument specified"));
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2019-06-17 13:41:50 +00:00
|
|
|
static int
|
|
|
|
qemuDomainDefNamespaceParseCommandlineEnvNameValidate(const char *envname)
|
|
|
|
{
|
2019-11-18 14:11:46 +00:00
|
|
|
if (!g_ascii_isalpha(envname[0]) && envname[0] != '_') {
|
2019-06-17 13:41:50 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
|
|
|
|
_("Invalid environment name, it must begin with a letter or underscore"));
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (strspn(envname, "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_") != strlen(envname)) {
|
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
|
|
|
|
_("Invalid environment name, it must contain only alphanumerics and underscore"));
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/* Parse <qemu:commandline>/<qemu:env name='...' value='...'/> elements
 * into the parallel nsdef->env_name / nsdef->env_value arrays, both
 * counted by nsdef->num_env. The name attribute is mandatory and
 * validated; the value attribute is optional. Returns 0 on success,
 * -1 (with an error reported) on failure. */
static int
qemuDomainDefNamespaceParseCommandlineEnv(qemuDomainXmlNsDefPtr nsdef,
                                          xmlXPathContextPtr ctxt)
{
    g_autofree xmlNodePtr *nodes = NULL;
    ssize_t nnodes;
    size_t i;

    if ((nnodes = virXPathNodeSet("./qemu:commandline/qemu:env", ctxt, &nodes)) < 0)
        return -1;

    if (nnodes == 0)
        return 0;

    /* both arrays are sized to the node count; num_env tracks how many
     * slots are actually filled (relevant for cleanup on error) */
    nsdef->env_name = g_new0(char *, nnodes);
    nsdef->env_value = g_new0(char *, nnodes);

    for (i = 0; i < nnodes; i++) {
        if (!(nsdef->env_name[nsdef->num_env] = virXMLPropString(nodes[i], "name"))) {
            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                           _("No qemu environment name specified"));
            return -1;
        }

        if (qemuDomainDefNamespaceParseCommandlineEnvNameValidate(nsdef->env_name[nsdef->num_env]) < 0)
            return -1;

        nsdef->env_value[nsdef->num_env] = virXMLPropString(nodes[i], "value");
        /* a NULL value is allowed, since the value may legitimately be empty */
        /* num_env is only bumped once both the name and value slots for this
         * entry are settled, so partially-filled entries never get counted */
        nsdef->num_env++;
    }

    return 0;
}
|
|
|
|
|
|
|
|
|
2019-06-17 15:17:56 +00:00
|
|
|
static int
|
|
|
|
qemuDomainDefNamespaceParseCaps(qemuDomainXmlNsDefPtr nsdef,
|
|
|
|
xmlXPathContextPtr ctxt)
|
|
|
|
{
|
2019-10-15 13:16:31 +00:00
|
|
|
g_autofree xmlNodePtr *nodesadd = NULL;
|
2019-06-17 15:17:56 +00:00
|
|
|
ssize_t nnodesadd;
|
2019-10-15 13:16:31 +00:00
|
|
|
g_autofree xmlNodePtr *nodesdel = NULL;
|
2019-06-17 15:17:56 +00:00
|
|
|
ssize_t nnodesdel;
|
|
|
|
size_t i;
|
|
|
|
|
|
|
|
if ((nnodesadd = virXPathNodeSet("./qemu:capabilities/qemu:add", ctxt, &nodesadd)) < 0 ||
|
|
|
|
(nnodesdel = virXPathNodeSet("./qemu:capabilities/qemu:del", ctxt, &nodesdel)) < 0)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
if (nnodesadd > 0) {
|
2020-10-05 10:26:10 +00:00
|
|
|
nsdef->capsadd = g_new0(char *, nnodesadd);
|
2019-06-17 15:17:56 +00:00
|
|
|
|
|
|
|
for (i = 0; i < nnodesadd; i++) {
|
|
|
|
if (!(nsdef->capsadd[nsdef->ncapsadd++] = virXMLPropString(nodesadd[i], "capability"))) {
|
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
|
|
|
|
_("missing capability name"));
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (nnodesdel > 0) {
|
2020-10-05 10:26:10 +00:00
|
|
|
nsdef->capsdel = g_new0(char *, nnodesdel);
|
2019-06-17 15:17:56 +00:00
|
|
|
|
|
|
|
for (i = 0; i < nnodesdel; i++) {
|
|
|
|
if (!(nsdef->capsdel[nsdef->ncapsdel++] = virXMLPropString(nodesdel[i], "capability"))) {
|
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
|
|
|
|
_("missing capability name"));
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2010-12-16 15:23:41 +00:00
|
|
|
static int
|
2019-08-20 21:30:40 +00:00
|
|
|
qemuDomainDefNamespaceParse(xmlXPathContextPtr ctxt,
|
2010-12-16 15:23:41 +00:00
|
|
|
void **data)
|
|
|
|
{
|
2019-06-17 14:10:12 +00:00
|
|
|
qemuDomainXmlNsDefPtr nsdata = NULL;
|
|
|
|
int ret = -1;
|
2010-12-16 15:23:41 +00:00
|
|
|
|
2020-10-05 10:26:10 +00:00
|
|
|
nsdata = g_new0(qemuDomainXmlNsDef, 1);
|
2010-12-16 15:23:41 +00:00
|
|
|
|
2019-06-17 14:10:12 +00:00
|
|
|
if (qemuDomainDefNamespaceParseCommandlineArgs(nsdata, ctxt) < 0 ||
|
2019-06-17 15:17:56 +00:00
|
|
|
qemuDomainDefNamespaceParseCommandlineEnv(nsdata, ctxt) < 0 ||
|
|
|
|
qemuDomainDefNamespaceParseCaps(nsdata, ctxt) < 0)
|
2019-06-17 14:10:12 +00:00
|
|
|
goto cleanup;
|
2010-12-16 15:23:41 +00:00
|
|
|
|
2019-06-17 15:17:56 +00:00
|
|
|
if (nsdata->num_args > 0 || nsdata->num_env > 0 ||
|
|
|
|
nsdata->ncapsadd > 0 || nsdata->ncapsdel > 0)
|
2019-10-16 11:43:18 +00:00
|
|
|
*data = g_steal_pointer(&nsdata);
|
2010-12-16 15:23:41 +00:00
|
|
|
|
2019-06-17 14:10:12 +00:00
|
|
|
ret = 0;
|
2010-12-16 15:23:41 +00:00
|
|
|
|
2019-06-17 14:10:12 +00:00
|
|
|
cleanup:
|
|
|
|
qemuDomainDefNamespaceFree(nsdata);
|
|
|
|
return ret;
|
2010-12-16 15:23:41 +00:00
|
|
|
}
|
|
|
|
|
2019-06-17 14:45:06 +00:00
|
|
|
|
|
|
|
static void
|
|
|
|
qemuDomainDefNamespaceFormatXMLCommandline(virBufferPtr buf,
|
|
|
|
qemuDomainXmlNsDefPtr cmd)
|
2010-12-16 15:23:41 +00:00
|
|
|
{
|
Convert 'int i' to 'size_t i' in src/qemu files
Convert the type of loop iterators named 'i', 'j', k',
'ii', 'jj', 'kk', to be 'size_t' instead of 'int' or
'unsigned int', also santizing 'ii', 'jj', 'kk' to use
the normal 'i', 'j', 'k' naming
Signed-off-by: Daniel P. Berrange <berrange@redhat.com>
2013-07-08 14:09:33 +00:00
|
|
|
size_t i;
|
2010-12-16 15:23:41 +00:00
|
|
|
|
|
|
|
if (!cmd->num_args && !cmd->num_env)
|
2019-06-17 14:45:06 +00:00
|
|
|
return;
|
2010-12-16 15:23:41 +00:00
|
|
|
|
2014-03-05 09:50:42 +00:00
|
|
|
virBufferAddLit(buf, "<qemu:commandline>\n");
|
|
|
|
virBufferAdjustIndent(buf, 2);
|
|
|
|
|
2010-12-16 15:23:41 +00:00
|
|
|
for (i = 0; i < cmd->num_args; i++)
|
2014-03-05 09:50:42 +00:00
|
|
|
virBufferEscapeString(buf, "<qemu:arg value='%s'/>\n",
|
2010-12-16 15:23:41 +00:00
|
|
|
cmd->args[i]);
|
|
|
|
for (i = 0; i < cmd->num_env; i++) {
|
2014-03-05 09:50:42 +00:00
|
|
|
virBufferAsprintf(buf, "<qemu:env name='%s'", cmd->env_name[i]);
|
2010-12-16 15:23:41 +00:00
|
|
|
if (cmd->env_value[i])
|
|
|
|
virBufferEscapeString(buf, " value='%s'", cmd->env_value[i]);
|
|
|
|
virBufferAddLit(buf, "/>\n");
|
|
|
|
}
|
|
|
|
|
2014-03-05 09:50:42 +00:00
|
|
|
virBufferAdjustIndent(buf, -2);
|
|
|
|
virBufferAddLit(buf, "</qemu:commandline>\n");
|
2019-06-17 14:45:06 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2019-06-17 15:17:56 +00:00
|
|
|
static void
|
|
|
|
qemuDomainDefNamespaceFormatXMLCaps(virBufferPtr buf,
|
|
|
|
qemuDomainXmlNsDefPtr xmlns)
|
|
|
|
{
|
|
|
|
size_t i;
|
|
|
|
|
|
|
|
if (!xmlns->ncapsadd && !xmlns->ncapsdel)
|
|
|
|
return;
|
|
|
|
|
|
|
|
virBufferAddLit(buf, "<qemu:capabilities>\n");
|
|
|
|
virBufferAdjustIndent(buf, 2);
|
|
|
|
|
|
|
|
for (i = 0; i < xmlns->ncapsadd; i++)
|
|
|
|
virBufferEscapeString(buf, "<qemu:add capability='%s'/>\n", xmlns->capsadd[i]);
|
|
|
|
|
|
|
|
for (i = 0; i < xmlns->ncapsdel; i++)
|
|
|
|
virBufferEscapeString(buf, "<qemu:del capability='%s'/>\n", xmlns->capsdel[i]);
|
|
|
|
|
|
|
|
virBufferAdjustIndent(buf, -2);
|
|
|
|
virBufferAddLit(buf, "</qemu:capabilities>\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2019-06-17 14:45:06 +00:00
|
|
|
static int
|
|
|
|
qemuDomainDefNamespaceFormatXML(virBufferPtr buf,
|
|
|
|
void *nsdata)
|
|
|
|
{
|
|
|
|
qemuDomainXmlNsDefPtr cmd = nsdata;
|
|
|
|
|
|
|
|
qemuDomainDefNamespaceFormatXMLCommandline(buf, cmd);
|
2019-06-17 15:17:56 +00:00
|
|
|
qemuDomainDefNamespaceFormatXMLCaps(buf, cmd);
|
2019-06-17 14:45:06 +00:00
|
|
|
|
2010-12-16 15:23:41 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2019-08-20 21:39:24 +00:00
|
|
|
/* Registration of the qemu-specific XML namespace
 * (xmlns:qemu='http://libvirt.org/schemas/domain/qemu/1.0') used to
 * pass extra command-line args, env vars and capability tweaks through
 * the domain XML. */
virXMLNamespace virQEMUDriverDomainXMLNamespace = {
    .parse = qemuDomainDefNamespaceParse,
    .free = qemuDomainDefNamespaceFree,
    .format = qemuDomainDefNamespaceFormatXML,
    .prefix = "qemu",
    .uri = "http://libvirt.org/schemas/domain/qemu/1.0",
};
|
2010-12-16 15:23:41 +00:00
|
|
|
|
2010-12-16 16:12:02 +00:00
|
|
|
|
2016-01-11 11:40:32 +00:00
|
|
|
static int
|
|
|
|
qemuDomainDefAddImplicitInputDevice(virDomainDef *def)
|
|
|
|
{
|
|
|
|
if (ARCH_IS_X86(def->os.arch)) {
|
|
|
|
if (virDomainDefMaybeAddInput(def,
|
|
|
|
VIR_DOMAIN_INPUT_TYPE_MOUSE,
|
|
|
|
VIR_DOMAIN_INPUT_BUS_PS2) < 0)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
if (virDomainDefMaybeAddInput(def,
|
|
|
|
VIR_DOMAIN_INPUT_TYPE_KBD,
|
|
|
|
VIR_DOMAIN_INPUT_BUS_PS2) < 0)
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2013-03-11 11:12:08 +00:00
|
|
|
static int
|
2016-01-07 01:35:36 +00:00
|
|
|
qemuDomainDefAddDefaultDevices(virDomainDefPtr def,
|
|
|
|
virQEMUCapsPtr qemuCaps)
|
2013-03-11 11:12:08 +00:00
|
|
|
{
|
2013-08-02 08:13:33 +00:00
|
|
|
bool addDefaultUSB = true;
|
2015-10-20 16:08:56 +00:00
|
|
|
int usbModel = -1; /* "default for machinetype" */
|
2016-04-19 21:05:54 +00:00
|
|
|
int pciRoot; /* index within def->controllers */
|
qemu: fix handling of default/implicit devices for q35
This patch adds in special handling for a few devices that need to be
treated differently for q35 domains:
usb - there is no implicit/default usb controller for the q35
machinetype. This is done because normally the default usb controller
is added to a domain by just adding "-usb" to the qemu commandline,
and it's assumed that this will add a single piix3 usb1 controller at
slot 1 function 2. That's not what happens when the machinetype is
q35, though. Instead, adding -usb to the commandline adds 3 usb
(version 2) controllers to the domain at slot 0x1D.{1,2,7}. Rather
than having
<controller type='usb' index='0'/>
translate into 3 separate devices on the PCI bus, it's cleaner to not
automatically add a default usb device; one can always be added
explicitly if desired. Or we may decide that on q35 machines, 3 usb
controllers will be automatically added when none is given. But for
this initial commit, at least we aren't locking ourselves into
something we later won't want.
video - qemu always initializes the primary video device immediately
after any integrated devices for the machinetype. Unless instructed
otherwise (by using "-device vga..." instead of "-vga" which libvirt
uses in many cases to work around deficiencies and bugs in various
qemu versions) qemu will always pick the first unused slot. In the
case of the "pc" machinetype and its derivatives, this is always slot
2, but on q35 machinetypes, the first free slot is slot 1 (since the
q35's integrated peripheral devices are placed in other slots,
e.g. slot 0x1f). In order to make the PCI address of the video device
predictable, that slot (1 or 2, depending on machinetype) is reserved
even when no video device has been specified.
sata - a q35 machine always has a sata controller implicitly added at
slot 0x1F, function 2. There is no way to avoid this controller, so we
always add it. Note that the xml2xml tests for the pcie-root and q35
cases were changed to use DO_TEST_DIFFERENT() so that we can check for
the sata controller being automatically added. This is especially
important because we can't check for it in the xml2argv output (it has
no effect on that output since it's an implicit device).
ide - q35 has no ide controllers.
isa and smbus controllers - these two are always present in a q35 (at
slot 0x1F functions 0 and 3) but we have no way of modelling them in
our config. We do need to reserve those functions so that the user
doesn't attempt to put anything else there though. (note that the "pc"
machine type also has an ISA controller, which we also ignore).
2013-08-02 08:55:55 +00:00
|
|
|
bool addImplicitSATA = false;
|
2013-04-22 12:16:13 +00:00
|
|
|
bool addPCIRoot = false;
|
2013-07-10 19:19:32 +00:00
|
|
|
bool addPCIeRoot = false;
|
2013-07-30 19:41:14 +00:00
|
|
|
bool addDefaultMemballoon = true;
|
2014-02-17 10:17:58 +00:00
|
|
|
bool addDefaultUSBKBD = false;
|
|
|
|
bool addDefaultUSBMouse = false;
|
2015-05-28 14:39:13 +00:00
|
|
|
bool addPanicDevice = false;
|
2013-04-22 12:16:13 +00:00
|
|
|
|
2016-01-11 11:40:32 +00:00
|
|
|
/* add implicit input devices */
|
|
|
|
if (qemuDomainDefAddImplicitInputDevice(def) < 0)
|
2019-11-12 20:46:27 +00:00
|
|
|
return -1;
|
2016-01-11 11:40:32 +00:00
|
|
|
|
2013-04-22 12:16:13 +00:00
|
|
|
/* Add implicit PCI root controller if the machine has one */
|
|
|
|
switch (def->os.arch) {
|
|
|
|
case VIR_ARCH_I686:
|
|
|
|
case VIR_ARCH_X86_64:
|
2013-07-10 19:19:32 +00:00
|
|
|
if (STREQ(def->os.machine, "isapc")) {
|
2013-08-02 08:13:33 +00:00
|
|
|
addDefaultUSB = false;
|
2013-04-22 12:16:13 +00:00
|
|
|
break;
|
2013-08-02 08:13:33 +00:00
|
|
|
}
|
2017-04-18 10:43:58 +00:00
|
|
|
if (qemuDomainIsQ35(def)) {
|
2015-08-06 13:33:01 +00:00
|
|
|
addPCIeRoot = true;
|
|
|
|
addImplicitSATA = true;
|
2015-10-20 16:08:56 +00:00
|
|
|
|
2018-01-17 21:47:06 +00:00
|
|
|
/* Prefer adding a USB3 controller if supported, fall back
|
|
|
|
* to USB2 if there is no USB3 available, and if that's
|
|
|
|
* unavailable don't add anything.
|
2015-10-20 16:08:56 +00:00
|
|
|
*/
|
2018-01-17 21:47:06 +00:00
|
|
|
if (virQEMUCapsGet(qemuCaps, QEMU_CAPS_DEVICE_QEMU_XHCI))
|
|
|
|
usbModel = VIR_DOMAIN_CONTROLLER_MODEL_USB_QEMU_XHCI;
|
|
|
|
else if (virQEMUCapsGet(qemuCaps, QEMU_CAPS_NEC_USB_XHCI))
|
2016-09-19 22:46:41 +00:00
|
|
|
usbModel = VIR_DOMAIN_CONTROLLER_MODEL_USB_NEC_XHCI;
|
|
|
|
else if (virQEMUCapsGet(qemuCaps, QEMU_CAPS_ICH9_USB_EHCI1))
|
2015-10-20 16:08:56 +00:00
|
|
|
usbModel = VIR_DOMAIN_CONTROLLER_MODEL_USB_ICH9_EHCI1;
|
|
|
|
else
|
|
|
|
addDefaultUSB = false;
|
2015-08-06 13:33:01 +00:00
|
|
|
break;
|
2013-07-10 19:19:32 +00:00
|
|
|
}
|
2017-04-18 10:43:58 +00:00
|
|
|
if (qemuDomainIsI440FX(def))
|
2016-05-03 10:05:27 +00:00
|
|
|
addPCIRoot = true;
|
2013-04-22 12:16:13 +00:00
|
|
|
break;
|
|
|
|
|
2018-11-28 21:45:15 +00:00
|
|
|
case VIR_ARCH_ARMV6L:
|
|
|
|
addDefaultUSB = false;
|
|
|
|
addDefaultMemballoon = false;
|
|
|
|
if (STREQ(def->os.machine, "versatilepb"))
|
|
|
|
addPCIRoot = true;
|
|
|
|
break;
|
|
|
|
|
2013-08-02 08:13:33 +00:00
|
|
|
case VIR_ARCH_ARMV7L:
|
2014-01-02 10:42:56 +00:00
|
|
|
case VIR_ARCH_AARCH64:
|
2015-07-17 11:27:45 +00:00
|
|
|
addDefaultUSB = false;
|
|
|
|
addDefaultMemballoon = false;
|
2018-08-22 09:15:20 +00:00
|
|
|
if (qemuDomainIsARMVirt(def))
|
2015-07-17 11:27:45 +00:00
|
|
|
addPCIeRoot = virQEMUCapsGet(qemuCaps, QEMU_CAPS_OBJECT_GPEX);
|
|
|
|
break;
|
2013-08-02 08:13:33 +00:00
|
|
|
|
2014-02-17 10:17:58 +00:00
|
|
|
case VIR_ARCH_PPC64:
|
2014-11-04 17:21:26 +00:00
|
|
|
case VIR_ARCH_PPC64LE:
|
2014-02-17 10:17:58 +00:00
|
|
|
addPCIRoot = true;
|
|
|
|
addDefaultUSBKBD = true;
|
|
|
|
addDefaultUSBMouse = true;
|
2015-05-28 14:39:13 +00:00
|
|
|
/* For pSeries guests, the firmware provides the same
|
|
|
|
* functionality as the pvpanic device, so automatically
|
|
|
|
* add the definition if not already present */
|
2017-04-18 10:43:58 +00:00
|
|
|
if (qemuDomainIsPSeries(def))
|
2015-05-28 14:39:13 +00:00
|
|
|
addPanicDevice = true;
|
2014-02-17 10:17:58 +00:00
|
|
|
break;
|
|
|
|
|
2013-04-22 12:16:13 +00:00
|
|
|
case VIR_ARCH_ALPHA:
|
|
|
|
case VIR_ARCH_PPC:
|
|
|
|
case VIR_ARCH_PPCEMB:
|
|
|
|
case VIR_ARCH_SH4:
|
|
|
|
case VIR_ARCH_SH4EB:
|
|
|
|
addPCIRoot = true;
|
|
|
|
break;
|
2016-04-29 13:23:45 +00:00
|
|
|
|
2018-06-14 20:32:27 +00:00
|
|
|
case VIR_ARCH_RISCV32:
|
|
|
|
case VIR_ARCH_RISCV64:
|
|
|
|
addDefaultUSB = false;
|
2018-09-21 13:29:38 +00:00
|
|
|
if (qemuDomainIsRISCVVirt(def))
|
|
|
|
addPCIeRoot = virQEMUCapsGet(qemuCaps, QEMU_CAPS_OBJECT_GPEX);
|
2018-06-14 20:32:27 +00:00
|
|
|
break;
|
|
|
|
|
2015-02-18 15:44:17 +00:00
|
|
|
case VIR_ARCH_S390:
|
|
|
|
case VIR_ARCH_S390X:
|
|
|
|
addDefaultUSB = false;
|
2016-04-29 13:23:42 +00:00
|
|
|
addPanicDevice = true;
|
2018-11-08 11:00:23 +00:00
|
|
|
addPCIRoot = virQEMUCapsGet(qemuCaps, QEMU_CAPS_DEVICE_ZPCI);
|
2015-02-18 15:44:17 +00:00
|
|
|
break;
|
2015-04-09 08:38:28 +00:00
|
|
|
|
|
|
|
case VIR_ARCH_SPARC:
|
2020-11-18 14:59:47 +00:00
|
|
|
addDefaultUSB = false;
|
|
|
|
addDefaultMemballoon = false;
|
|
|
|
break;
|
|
|
|
|
2015-04-09 08:38:28 +00:00
|
|
|
case VIR_ARCH_SPARC64:
|
|
|
|
addPCIRoot = true;
|
|
|
|
break;
|
|
|
|
|
2018-02-14 09:43:59 +00:00
|
|
|
case VIR_ARCH_ARMV7B:
|
|
|
|
case VIR_ARCH_CRIS:
|
|
|
|
case VIR_ARCH_ITANIUM:
|
|
|
|
case VIR_ARCH_LM32:
|
|
|
|
case VIR_ARCH_M68K:
|
|
|
|
case VIR_ARCH_MICROBLAZE:
|
|
|
|
case VIR_ARCH_MICROBLAZEEL:
|
|
|
|
case VIR_ARCH_MIPS:
|
|
|
|
case VIR_ARCH_MIPSEL:
|
|
|
|
case VIR_ARCH_MIPS64:
|
|
|
|
case VIR_ARCH_MIPS64EL:
|
|
|
|
case VIR_ARCH_OR32:
|
|
|
|
case VIR_ARCH_PARISC:
|
|
|
|
case VIR_ARCH_PARISC64:
|
|
|
|
case VIR_ARCH_PPCLE:
|
|
|
|
case VIR_ARCH_UNICORE32:
|
|
|
|
case VIR_ARCH_XTENSA:
|
|
|
|
case VIR_ARCH_XTENSAEB:
|
|
|
|
case VIR_ARCH_NONE:
|
|
|
|
case VIR_ARCH_LAST:
|
2013-04-22 12:16:13 +00:00
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2013-08-02 08:13:33 +00:00
|
|
|
if (addDefaultUSB &&
|
2015-10-20 16:08:56 +00:00
|
|
|
virDomainControllerFind(def, VIR_DOMAIN_CONTROLLER_TYPE_USB, 0) < 0 &&
|
|
|
|
virDomainDefAddUSBController(def, 0, usbModel) < 0)
|
2019-11-12 20:46:27 +00:00
|
|
|
return -1;
|
2013-08-02 08:13:33 +00:00
|
|
|
|
qemu: fix handling of default/implicit devices for q35
This patch adds in special handling for a few devices that need to be
treated differently for q35 domains:
usb - there is no implicit/default usb controller for the q35
machinetype. This is done because normally the default usb controller
is added to a domain by just adding "-usb" to the qemu commandline,
and it's assumed that this will add a single piix3 usb1 controller at
slot 1 function 2. That's not what happens when the machinetype is
q35, though. Instead, adding -usb to the commandline adds 3 usb
(version 2) controllers to the domain at slot 0x1D.{1,2,7}. Rather
than having
<controller type='usb' index='0'/>
translate into 3 separate devices on the PCI bus, it's cleaner to not
automatically add a default usb device; one can always be added
explicitly if desired. Or we may decide that on q35 machines, 3 usb
controllers will be automatically added when none is given. But for
this initial commit, at least we aren't locking ourselves into
something we later won't want.
video - qemu always initializes the primary video device immediately
after any integrated devices for the machinetype. Unless instructed
otherwise (by using "-device vga..." instead of "-vga" which libvirt
uses in many cases to work around deficiencies and bugs in various
qemu versions) qemu will always pick the first unused slot. In the
case of the "pc" machinetype and its derivatives, this is always slot
2, but on q35 machinetypes, the first free slot is slot 1 (since the
q35's integrated peripheral devices are placed in other slots,
e.g. slot 0x1f). In order to make the PCI address of the video device
predictable, that slot (1 or 2, depending on machinetype) is reserved
even when no video device has been specified.
sata - a q35 machine always has a sata controller implicitly added at
slot 0x1F, function 2. There is no way to avoid this controller, so we
always add it. Note that the xml2xml tests for the pcie-root and q35
cases were changed to use DO_TEST_DIFFERENT() so that we can check for
the sata controller being automatically added. This is especially
important because we can't check for it in the xml2argv output (it has
no effect on that output since it's an implicit device).
ide - q35 has no ide controllers.
isa and smbus controllers - these two are always present in a q35 (at
slot 0x1F functions 0 and 3) but we have no way of modelling them in
our config. We do need to reserve those functions so that the user
doesn't attempt to put anything else there though. (note that the "pc"
machine type also has an ISA controller, which we also ignore).
2013-08-02 08:55:55 +00:00
|
|
|
if (addImplicitSATA &&
|
|
|
|
virDomainDefMaybeAddController(
|
|
|
|
def, VIR_DOMAIN_CONTROLLER_TYPE_SATA, 0, -1) < 0)
|
2019-11-12 20:46:27 +00:00
|
|
|
return -1;
|
qemu: fix handling of default/implicit devices for q35
This patch adds in special handling for a few devices that need to be
treated differently for q35 domains:
usb - there is no implicit/default usb controller for the q35
machinetype. This is done because normally the default usb controller
is added to a domain by just adding "-usb" to the qemu commandline,
and it's assumed that this will add a single piix3 usb1 controller at
slot 1 function 2. That's not what happens when the machinetype is
q35, though. Instead, adding -usb to the commandline adds 3 usb
(version 2) controllers to the domain at slot 0x1D.{1,2,7}. Rather
than having
<controller type='usb' index='0'/>
translate into 3 separate devices on the PCI bus, it's cleaner to not
automatically add a default usb device; one can always be added
explicitly if desired. Or we may decide that on q35 machines, 3 usb
controllers will be automatically added when none is given. But for
this initial commit, at least we aren't locking ourselves into
something we later won't want.
video - qemu always initializes the primary video device immediately
after any integrated devices for the machinetype. Unless instructed
otherwise (by using "-device vga..." instead of "-vga" which libvirt
uses in many cases to work around deficiencies and bugs in various
qemu versions) qemu will always pick the first unused slot. In the
case of the "pc" machinetype and its derivatives, this is always slot
2, but on q35 machinetypes, the first free slot is slot 1 (since the
q35's integrated peripheral devices are placed in other slots,
e.g. slot 0x1f). In order to make the PCI address of the video device
predictable, that slot (1 or 2, depending on machinetype) is reserved
even when no video device has been specified.
sata - a q35 machine always has a sata controller implicitly added at
slot 0x1F, function 2. There is no way to avoid this controller, so we
always add it. Note that the xml2xml tests for the pcie-root and q35
cases were changed to use DO_TEST_DIFFERENT() so that we can check for
the sata controller being automatically added. This is especially
important because we can't check for it in the xml2argv output (it has
no effect on that output since it's an implicit device).
ide - q35 has no ide controllers.
isa and smbus controllers - these two are always present in a q35 (at
slot 0x1F functions 0 and 3) but we have no way of modelling them in
our config. We do need to reserve those functions so that the user
doesn't attempt to put anything else there though. (note that the "pc"
machine type also has an ISA controller, which we also ignore).
2013-08-02 08:55:55 +00:00
|
|
|
|
2016-04-19 21:05:54 +00:00
|
|
|
pciRoot = virDomainControllerFind(def, VIR_DOMAIN_CONTROLLER_TYPE_PCI, 0);
|
|
|
|
|
2015-08-11 18:56:21 +00:00
|
|
|
/* NB: any machine that sets addPCIRoot to true must also return
|
|
|
|
* true from the function qemuDomainSupportsPCI().
|
|
|
|
*/
|
2016-04-19 21:05:54 +00:00
|
|
|
if (addPCIRoot) {
|
|
|
|
if (pciRoot >= 0) {
|
|
|
|
if (def->controllers[pciRoot]->model != VIR_DOMAIN_CONTROLLER_MODEL_PCI_ROOT) {
|
|
|
|
virReportError(VIR_ERR_XML_ERROR,
|
|
|
|
_("The PCI controller with index='0' must be "
|
|
|
|
"model='pci-root' for this machine type, "
|
|
|
|
"but model='%s' was found instead"),
|
|
|
|
virDomainControllerModelPCITypeToString(def->controllers[pciRoot]->model));
|
2019-11-12 20:46:27 +00:00
|
|
|
return -1;
|
2016-04-19 21:05:54 +00:00
|
|
|
}
|
|
|
|
} else if (!virDomainDefAddController(def, VIR_DOMAIN_CONTROLLER_TYPE_PCI, 0,
|
|
|
|
VIR_DOMAIN_CONTROLLER_MODEL_PCI_ROOT)) {
|
2019-11-12 20:46:27 +00:00
|
|
|
return -1;
|
2016-04-19 21:05:54 +00:00
|
|
|
}
|
|
|
|
}
|
2013-04-22 12:16:13 +00:00
|
|
|
|
qemu: add dmi-to-pci-bridge controller
This PCI controller, named "dmi-to-pci-bridge" in the libvirt config,
and implemented with qemu's "i82801b11-bridge" device, connects to a
PCI Express slot (e.g. one of the slots provided by the pcie-root
controller, aka "pcie.0" on the qemu commandline), and provides 31
*non-hot-pluggable* PCI (*not* PCIe) slots, numbered 1-31.
Any time a machine is defined which has a pcie-root controller
(i.e. any q35-based machinetype), libvirt will automatically add a
dmi-to-pci-bridge controller if one doesn't exist, and also add a
pci-bridge controller. The reasoning here is that any useful domain
will have either an immediate (startup time) or eventual (subsequent
hot-plug) need for a standard PCI slot; since the pcie-root controller
only provides PCIe slots, we need to connect a dmi-to-pci-bridge
controller to it in order to get a non-hot-plug PCI slot that we can
then use to connect a pci-bridge - the slots provided by the
pci-bridge will be both standard PCI and hot-pluggable.
Since pci-bridge devices themselves can not be hot-plugged into a
running system (although you can hot-plug other devices into a
pci-bridge's slots), any new pci-bridge controller that is added can
(and will) be plugged into the dmi-to-pci-bridge as long as it has
empty slots available.
This patch is also changing the qemuxml2xml-pcie test from a "DO_TEST"
to a "DO_DIFFERENT_TEST". This is so that the "before" xml can omit
the automatically added dmi-to-pci-bridge and pci-bridge devices, and
the "after" xml can include it - this way we are testing if libvirt is
properly adding these devices.
2013-07-31 01:37:32 +00:00
|
|
|
/* When a machine has a pcie-root, make sure that there is always
|
|
|
|
* a dmi-to-pci-bridge controller added as bus 1, and a pci-bridge
|
|
|
|
* as bus 2, so that standard PCI devices can be connected
|
2015-08-11 18:56:21 +00:00
|
|
|
*
|
|
|
|
* NB: any machine that sets addPCIeRoot to true must also return
|
|
|
|
* true from the function qemuDomainSupportsPCI().
|
qemu: add dmi-to-pci-bridge controller
This PCI controller, named "dmi-to-pci-bridge" in the libvirt config,
and implemented with qemu's "i82801b11-bridge" device, connects to a
PCI Express slot (e.g. one of the slots provided by the pcie-root
controller, aka "pcie.0" on the qemu commandline), and provides 31
*non-hot-pluggable* PCI (*not* PCIe) slots, numbered 1-31.
Any time a machine is defined which has a pcie-root controller
(i.e. any q35-based machinetype), libvirt will automatically add a
dmi-to-pci-bridge controller if one doesn't exist, and also add a
pci-bridge controller. The reasoning here is that any useful domain
will have either an immediate (startup time) or eventual (subsequent
hot-plug) need for a standard PCI slot; since the pcie-root controller
only provides PCIe slots, we need to connect a dmi-to-pci-bridge
controller to it in order to get a non-hot-plug PCI slot that we can
then use to connect a pci-bridge - the slots provided by the
pci-bridge will be both standard PCI and hot-pluggable.
Since pci-bridge devices themselves can not be hot-plugged into a
running system (although you can hot-plug other devices into a
pci-bridge's slots), any new pci-bridge controller that is added can
(and will) be plugged into the dmi-to-pci-bridge as long as it has
empty slots available.
This patch is also changing the qemuxml2xml-pcie test from a "DO_TEST"
to a "DO_DIFFERENT_TEST". This is so that the "before" xml can omit
the automatically added dmi-to-pci-bridge and pci-bridge devices, and
the "after" xml can include it - this way we are testing if libvirt is
properly adding these devices.
2013-07-31 01:37:32 +00:00
|
|
|
*/
|
|
|
|
if (addPCIeRoot) {
|
2016-04-19 21:05:54 +00:00
|
|
|
if (pciRoot >= 0) {
|
|
|
|
if (def->controllers[pciRoot]->model != VIR_DOMAIN_CONTROLLER_MODEL_PCIE_ROOT) {
|
|
|
|
virReportError(VIR_ERR_XML_ERROR,
|
|
|
|
_("The PCI controller with index='0' must be "
|
|
|
|
"model='pcie-root' for this machine type, "
|
|
|
|
"but model='%s' was found instead"),
|
|
|
|
virDomainControllerModelPCITypeToString(def->controllers[pciRoot]->model));
|
2019-11-12 20:46:27 +00:00
|
|
|
return -1;
|
2016-04-19 21:05:54 +00:00
|
|
|
}
|
|
|
|
} else if (!virDomainDefAddController(def, VIR_DOMAIN_CONTROLLER_TYPE_PCI, 0,
|
|
|
|
VIR_DOMAIN_CONTROLLER_MODEL_PCIE_ROOT)) {
|
2019-11-12 20:46:27 +00:00
|
|
|
return -1;
|
2016-04-19 21:05:54 +00:00
|
|
|
}
|
qemu: add dmi-to-pci-bridge controller
This PCI controller, named "dmi-to-pci-bridge" in the libvirt config,
and implemented with qemu's "i82801b11-bridge" device, connects to a
PCI Express slot (e.g. one of the slots provided by the pcie-root
controller, aka "pcie.0" on the qemu commandline), and provides 31
*non-hot-pluggable* PCI (*not* PCIe) slots, numbered 1-31.
Any time a machine is defined which has a pcie-root controller
(i.e. any q35-based machinetype), libvirt will automatically add a
dmi-to-pci-bridge controller if one doesn't exist, and also add a
pci-bridge controller. The reasoning here is that any useful domain
will have either an immediate (startup time) or eventual (subsequent
hot-plug) need for a standard PCI slot; since the pcie-root controller
only provides PCIe slots, we need to connect a dmi-to-pci-bridge
controller to it in order to get a non-hot-plug PCI slot that we can
then use to connect a pci-bridge - the slots provided by the
pci-bridge will be both standard PCI and hot-pluggable.
Since pci-bridge devices themselves can not be hot-plugged into a
running system (although you can hot-plug other devices into a
pci-bridge's slots), any new pci-bridge controller that is added can
(and will) be plugged into the dmi-to-pci-bridge as long as it has
empty slots available.
This patch is also changing the qemuxml2xml-pcie test from a "DO_TEST"
to a "DO_DIFFERENT_TEST". This is so that the "before" xml can omit
the automatically added dmi-to-pci-bridge and pci-bridge devices, and
the "after" xml can include it - this way we are testing if libvirt is
properly adding these devices.
2013-07-31 01:37:32 +00:00
|
|
|
}
|
2013-08-17 00:33:23 +00:00
|
|
|
|
2013-07-30 19:41:14 +00:00
|
|
|
if (addDefaultMemballoon && !def->memballoon) {
|
2013-08-17 00:33:23 +00:00
|
|
|
virDomainMemballoonDefPtr memballoon;
|
2020-10-05 10:26:10 +00:00
|
|
|
memballoon = g_new0(virDomainMemballoonDef, 1);
|
2013-08-17 00:33:23 +00:00
|
|
|
|
|
|
|
memballoon->model = VIR_DOMAIN_MEMBALLOON_MODEL_VIRTIO;
|
|
|
|
def->memballoon = memballoon;
|
|
|
|
}
|
|
|
|
|
2019-01-17 17:52:38 +00:00
|
|
|
if (STRPREFIX(def->os.machine, "s390-virtio") &&
|
|
|
|
virQEMUCapsGet(qemuCaps, QEMU_CAPS_VIRTIO_S390) && def->memballoon)
|
|
|
|
def->memballoon->model = VIR_DOMAIN_MEMBALLOON_MODEL_NONE;
|
|
|
|
|
2019-02-27 17:41:35 +00:00
|
|
|
if (addDefaultUSBMouse) {
|
|
|
|
bool hasUSBTablet = false;
|
|
|
|
size_t j;
|
|
|
|
|
|
|
|
for (j = 0; j < def->ninputs; j++) {
|
|
|
|
if (def->inputs[j]->type == VIR_DOMAIN_INPUT_TYPE_TABLET &&
|
|
|
|
def->inputs[j]->bus == VIR_DOMAIN_INPUT_BUS_USB) {
|
|
|
|
hasUSBTablet = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Historically, we have automatically added USB keyboard and
|
|
|
|
* mouse to some guests. While the former device is generally
|
|
|
|
* safe to have, adding the latter is undesiderable if a USB
|
|
|
|
* tablet is already present in the guest */
|
|
|
|
if (hasUSBTablet)
|
|
|
|
addDefaultUSBMouse = false;
|
|
|
|
}
|
|
|
|
|
2014-02-17 10:17:58 +00:00
|
|
|
if (addDefaultUSBKBD &&
|
|
|
|
def->ngraphics > 0 &&
|
|
|
|
virDomainDefMaybeAddInput(def,
|
|
|
|
VIR_DOMAIN_INPUT_TYPE_KBD,
|
|
|
|
VIR_DOMAIN_INPUT_BUS_USB) < 0)
|
2019-11-12 20:46:27 +00:00
|
|
|
return -1;
|
2014-02-17 10:17:58 +00:00
|
|
|
|
|
|
|
if (addDefaultUSBMouse &&
|
|
|
|
def->ngraphics > 0 &&
|
|
|
|
virDomainDefMaybeAddInput(def,
|
|
|
|
VIR_DOMAIN_INPUT_TYPE_MOUSE,
|
|
|
|
VIR_DOMAIN_INPUT_BUS_USB) < 0)
|
2019-11-12 20:46:27 +00:00
|
|
|
return -1;
|
2014-02-17 10:17:58 +00:00
|
|
|
|
2015-11-24 12:26:36 +00:00
|
|
|
if (addPanicDevice) {
|
|
|
|
size_t j;
|
|
|
|
for (j = 0; j < def->npanics; j++) {
|
|
|
|
if (def->panics[j]->model == VIR_DOMAIN_PANIC_MODEL_DEFAULT ||
|
2016-04-29 13:23:42 +00:00
|
|
|
(ARCH_IS_PPC64(def->os.arch) &&
|
|
|
|
def->panics[j]->model == VIR_DOMAIN_PANIC_MODEL_PSERIES) ||
|
|
|
|
(ARCH_IS_S390(def->os.arch) &&
|
|
|
|
def->panics[j]->model == VIR_DOMAIN_PANIC_MODEL_S390))
|
2015-11-24 12:26:36 +00:00
|
|
|
break;
|
|
|
|
}
|
2015-05-28 14:39:13 +00:00
|
|
|
|
2015-11-24 12:26:36 +00:00
|
|
|
if (j == def->npanics) {
|
2020-10-05 10:26:10 +00:00
|
|
|
virDomainPanicDefPtr panic = g_new0(virDomainPanicDef, 1);
|
2020-10-04 19:51:15 +00:00
|
|
|
|
|
|
|
if (VIR_APPEND_ELEMENT_COPY(def->panics,
|
2015-11-24 12:26:36 +00:00
|
|
|
def->npanics, panic) < 0) {
|
|
|
|
VIR_FREE(panic);
|
2019-11-12 20:46:27 +00:00
|
|
|
return -1;
|
2015-11-24 12:26:36 +00:00
|
|
|
}
|
|
|
|
}
|
2015-05-28 14:39:13 +00:00
|
|
|
}
|
|
|
|
|
2019-11-12 20:46:27 +00:00
|
|
|
return 0;
|
2016-01-07 01:35:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2016-02-03 18:49:07 +00:00
|
|
|
/**
 * qemuDomainDefEnableDefaultFeatures:
 * @def: domain definition
 * @qemuCaps: QEMU capabilities
 *
 * Make sure that features that should be enabled by default are actually
 * enabled and configure default values related to those features.
 *
 * Currently this only covers the GIC (ARM interrupt controller): when the
 * guest is an ARM "virt" machine with no GIC configured, or GIC is enabled
 * without an explicit version, pick a suitable version from @qemuCaps.
 */
static void
qemuDomainDefEnableDefaultFeatures(virDomainDefPtr def,
                                   virQEMUCapsPtr qemuCaps)
{
    /* The virt machine type always uses GIC: if the relevant information
     * was not included in the domain XML, we need to choose a suitable
     * GIC version ourselves */
    if ((def->features[VIR_DOMAIN_FEATURE_GIC] == VIR_TRISTATE_SWITCH_ABSENT &&
         qemuDomainIsARMVirt(def)) ||
        (def->features[VIR_DOMAIN_FEATURE_GIC] == VIR_TRISTATE_SWITCH_ON &&
         def->gic_version == VIR_GIC_VERSION_NONE)) {
        virGICVersion version;

        VIR_DEBUG("Looking for usable GIC version in domain capabilities");
        /* Iterate from the newest known version down to (but excluding)
         * VIR_GIC_VERSION_NONE, taking the first supported one. */
        for (version = VIR_GIC_VERSION_LAST - 1;
             version > VIR_GIC_VERSION_NONE;
             version--) {

            /* We want to use the highest available GIC version for guests;
             * however, the emulated GICv3 is currently lacking a MSI controller,
             * making it unsuitable for the pure PCIe topology we aim for.
             *
             * For that reason, we skip this step entirely for TCG guests,
             * and rely on the code below to pick the default version, GICv2,
             * which supports all the features we need.
             *
             * See https://bugzilla.redhat.com/show_bug.cgi?id=1414081 */
            if (version == VIR_GIC_VERSION_3 &&
                def->virtType == VIR_DOMAIN_VIRT_QEMU) {
                continue;
            }

            if (virQEMUCapsSupportsGICVersion(qemuCaps,
                                              def->virtType,
                                              version)) {
                VIR_DEBUG("Using GIC version %s",
                          virGICVersionTypeToString(version));
                def->gic_version = version;
                break;
            }
        }

        /* Use the default GIC version (GICv2) as a last-ditch attempt
         * if no match could be found above */
        if (def->gic_version == VIR_GIC_VERSION_NONE) {
            VIR_DEBUG("Using GIC version 2 (default)");
            def->gic_version = VIR_GIC_VERSION_2;
        }

        /* Even if we haven't found a usable GIC version in the domain
         * capabilities, we still want to enable this */
        def->features[VIR_DOMAIN_FEATURE_GIC] = VIR_TRISTATE_SWITCH_ON;
    }
}
|
|
|
|
|
|
|
|
|
2016-01-07 01:39:06 +00:00
|
|
|
static int
|
|
|
|
qemuCanonicalizeMachine(virDomainDefPtr def, virQEMUCapsPtr qemuCaps)
|
|
|
|
{
|
|
|
|
const char *canon;
|
|
|
|
|
2019-10-24 06:52:21 +00:00
|
|
|
if (!(canon = virQEMUCapsGetCanonicalMachine(qemuCaps, def->virtType,
|
|
|
|
def->os.machine)))
|
2016-01-07 01:39:06 +00:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (STRNEQ(canon, def->os.machine)) {
|
|
|
|
char *tmp;
|
2019-10-20 11:49:46 +00:00
|
|
|
tmp = g_strdup(canon);
|
2016-01-07 01:39:06 +00:00
|
|
|
VIR_FREE(def->os.machine);
|
|
|
|
def->os.machine = tmp;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2016-05-19 07:35:02 +00:00
|
|
|
static int
|
2016-04-26 12:27:16 +00:00
|
|
|
qemuDomainRecheckInternalPaths(virDomainDefPtr def,
|
|
|
|
virQEMUDriverConfigPtr cfg,
|
|
|
|
unsigned int flags)
|
|
|
|
{
|
|
|
|
size_t i = 0;
|
2016-06-08 13:18:25 +00:00
|
|
|
size_t j = 0;
|
2016-04-26 12:27:16 +00:00
|
|
|
|
|
|
|
for (i = 0; i < def->ngraphics; ++i) {
|
|
|
|
virDomainGraphicsDefPtr graphics = def->graphics[i];
|
|
|
|
|
2016-06-08 13:18:25 +00:00
|
|
|
for (j = 0; j < graphics->nListens; ++j) {
|
|
|
|
virDomainGraphicsListenDefPtr glisten = &graphics->listens[j];
|
|
|
|
|
|
|
|
/* This will happen only if we parse XML from old libvirts where
|
|
|
|
* unix socket was available only for VNC graphics. In this
|
|
|
|
* particular case we should follow the behavior and if we remove
|
2017-02-27 16:00:15 +00:00
|
|
|
* the auto-generated socket based on config option from qemu.conf
|
2016-06-08 13:18:25 +00:00
|
|
|
* we need to change the listen type to address. */
|
|
|
|
if (graphics->type == VIR_DOMAIN_GRAPHICS_TYPE_VNC &&
|
|
|
|
glisten->type == VIR_DOMAIN_GRAPHICS_LISTEN_TYPE_SOCKET &&
|
|
|
|
glisten->socket &&
|
2017-02-27 16:00:15 +00:00
|
|
|
!glisten->autoGenerated &&
|
2016-06-08 13:18:25 +00:00
|
|
|
STRPREFIX(glisten->socket, cfg->libDir)) {
|
|
|
|
if (flags & VIR_DOMAIN_DEF_PARSE_INACTIVE) {
|
|
|
|
VIR_FREE(glisten->socket);
|
|
|
|
glisten->type = VIR_DOMAIN_GRAPHICS_LISTEN_TYPE_ADDRESS;
|
|
|
|
} else {
|
|
|
|
glisten->fromConfig = true;
|
|
|
|
}
|
2016-05-19 07:35:02 +00:00
|
|
|
}
|
2016-04-26 12:27:16 +00:00
|
|
|
}
|
|
|
|
}
|
2016-05-19 07:35:02 +00:00
|
|
|
|
|
|
|
return 0;
|
2016-04-26 12:27:16 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2016-08-04 12:36:24 +00:00
|
|
|
/**
 * qemuDomainDefVcpusPostParse:
 * @def: domain definition
 *
 * Validate the vcpu hotplug ordering constraints that QEMU imposes:
 *  - vcpu 0 must be online, non-hotpluggable, and (if ordering is used)
 *    ordered first,
 *  - either all online vcpus have an order set, or none do,
 *  - non-hotpluggable vcpus must precede hotpluggable ones and must be
 *    listed in ascending order.
 *
 * Returns 0 on success, -1 (with an error reported) on an invalid config.
 */
static int
qemuDomainDefVcpusPostParse(virDomainDefPtr def)
{
    unsigned int maxvcpus = virDomainDefGetVcpusMax(def);
    virDomainVcpuDefPtr vcpu;
    virDomainVcpuDefPtr prevvcpu;
    size_t i;
    bool has_order = false;

    /* vcpu 0 needs to be present, first, and non-hotpluggable */
    vcpu = virDomainDefGetVcpu(def, 0);
    if (!vcpu->online) {
        virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                       _("vcpu 0 can't be offline"));
        return -1;
    }
    if (vcpu->hotpluggable == VIR_TRISTATE_BOOL_YES) {
        virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                       _("vcpu0 can't be hotpluggable"));
        return -1;
    }
    /* order == 0 means "no order specified"; if specified it must be 1 */
    if (vcpu->order != 0 && vcpu->order != 1) {
        virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                       _("vcpu0 must be enabled first"));
        return -1;
    }

    if (vcpu->order != 0)
        has_order = true;

    prevvcpu = vcpu;

    /* all online vcpus or non online vcpu need to have order set */
    for (i = 1; i < maxvcpus; i++) {
        vcpu = virDomainDefGetVcpu(def, i);

        /* ordering must be used consistently: all-or-nothing */
        if (vcpu->online &&
            (vcpu->order != 0) != has_order) {
            virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                           _("all vcpus must have either set or unset order"));
            return -1;
        }

        /* few conditions for non-hotpluggable (thus online) vcpus */
        if (vcpu->hotpluggable == VIR_TRISTATE_BOOL_NO) {
            /* they can be ordered only at the beginning */
            if (prevvcpu->hotpluggable == VIR_TRISTATE_BOOL_YES) {
                virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                               _("online non-hotpluggable vcpus need to be "
                                 "ordered prior to hotplugable vcpus"));
                return -1;
            }

            /* they need to be in order (qemu doesn't support any order yet).
             * Also note that multiple vcpus may share order on some platforms */
            if (prevvcpu->order > vcpu->order) {
                virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                               _("online non-hotpluggable vcpus must be ordered "
                                 "in ascending order"));
                return -1;
            }
        }

        prevvcpu = vcpu;
    }

    return 0;
}
|
|
|
|
|
|
|
|
|
2019-09-26 16:42:02 +00:00
|
|
|
/**
 * qemuDomainDefSetDefaultCPU:
 * @def: domain definition
 * @hostarch: host CPU architecture
 * @qemuCaps: QEMU capabilities
 *
 * Fill in a default CPU definition when the domain XML did not specify a
 * CPU model, using the machine type's default CPU reported by QEMU. The
 * special "host" model is translated to host-model (s390, if supported)
 * or host-passthrough mode.
 *
 * Returns 0 on success (including the various "nothing to do" cases),
 * -1 on error.
 */
static int
qemuDomainDefSetDefaultCPU(virDomainDefPtr def,
                           virArch hostarch,
                           virQEMUCapsPtr qemuCaps)
{
    const char *model;

    /* user already specified a CPU mode or model: leave it alone */
    if (def->cpu &&
        (def->cpu->mode != VIR_CPU_MODE_CUSTOM ||
         def->cpu->model))
        return 0;

    if (!virCPUArchIsSupported(def->os.arch))
        return 0;

    /* Default CPU model info from QEMU is usable for TCG only except for
     * x86, s390, and ppc64. */
    if (!ARCH_IS_X86(def->os.arch) &&
        !ARCH_IS_S390(def->os.arch) &&
        !ARCH_IS_PPC64(def->os.arch) &&
        def->virtType != VIR_DOMAIN_VIRT_QEMU)
        return 0;

    model = virQEMUCapsGetMachineDefaultCPU(qemuCaps, def->os.machine, def->virtType);
    if (!model) {
        VIR_DEBUG("Unknown default CPU model for domain '%s'", def->name);
        return 0;
    }

    /* "host" only makes sense with hardware virtualization */
    if (STREQ(model, "host") && def->virtType != VIR_DOMAIN_VIRT_KVM) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("QEMU reports invalid default CPU model \"host\" "
                         "for non-kvm domain virt type"));
        return -1;
    }

    if (!def->cpu)
        def->cpu = virCPUDefNew();

    def->cpu->type = VIR_CPU_TYPE_GUEST;

    if (STREQ(model, "host")) {
        if (ARCH_IS_S390(def->os.arch) &&
            virQEMUCapsIsCPUModeSupported(qemuCaps, hostarch, def->virtType,
                                          VIR_CPU_MODE_HOST_MODEL,
                                          def->os.machine)) {
            def->cpu->mode = VIR_CPU_MODE_HOST_MODEL;
        } else {
            def->cpu->mode = VIR_CPU_MODE_HOST_PASSTHROUGH;
        }

        VIR_DEBUG("Setting default CPU mode for domain '%s' to %s",
                  def->name, virCPUModeTypeToString(def->cpu->mode));
    } else {
        /* We need to turn off all CPU checks when the domain is started
         * because the default CPU (e.g., qemu64) may not be runnable on any
         * host. QEMU will just disable the unavailable features and we will
         * update the CPU definition accordingly and set check to FULL when
         * starting the domain. */
        def->cpu->check = VIR_CPU_CHECK_NONE;
        def->cpu->mode = VIR_CPU_MODE_CUSTOM;
        def->cpu->match = VIR_CPU_MATCH_EXACT;
        def->cpu->fallback = VIR_CPU_FALLBACK_FORBID;
        def->cpu->model = g_strdup(model);

        VIR_DEBUG("Setting default CPU model for domain '%s' to %s",
                  def->name, model);
    }

    return 0;
}
|
|
|
|
|
|
|
|
|
2017-03-01 15:12:07 +00:00
|
|
|
/**
 * qemuDomainDefCPUPostParse:
 * @def: domain definition
 * @qemuCaps: QEMU capabilities (may be NULL for a running domain)
 *
 * Validate and finish the guest CPU definition:
 *  - validate <cache> (x86-only; per-mode level constraints),
 *  - reconcile ARM SVE vector-length features with the "sve" feature itself,
 *  - set the default 'migratable' value for host-passthrough CPUs,
 *  - set the default 'check' policy based on the CPU mode.
 *
 * Returns 0 on success, -1 (with an error reported) on failure.
 */
static int
qemuDomainDefCPUPostParse(virDomainDefPtr def,
                          virQEMUCapsPtr qemuCaps)
{
    virCPUFeatureDefPtr sveFeature = NULL;
    bool sveVectorLengthsProvided = false;
    size_t i;

    if (!def->cpu)
        return 0;

    if (def->cpu->cache) {
        virCPUCacheDefPtr cache = def->cpu->cache;

        if (!ARCH_IS_X86(def->os.arch)) {
            virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
                           _("CPU cache specification is not supported "
                             "for '%s' architecture"),
                           virArchToString(def->os.arch));
            return -1;
        }

        switch (cache->mode) {
        case VIR_CPU_CACHE_MODE_EMULATE:
            /* only the L3 cache can be emulated */
            if (cache->level != 3) {
                virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
                               _("CPU cache mode '%s' can only be used with "
                                 "level='3'"),
                               virCPUCacheModeTypeToString(cache->mode));
                return -1;
            }
            break;

        case VIR_CPU_CACHE_MODE_PASSTHROUGH:
            if (def->cpu->mode != VIR_CPU_MODE_HOST_PASSTHROUGH) {
                virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
                               _("CPU cache mode '%s' can only be used with "
                                 "'%s' CPUs"),
                               virCPUCacheModeTypeToString(cache->mode),
                               virCPUModeTypeToString(VIR_CPU_MODE_HOST_PASSTHROUGH));
                return -1;
            }

            /* level == -1 means "no level specified" */
            if (cache->level != -1) {
                virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
                               _("unsupported CPU cache level for mode '%s'"),
                               virCPUCacheModeTypeToString(cache->mode));
                return -1;
            }
            break;

        case VIR_CPU_CACHE_MODE_DISABLE:
            if (cache->level != -1) {
                virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
                               _("unsupported CPU cache level for mode '%s'"),
                               virCPUCacheModeTypeToString(cache->mode));
                return -1;
            }
            break;

        case VIR_CPU_CACHE_MODE_LAST:
            break;
        }
    }

    /* find the "sve" feature itself and any "sve<N>" vector-length features */
    for (i = 0; i < def->cpu->nfeatures; i++) {
        virCPUFeatureDefPtr feature = &def->cpu->features[i];

        if (STREQ(feature->name, "sve")) {
            sveFeature = feature;
        } else if (STRPREFIX(feature->name, "sve")) {
            sveVectorLengthsProvided = true;
        }
    }

    if (sveVectorLengthsProvided) {
        if (sveFeature) {
            /* vector lengths contradict an explicitly disabled SVE */
            if (sveFeature->policy == VIR_CPU_FEATURE_DISABLE ||
                sveFeature->policy == VIR_CPU_FEATURE_FORBID) {
                virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                               _("SVE disabled, but SVE vector lengths provided"));
                return -1;
            } else {
                sveFeature->policy = VIR_CPU_FEATURE_REQUIRE;
            }
        } else {
            /* vector lengths imply SVE: add the feature automatically */
            if (VIR_RESIZE_N(def->cpu->features, def->cpu->nfeatures_max,
                             def->cpu->nfeatures, 1) < 0) {
                return -1;
            }

            def->cpu->features[def->cpu->nfeatures].name = g_strdup("sve");
            def->cpu->features[def->cpu->nfeatures].policy = VIR_CPU_FEATURE_REQUIRE;

            def->cpu->nfeatures++;
        }
    }

    /* Running domains were either started before QEMU_CAPS_CPU_MIGRATABLE was
     * introduced and thus we can't rely on it or they already have the
     * migratable default set. */
    if (def->id == -1 &&
        qemuCaps &&
        def->cpu->mode == VIR_CPU_MODE_HOST_PASSTHROUGH &&
        def->cpu->migratable == VIR_TRISTATE_SWITCH_ABSENT) {
        if (virQEMUCapsGet(qemuCaps, QEMU_CAPS_CPU_MIGRATABLE))
            def->cpu->migratable = VIR_TRISTATE_SWITCH_ON;
        else if (ARCH_IS_X86(def->os.arch))
            def->cpu->migratable = VIR_TRISTATE_SWITCH_OFF;
    }

    /* Nothing to be done if only CPU topology is specified. */
    if (def->cpu->mode == VIR_CPU_MODE_CUSTOM &&
        !def->cpu->model)
        return 0;

    if (def->cpu->check != VIR_CPU_CHECK_DEFAULT)
        return 0;

    switch ((virCPUMode) def->cpu->mode) {
    case VIR_CPU_MODE_HOST_PASSTHROUGH:
        def->cpu->check = VIR_CPU_CHECK_NONE;
        break;

    case VIR_CPU_MODE_HOST_MODEL:
        def->cpu->check = VIR_CPU_CHECK_PARTIAL;
        break;

    case VIR_CPU_MODE_CUSTOM:
        /* Custom CPUs in TCG mode are not compared to host CPU by default. */
        if (def->virtType == VIR_DOMAIN_VIRT_QEMU)
            def->cpu->check = VIR_CPU_CHECK_NONE;
        else
            def->cpu->check = VIR_CPU_CHECK_PARTIAL;
        break;

    case VIR_CPU_MODE_LAST:
        break;
    }

    return 0;
}
|
|
|
|
|
|
|
|
|
2018-05-10 21:37:18 +00:00
|
|
|
static int
|
|
|
|
qemuDomainDefTsegPostParse(virDomainDefPtr def,
|
|
|
|
virQEMUCapsPtr qemuCaps)
|
|
|
|
{
|
|
|
|
if (def->features[VIR_DOMAIN_FEATURE_SMM] != VIR_TRISTATE_SWITCH_ON)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (!def->tseg_specified)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (!qemuDomainIsQ35(def)) {
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
|
|
|
|
_("SMM TSEG is only supported with q35 machine type"));
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!virQEMUCapsGet(qemuCaps, QEMU_CAPS_MCH_EXTENDED_TSEG_MBYTES)) {
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
|
|
|
|
_("Setting TSEG size is not supported with this QEMU binary"));
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (def->tseg_size & ((1 << 20) - 1)) {
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
|
|
|
|
_("SMM TSEG size must be divisible by 1 MiB"));
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2020-06-10 18:35:51 +00:00
|
|
|
/**
|
|
|
|
* qemuDomainDefNumaCPUsRectify:
|
|
|
|
* @numa: pointer to numa definition
|
|
|
|
* @maxCpus: number of CPUs this numa is supposed to have
|
|
|
|
*
|
|
|
|
* This function emulates the (to be deprecated) behavior of filling
|
|
|
|
* up in node0 with the remaining CPUs, in case of an incomplete NUMA
|
|
|
|
* setup, up to getVcpusMax.
|
|
|
|
*
|
|
|
|
* Returns: 0 on success, -1 on error
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
qemuDomainDefNumaCPUsRectify(virDomainDefPtr def, virQEMUCapsPtr qemuCaps)
|
|
|
|
{
|
|
|
|
unsigned int vcpusMax, numacpus;
|
|
|
|
|
|
|
|
/* QEMU_CAPS_NUMA tells us if QEMU is able to handle disjointed
|
|
|
|
* NUMA CPU ranges. The filling process will create a disjointed
|
|
|
|
* setup in node0 most of the time. Do not proceed if QEMU
|
|
|
|
* can't handle it.*/
|
|
|
|
if (virDomainNumaGetNodeCount(def->numa) == 0 ||
|
|
|
|
!virQEMUCapsGet(qemuCaps, QEMU_CAPS_NUMA))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
vcpusMax = virDomainDefGetVcpusMax(def);
|
|
|
|
numacpus = virDomainNumaGetCPUCountTotal(def->numa);
|
|
|
|
|
|
|
|
if (numacpus < vcpusMax) {
|
|
|
|
if (virDomainNumaFillCPUsInNode(def->numa, 0, vcpusMax) < 0)
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/* Post-parse callback wrapper: delegate NUMA CPU auto-fill to
 * qemuDomainDefNumaCPUsRectify(). Returns its result unchanged. */
static int
qemuDomainDefNumaCPUsPostParse(virDomainDefPtr def,
                               virQEMUCapsPtr qemuCaps)
{
    return qemuDomainDefNumaCPUsRectify(def, qemuCaps);
}
|
|
|
|
|
|
|
|
|
2020-06-10 18:11:48 +00:00
|
|
|
static int
|
|
|
|
qemuDomainDefTPMsPostParse(virDomainDefPtr def)
|
|
|
|
{
|
|
|
|
virDomainTPMDefPtr proxyTPM = NULL;
|
|
|
|
virDomainTPMDefPtr regularTPM = NULL;
|
|
|
|
size_t i;
|
|
|
|
|
|
|
|
for (i = 0; i < def->ntpms; i++) {
|
|
|
|
virDomainTPMDefPtr tpm = def->tpms[i];
|
|
|
|
|
2020-07-09 20:46:18 +00:00
|
|
|
/* TPM 1.2 and 2 are not compatible, so we choose a specific version here */
|
2020-07-09 20:46:19 +00:00
|
|
|
if (tpm->version == VIR_DOMAIN_TPM_VERSION_DEFAULT) {
|
2020-07-09 20:46:20 +00:00
|
|
|
if (tpm->model == VIR_DOMAIN_TPM_MODEL_SPAPR ||
|
|
|
|
tpm->model == VIR_DOMAIN_TPM_MODEL_CRB)
|
2020-07-09 20:46:19 +00:00
|
|
|
tpm->version = VIR_DOMAIN_TPM_VERSION_2_0;
|
|
|
|
else
|
|
|
|
tpm->version = VIR_DOMAIN_TPM_VERSION_1_2;
|
|
|
|
}
|
2020-07-09 20:46:18 +00:00
|
|
|
|
2020-06-10 18:11:48 +00:00
|
|
|
if (tpm->model == VIR_DOMAIN_TPM_MODEL_SPAPR_PROXY) {
|
|
|
|
if (proxyTPM) {
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
|
|
|
|
_("only a single TPM Proxy device is supported"));
|
|
|
|
return -1;
|
|
|
|
} else {
|
|
|
|
proxyTPM = tpm;
|
|
|
|
}
|
|
|
|
} else if (regularTPM) {
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
|
|
|
|
_("only a single TPM non-proxy device is supported"));
|
|
|
|
return -1;
|
|
|
|
} else {
|
|
|
|
regularTPM = tpm;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2017-08-15 13:16:20 +00:00
|
|
|
static int
|
|
|
|
qemuDomainDefPostParseBasic(virDomainDefPtr def,
|
2019-10-14 12:45:33 +00:00
|
|
|
void *opaque G_GNUC_UNUSED)
|
2017-08-15 13:16:20 +00:00
|
|
|
{
|
2019-11-26 16:53:53 +00:00
|
|
|
virQEMUDriverPtr driver = opaque;
|
|
|
|
|
2017-08-15 13:16:20 +00:00
|
|
|
/* check for emulator and create a default one if needed */
|
2019-11-26 16:53:53 +00:00
|
|
|
if (!def->emulator) {
|
|
|
|
if (!(def->emulator = virQEMUCapsGetDefaultEmulator(
|
2019-12-09 16:34:45 +00:00
|
|
|
driver->hostarch, def->os.arch))) {
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
|
|
|
|
_("No emulator found for arch '%s'"),
|
|
|
|
virArchToString(def->os.arch));
|
2019-11-26 16:53:53 +00:00
|
|
|
return 1;
|
2019-12-09 16:34:45 +00:00
|
|
|
}
|
2019-11-26 16:53:53 +00:00
|
|
|
}
|
2017-08-15 13:16:20 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2016-01-07 01:35:36 +00:00
|
|
|
/**
 * qemuDomainDefPostParse:
 * @def: domain definition being parsed
 * @parseFlags: bitwise-OR of virDomainDefParseFlags
 * @opaque: the virQEMUDriverPtr registered as the post-parse callback opaque
 * @parseOpaque: virQEMUCapsPtr for @def's emulator, or NULL if not yet probed
 *
 * QEMU driver post-parse callback: fills in QEMU-specific defaults (machine
 * type, NVRAM path, default devices, default CPU/features) and performs
 * QEMU-specific validation of the freshly parsed definition.
 *
 * Returns 0 on success, -1 on error (error reported), or 1 when @parseOpaque
 * (the capabilities) is NULL, signalling that the callback could not run and
 * must be re-run later with capabilities populated.
 */
static int
qemuDomainDefPostParse(virDomainDefPtr def,
                       unsigned int parseFlags,
                       void *opaque,
                       void *parseOpaque)
{
    virQEMUDriverPtr driver = opaque;
    g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
    virQEMUCapsPtr qemuCaps = parseOpaque;

    /* Note that qemuCaps may be NULL when this function is called. This
     * function shall not fail in that case. It will be re-run on VM startup
     * with the capabilities populated.
     */
    if (!qemuCaps)
        return 1;

    /* QEMU has no bootloader concept; reject it outright rather than
     * silently ignoring the configuration. */
    if (def->os.bootloader || def->os.bootloaderArgs) {
        virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                       _("bootloader is not supported by QEMU"));
        return -1;
    }

    /* Fill in the default machine type from capabilities if the user did
     * not specify one explicitly. */
    if (!def->os.machine) {
        const char *machine = virQEMUCapsGetPreferredMachine(qemuCaps,
                                                             def->virtType);
        if (!machine) {
            virReportError(VIR_ERR_INVALID_ARG,
                           _("could not get preferred machine for %s type=%s"),
                           def->emulator,
                           virDomainVirtTypeToString(def->virtType));
            return -1;
        }

        def->os.machine = g_strdup(machine);
    }

    qemuDomainNVRAMPathGenerate(cfg, def);

    if (qemuDomainDefAddDefaultDevices(def, qemuCaps) < 0)
        return -1;

    /* Resolve machine-type aliases to their canonical names before any
     * later step makes decisions based on the machine type. */
    if (qemuCanonicalizeMachine(def, qemuCaps) < 0)
        return -1;

    if (qemuDomainDefSetDefaultCPU(def, driver->hostarch, qemuCaps) < 0)
        return -1;

    qemuDomainDefEnableDefaultFeatures(def, qemuCaps);

    if (qemuDomainRecheckInternalPaths(def, cfg, parseFlags) < 0)
        return -1;

    if (qemuSecurityVerify(driver->securityManager, def) < 0)
        return -1;

    if (qemuDomainDefVcpusPostParse(def) < 0)
        return -1;

    if (qemuDomainDefCPUPostParse(def, qemuCaps) < 0)
        return -1;

    if (qemuDomainDefTsegPostParse(def, qemuCaps) < 0)
        return -1;

    if (qemuDomainDefNumaCPUsPostParse(def, qemuCaps) < 0)
        return -1;

    if (qemuDomainDefTPMsPostParse(def) < 0)
        return -1;

    return 0;
}
|
|
|
|
|
2016-05-17 13:56:51 +00:00
|
|
|
|
2019-09-12 22:25:21 +00:00
|
|
|
int
|
|
|
|
qemuDomainValidateActualNetDef(const virDomainNetDef *net,
|
|
|
|
virQEMUCapsPtr qemuCaps)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* Validations that can only be properly checked at runtime (after
|
|
|
|
* an <interface type='network'> has been resolved to its actual
|
|
|
|
* type.
|
|
|
|
*
|
|
|
|
* (In its current form this function can still be called before
|
|
|
|
* the actual type has been resolved (e.g. at domain definition
|
|
|
|
* time), but only if the validations would SUCCEED for
|
|
|
|
* type='network'.)
|
|
|
|
*/
|
2019-09-27 15:47:21 +00:00
|
|
|
char macstr[VIR_MAC_STRING_BUFLEN];
|
2019-09-12 22:25:21 +00:00
|
|
|
virDomainNetType actualType = virDomainNetGetActualType(net);
|
|
|
|
|
2019-09-27 15:47:21 +00:00
|
|
|
virMacAddrFormat(&net->mac, macstr);
|
|
|
|
|
conf: add hypervisor agnostic, domain start-time, validation function for NetDef
<interface> devices (virDomainNetDef) are a bit different from other
types of devices in that their actual type may come from a network (in
the form of a port connection), and that doesn't happen until the
domain is started. This means that any validation of an <interface> at
parse time needs to be a bit liberal in what it accepts - when
type='network', you could think that something is/isn't allowed, but
once the domain is started and a port is created by the configured
network, the opposite might be true.
To solve this problem hypervisor drivers need to do an extra
validation step when the domain is being started. I recently (commit
3cff23f7, libvirt 5.7.0) added a function to peform such validation
for all interfaces to the QEMU driver -
qemuDomainValidateActualNetDef() - but while that function is a good
single point to call for the multiple places that need to "start" an
interface (domain startup, device hotplug, device update), it can't be
called by the other hypervisor drivers, since 1) it's in the QEMU
driver, and 2) it contains some checks specific to QEMU. For
validation that applies to network devices on *all* hypervisors, we
need yet another interface validation function that can be called by
any hypervisor driver (not just QEMU) right after its network port has
been created during domain startup or hotplug. This patch adds that
function - virDomainActualNetDefValidate(), in the conf directory,
and calls it in appropriate places in the QEMU, lxc, and libxl
drivers.
This new function is the place to put all network device validation
that 1) is hypervisor agnostic, and 2) can't be done until we know the
"actual type" of an interface.
There is no framework for validation at domain startup as there is for
post-parse validation, but I don't want to create a whole elaborate
system that will only be used by one type of device. For that reason,
I just made a single function that should be called directly from the
hypervisors, when they are initializing interfaces to start a domain,
right after conditionally allocating the network port (and regardless
of whether or not that was actually needed). In the case of the QEMU
driver, qemuDomainValidateActualNetDef() is already called in all the
appropriate places, so we can just call the new function from
there. In the case of the other hypervisors, we search for
virDomainNetAllocateActualDevice() (which is the hypervisor-agnostic
function that calls virNetworkPortCreateXML()), and add the call to our
new function right after that.
The new function itself could be plunked down into many places in the
code, but we already have 3 validation functions for network devices
in 2 different places (not counting any basic validation done in
virDomainNetDefParseXML() itself):
1) post-parse hypervisor-agnostic
(virDomainNetDefValidate() - domain_conf.c:6145)
2) post-parse hypervisor-specific
(qemuDomainDeviceDefValidateNetwork() - qemu_domain.c:5498)
3) domain-start hypervisor-specific
(qemuDomainValidateActualNetDef() - qemu_domain.c:5390)
I placed (3) right next to (2) when I added it, specifically to avoid
spreading validation all over the code. For the same reason, I decided
to put this new function right next to (1) - this way if someone needs
to add validation specific to qemu, they go to one location, and if
they need to add validation applying to everyone, they go to the
other. It looks a bit strange to have a public function in between a
bunch of statics, but I think it's better than the alternative of
further fragmentation. (I'm open to other ideas though, of course.)
Signed-off-by: Laine Stump <laine@redhat.com>
Reviewed-by: Cole Robinson <crobinso@redhat.com>
2019-10-18 19:48:13 +00:00
|
|
|
/* hypervisor-agnostic validation */
|
|
|
|
if (virDomainActualNetDefValidate(net) < 0)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
/* QEMU-specific validation */
|
|
|
|
|
2019-09-12 22:25:21 +00:00
|
|
|
/* Only tap/macvtap devices support multiqueue. */
|
|
|
|
if (net->driver.virtio.queues > 0) {
|
|
|
|
|
|
|
|
if (!(actualType == VIR_DOMAIN_NET_TYPE_NETWORK ||
|
|
|
|
actualType == VIR_DOMAIN_NET_TYPE_BRIDGE ||
|
|
|
|
actualType == VIR_DOMAIN_NET_TYPE_DIRECT ||
|
|
|
|
actualType == VIR_DOMAIN_NET_TYPE_ETHERNET ||
|
|
|
|
actualType == VIR_DOMAIN_NET_TYPE_VHOSTUSER)) {
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
|
2019-09-27 15:47:21 +00:00
|
|
|
_("interface %s - multiqueue is not supported for network interfaces of type %s"),
|
|
|
|
macstr, virDomainNetTypeToString(actualType));
|
2019-09-12 22:25:21 +00:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (net->driver.virtio.queues > 1 &&
|
|
|
|
actualType == VIR_DOMAIN_NET_TYPE_VHOSTUSER &&
|
|
|
|
!virQEMUCapsGet(qemuCaps, QEMU_CAPS_VHOSTUSER_MULTIQUEUE)) {
|
2019-09-27 15:47:21 +00:00
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
|
|
|
|
_("interface %s - multiqueue is not supported for network interfaces of type vhost-user with this QEMU binary"),
|
|
|
|
macstr);
|
2019-09-12 22:25:21 +00:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Only standard tap devices support nwfilter rules, and even then only
|
|
|
|
* when *not* connected to an OVS bridge or midonet (indicated by having
|
|
|
|
* a <virtualport> element in the config)
|
|
|
|
*/
|
|
|
|
if (net->filter) {
|
2019-10-01 17:56:35 +00:00
|
|
|
const virNetDevVPortProfile *vport = virDomainNetGetActualVirtPortProfile(net);
|
2019-09-27 15:47:21 +00:00
|
|
|
|
2019-09-12 22:25:21 +00:00
|
|
|
if (!(actualType == VIR_DOMAIN_NET_TYPE_NETWORK ||
|
|
|
|
actualType == VIR_DOMAIN_NET_TYPE_BRIDGE ||
|
|
|
|
actualType == VIR_DOMAIN_NET_TYPE_ETHERNET)) {
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
|
2019-09-27 15:47:21 +00:00
|
|
|
_("interface %s - filterref is not supported for network interfaces of type %s"),
|
|
|
|
macstr, virDomainNetTypeToString(actualType));
|
2019-09-12 22:25:21 +00:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
if (vport && vport->virtPortType != VIR_NETDEV_VPORT_PROFILE_NONE) {
|
|
|
|
/* currently none of the defined virtualport types support iptables */
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
|
2019-09-27 15:47:21 +00:00
|
|
|
_("interface %s - filterref is not supported for network interfaces with virtualport type %s"),
|
|
|
|
macstr, virNetDevVPortTypeToString(vport->virtPortType));
|
2019-09-12 22:25:21 +00:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (net->backend.tap &&
|
|
|
|
!(actualType == VIR_DOMAIN_NET_TYPE_NETWORK ||
|
|
|
|
actualType == VIR_DOMAIN_NET_TYPE_BRIDGE ||
|
|
|
|
actualType == VIR_DOMAIN_NET_TYPE_ETHERNET)) {
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
|
2019-09-27 15:47:21 +00:00
|
|
|
_("interface %s - custom tap device path is not supported for network interfaces of type %s"),
|
|
|
|
macstr, virDomainNetTypeToString(actualType));
|
2019-09-12 22:25:21 +00:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
qemu: support interface <teaming> functionality
The QEMU driver uses the <teaming type='persistent|transient'
persistent='blah'/> element to setup a "failover" pair of devices -
the persistent device must be a virtio emulated NIC, with the only
extra configuration being the addition of ",failover=on" to the device
commandline, and the transient device must be a hostdev NIC
(<interface type='hostdev'> or <interface type='network'> with a
network that is a pool of SRIOV VFs) where the extra configuration is
the addition of ",failover_pair_id=$aliasOfVirtio" to the device
commandline. These new options are supported in QEMU 4.2.0 and later.
Extra qemu-specific validation is added to ensure that the device
type/model is appropriate and that the qemu binary supports these
commandline options.
The result of this will be:
1) The virtio device presented to the guest will have an extra bit set
in its PCI capabilities indicating that it can be used as a failover
backup device. The virtio guest driver will need to be equipped to do
something with this information - this is included in the Linux
virtio-net driver in kernel 4.18 and above (and also backported to
some older distro kernels). Unfortunately there is no way for libvirt
to learn whether or not the guest driver supports failover - if it
doesn't then the extra PCI capability will be ignored and the guest OS
will just see two independent devices. (NB: the current virtio guest
driver also requires that the MAC addresses of the two NICs match in
order to pair them into a bond).
2) When a migration is requested, QEMu will automatically unplug the
transient/hostdev NIC from the guest on the source host before
starting migration, and automatically re-plug a similar device after
restarting the guest CPUs on the destination host. While the transient
NIC is unplugged, all network traffic will go through the
persistent/virtio device, but when the hostdev NIC is plugged in, it
will get all the traffic. This means that in normal circumstances the
guest gets the performance advantage of vfio-assigned "real hardware"
networking, but it can still be migrated with the only downside being
a performance penalty (due to using an emulated NIC) during the
migration.
Signed-off-by: Laine Stump <laine@redhat.com>
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>
2020-01-23 20:34:53 +00:00
|
|
|
if (net->teaming.type == VIR_DOMAIN_NET_TEAMING_TYPE_TRANSIENT &&
|
|
|
|
actualType != VIR_DOMAIN_NET_TYPE_HOSTDEV) {
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
|
|
|
|
_("interface %s - teaming transient device must be type='hostdev', not '%s'"),
|
|
|
|
macstr, virDomainNetTypeToString(actualType));
|
|
|
|
return -1;
|
|
|
|
}
|
2019-09-12 22:25:21 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2018-05-29 15:28:11 +00:00
|
|
|
/**
 * qemuDomainValidateStorageSource:
 * @src: storage source definition to validate
 * @qemuCaps: QEMU capabilities used for capability-dependent checks
 * @maskBlockdev: if true, validate as if -blockdev were unavailable
 *
 * Validates a single storage source (disk image, backing-chain member, ...)
 * against QEMU-specific restrictions and the capabilities of the QEMU binary.
 * Checks are evaluated in order, so the first failing check determines the
 * reported error.
 *
 * Returns 0 if @src is acceptable, -1 (with error reported) otherwise.
 */
int
qemuDomainValidateStorageSource(virStorageSourcePtr src,
                                virQEMUCapsPtr qemuCaps,
                                bool maskBlockdev)
{
    int actualType = virStorageSourceGetActualType(src);
    bool blockdev = virQEMUCapsGet(qemuCaps, QEMU_CAPS_BLOCKDEV);

    /* Callers may force legacy (-drive) semantics even when the binary
     * supports -blockdev. */
    if (maskBlockdev)
        blockdev = false;

    if (src->format == VIR_STORAGE_FILE_COW) {
        virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                       _("'cow' storage format is not supported"));
        return -1;
    }

    if (src->format == VIR_STORAGE_FILE_DIR) {
        virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                       _("'directory' storage format is not directly supported by QEMU, "
                         "use 'dir' disk type instead"));
        return -1;
    }

    if (src->format == VIR_STORAGE_FILE_ISO) {
        virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                       _("storage format 'iso' is not directly supported by QEMU, "
                         "use 'raw' instead"));
        return -1;
    }

    /* The legacy qcow/qcow2 built-in encryption was dropped; only LUKS-based
     * qcow2 encryption (checked below) remains. */
    if ((src->format == VIR_STORAGE_FILE_QCOW ||
         src->format == VIR_STORAGE_FILE_QCOW2) &&
        src->encryption &&
        (src->encryption->format == VIR_STORAGE_ENCRYPTION_FORMAT_DEFAULT ||
         src->encryption->format == VIR_STORAGE_ENCRYPTION_FORMAT_QCOW)) {
        virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                       _("old qcow/qcow2 encryption is not supported"));
        return -1;
    }

    if (src->format == VIR_STORAGE_FILE_QCOW2 &&
        src->encryption &&
        src->encryption->format == VIR_STORAGE_ENCRYPTION_FORMAT_LUKS &&
        !virQEMUCapsGet(qemuCaps, QEMU_CAPS_QCOW2_LUKS)) {
        virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                       _("LUKS encrypted QCOW2 images are not supported by this QEMU"));
        return -1;
    }

    /* 'fat' is QEMU's virtual-VFAT feature: a host directory exposed as a
     * FAT filesystem; it only makes sense for 'dir'-like sources. */
    if (src->format == VIR_STORAGE_FILE_FAT &&
        actualType != VIR_STORAGE_TYPE_VOLUME &&
        actualType != VIR_STORAGE_TYPE_DIR) {
        virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                       _("storage format 'fat' is supported only with 'dir' "
                         "storage type"));
        return -1;
    }

    if (actualType == VIR_STORAGE_TYPE_DIR) {
        if (src->format > 0 &&
            src->format != VIR_STORAGE_FILE_FAT) {
            virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                           _("storage type 'dir' requires use of storage format 'fat'"));
            return -1;
        }

        /* QEMU's virtual FAT is read-only. */
        if (!src->readonly) {
            virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                           _("virtual FAT storage can't be accessed in read-write mode"));
            return -1;
        }
    }

    /* SCSI persistent reservations require the pr-manager-helper object. */
    if (src->pr &&
        !virQEMUCapsGet(qemuCaps, QEMU_CAPS_PR_MANAGER_HELPER)) {
        virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                       _("reservations not supported with this QEMU binary"));
        return -1;
    }

    /* Use QEMU_CAPS_ISCSI_PASSWORD_SECRET as witness that iscsi 'initiator-name'
     * option is available, it was introduced at the same time. */
    if (src->initiator.iqn &&
        !virQEMUCapsGet(qemuCaps, QEMU_CAPS_ISCSI_PASSWORD_SECRET)) {
        virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                       _("iSCSI initiator IQN not supported with this QEMU binary"));
        return -1;
    }

    if (src->sliceStorage) {
        /* In pre-blockdev era we can't configure the slice so we can allow them
         * only for detected backing store entries as they are populated
         * from a place that qemu would be able to read */
        if (!src->detected && !blockdev) {
            virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                           _("storage slice is not supported by this QEMU binary"));
            return -1;
        }
    }

    if (src->sslverify != VIR_TRISTATE_BOOL_ABSENT) {
        if (actualType != VIR_STORAGE_TYPE_NETWORK ||
            (src->protocol != VIR_STORAGE_NET_PROTOCOL_HTTPS &&
             src->protocol != VIR_STORAGE_NET_PROTOCOL_FTPS)) {
            virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                           _("ssl verification is supported only with HTTPS/FTPS protocol"));
            return -1;
        }

        /* Configurable SSL verification is a -blockdev-only feature; detected
         * backing entries are exempt (qemu itself produced them). */
        if (!src->detected && !blockdev) {
            virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                           _("ssl verification setting is not supported by this QEMU binary"));
            return -1;
        }
    }

    if (src->ncookies > 0) {
        if (actualType != VIR_STORAGE_TYPE_NETWORK ||
            (src->protocol != VIR_STORAGE_NET_PROTOCOL_HTTPS &&
             src->protocol != VIR_STORAGE_NET_PROTOCOL_HTTP)) {
            virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                           _("http cookies are supported only with HTTP(S) protocol"));
            return -1;
        }

        if (!src->detected && !blockdev) {
            virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                           _("http cookies are not supported by this QEMU binary"));
            return -1;
        }

        if (virStorageSourceNetCookiesValidate(src) < 0)
            return -1;
    }

    if (src->readahead > 0) {
        if (actualType != VIR_STORAGE_TYPE_NETWORK ||
            (src->protocol != VIR_STORAGE_NET_PROTOCOL_HTTPS &&
             src->protocol != VIR_STORAGE_NET_PROTOCOL_HTTP &&
             src->protocol != VIR_STORAGE_NET_PROTOCOL_FTP &&
             src->protocol != VIR_STORAGE_NET_PROTOCOL_FTPS)) {
            virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                           _("readahead is supported only with HTTP(S)/FTP(s) protocols"));
            return -1;
        }

        if (!src->detected && !blockdev) {
            virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                           _("readahead setting is not supported with this QEMU binary"));
            return -1;
        }
    }

    if (src->timeout > 0) {
        if (actualType != VIR_STORAGE_TYPE_NETWORK ||
            (src->protocol != VIR_STORAGE_NET_PROTOCOL_HTTPS &&
             src->protocol != VIR_STORAGE_NET_PROTOCOL_HTTP &&
             src->protocol != VIR_STORAGE_NET_PROTOCOL_FTP &&
             src->protocol != VIR_STORAGE_NET_PROTOCOL_FTPS)) {
            virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                           _("timeout is supported only with HTTP(S)/FTP(s) protocols"));
            return -1;
        }

        if (!src->detected && !blockdev) {
            virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                           _("timeout setting is not supported with this QEMU binary"));
            return -1;
        }
    }

    if (src->query &&
        (actualType != VIR_STORAGE_TYPE_NETWORK ||
         (src->protocol != VIR_STORAGE_NET_PROTOCOL_HTTPS &&
          src->protocol != VIR_STORAGE_NET_PROTOCOL_HTTP))) {
        virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                       _("query is supported only with HTTP(S) protocols"));
        return -1;
    }

    /* TFTP protocol was not supported for some time, lock it out at least with
     * -blockdev */
    if (actualType == VIR_STORAGE_TYPE_NETWORK &&
        src->protocol == VIR_STORAGE_NET_PROTOCOL_TFTP &&
        blockdev) {
        virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                       _("'tftp' protocol is not supported with this QEMU binary"));
        return -1;
    }

    return 0;
}
|
|
|
|
|
|
|
|
|
2017-08-16 13:49:15 +00:00
|
|
|
/**
|
|
|
|
* qemuDomainDefaultNetModel:
|
|
|
|
* @def: domain definition
|
|
|
|
* @qemuCaps: qemu capabilities
|
|
|
|
*
|
|
|
|
* Returns the default network model for a given domain. Note that if @qemuCaps
|
|
|
|
* is NULL this function may return NULL if the default model depends on the
|
|
|
|
* capabilities.
|
|
|
|
*/
|
2019-01-18 14:59:02 +00:00
|
|
|
static int
|
2015-09-07 06:51:00 +00:00
|
|
|
qemuDomainDefaultNetModel(const virDomainDef *def,
|
|
|
|
virQEMUCapsPtr qemuCaps)
|
maint: avoid 'const fooPtr' in domain_conf
'const fooPtr' is the same as 'foo * const' (the pointer won't
change, but it's contents can). But in general, if an interface
is trying to be const-correct, it should be using 'const foo *'
(the pointer is to data that can't be changed).
Fix up offenders in src/conf/domain_conf, and their fallout.
Several things to note: virObjectLock() requires a non-const
argument; if this were C++, we could treat the locking field
as 'mutable' and allow locking an otherwise 'const' object, but
that is a more invasive change, so I instead dropped attempts
to be const-correct on domain lookup. virXMLPropString and
friends require a non-const xmlNodePtr - this is because libxml2
is not a const-correct library. We could make the src/util/virxml
wrappers cast away const, but I figured it was easier to not
try to mark xmlNodePtr as const. Finally, virDomainDeviceDefCopy
was a rather hard conversion - it calls virDomainDeviceDefPostParse,
which in turn in the xen driver was actually modifying the domain
outside of the current device being visited. We should not be
adding a device on the first per-device callback, but waiting until
after all per-device callbacks are complete.
* src/conf/domain_conf.h (virDomainObjListFindByID)
(virDomainObjListFindByUUID, virDomainObjListFindByName)
(virDomainObjAssignDef, virDomainObjListAdd): Drop attempt at
const.
(virDomainDeviceDefCopy): Use intended type.
(virDomainDeviceDefParse, virDomainDeviceDefPostParseCallback)
(virDomainVideoDefaultType, virDomainVideoDefaultRAM)
(virDomainChrGetDomainPtrs): Make const-correct.
* src/conf/domain_conf.c (virDomainObjListFindByID)
(virDomainObjListFindByUUID, virDomainObjListFindByName)
(virDomainDeviceDefCopy, virDomainObjListAdd)
(virDomainObjAssignDef, virDomainHostdevSubsysUsbDefParseXML)
(virDomainHostdevSubsysPciOrigStatesDefParseXML)
(virDomainHostdevSubsysPciDefParseXML)
(virDomainHostdevSubsysScsiDefParseXML)
(virDomainControllerModelTypeFromString)
(virDomainTPMDefParseXML, virDomainTimerDefParseXML)
(virDomainSoundCodecDefParseXML, virDomainSoundDefParseXML)
(virDomainWatchdogDefParseXML, virDomainRNGDefParseXML)
(virDomainMemballoonDefParseXML, virDomainNVRAMDefParseXML)
(virSysinfoParseXML, virDomainVideoAccelDefParseXML)
(virDomainVideoDefParseXML, virDomainHostdevDefParseXML)
(virDomainRedirdevDefParseXML)
(virDomainRedirFilterUsbDevDefParseXML)
(virDomainRedirFilterDefParseXML, virDomainIdMapEntrySort)
(virDomainIdmapDefParseXML, virDomainVcpuPinDefParseXML)
(virDiskNameToBusDeviceIndex, virDomainDeviceDefCopy)
(virDomainVideoDefaultType, virDomainHostdevAssignAddress)
(virDomainDeviceDefPostParseInternal, virDomainDeviceDefPostParse)
(virDomainChrGetDomainPtrs, virDomainControllerSCSINextUnit)
(virDomainSCSIDriveAddressIsUsed)
(virDomainDriveAddressIsUsedByDisk)
(virDomainDriveAddressIsUsedByHostdev): Fix fallout.
* src/openvz/openvz_driver.c (openvzDomainDeviceDefPostParse):
Likewise.
* src/libxl/libxl_domain.c (libxlDomainDeviceDefPostParse):
Likewise.
* src/qemu/qemu_domain.c (qemuDomainDeviceDefPostParse)
(qemuDomainDefaultNetModel): Likewise.
* src/lxc/lxc_domain.c (virLXCDomainDeviceDefPostParse):
Likewise.
* src/uml/uml_driver.c (umlDomainDeviceDefPostParse): Likewise.
* src/xen/xen_driver.c (xenDomainDeviceDefPostParse): Split...
(xenDomainDefPostParse): ...since per-device callback is not the
time to be adding a device.
Signed-off-by: Eric Blake <eblake@redhat.com>
2013-10-08 15:08:25 +00:00
|
|
|
{
|
2015-02-18 15:44:19 +00:00
|
|
|
if (ARCH_IS_S390(def->os.arch))
|
2019-01-18 14:59:02 +00:00
|
|
|
return VIR_DOMAIN_NET_MODEL_VIRTIO;
|
2013-07-30 22:51:30 +00:00
|
|
|
|
2018-11-28 21:45:14 +00:00
|
|
|
if (def->os.arch == VIR_ARCH_ARMV6L ||
|
|
|
|
def->os.arch == VIR_ARCH_ARMV7L ||
|
2014-02-14 14:09:00 +00:00
|
|
|
def->os.arch == VIR_ARCH_AARCH64) {
|
2013-07-30 22:51:30 +00:00
|
|
|
if (STREQ(def->os.machine, "versatilepb"))
|
2019-01-18 14:59:02 +00:00
|
|
|
return VIR_DOMAIN_NET_MODEL_SMC91C111;
|
2013-07-30 22:51:30 +00:00
|
|
|
|
2018-08-22 09:15:20 +00:00
|
|
|
if (qemuDomainIsARMVirt(def))
|
2019-01-18 14:59:02 +00:00
|
|
|
return VIR_DOMAIN_NET_MODEL_VIRTIO;
|
2013-11-19 21:49:40 +00:00
|
|
|
|
2013-07-30 22:51:30 +00:00
|
|
|
/* Incomplete. vexpress (and a few others) use this, but not all
|
|
|
|
* arm boards */
|
2019-01-18 14:59:02 +00:00
|
|
|
return VIR_DOMAIN_NET_MODEL_LAN9118;
|
2013-07-30 22:51:30 +00:00
|
|
|
}
|
|
|
|
|
2018-08-28 15:40:27 +00:00
|
|
|
/* virtio is a sensible default for RISC-V virt guests */
|
|
|
|
if (qemuDomainIsRISCVVirt(def))
|
2019-01-18 14:59:02 +00:00
|
|
|
return VIR_DOMAIN_NET_MODEL_VIRTIO;
|
2018-08-28 15:40:27 +00:00
|
|
|
|
2017-08-16 13:49:15 +00:00
|
|
|
/* In all other cases the model depends on the capabilities. If they were
|
|
|
|
* not provided don't report any default. */
|
|
|
|
if (!qemuCaps)
|
2019-01-18 14:59:02 +00:00
|
|
|
return VIR_DOMAIN_NET_MODEL_UNKNOWN;
|
2017-08-16 13:49:15 +00:00
|
|
|
|
2015-09-07 06:51:00 +00:00
|
|
|
/* Try several network devices in turn; each of these devices is
|
|
|
|
* less likely be supported out-of-the-box by the guest operating
|
|
|
|
* system than the previous one */
|
|
|
|
if (virQEMUCapsGet(qemuCaps, QEMU_CAPS_DEVICE_RTL8139))
|
2019-01-18 14:59:02 +00:00
|
|
|
return VIR_DOMAIN_NET_MODEL_RTL8139;
|
2015-09-07 06:51:00 +00:00
|
|
|
else if (virQEMUCapsGet(qemuCaps, QEMU_CAPS_DEVICE_E1000))
|
2019-01-18 14:59:02 +00:00
|
|
|
return VIR_DOMAIN_NET_MODEL_E1000;
|
2015-09-07 06:51:00 +00:00
|
|
|
else if (virQEMUCapsGet(qemuCaps, QEMU_CAPS_DEVICE_VIRTIO_NET))
|
2019-01-18 14:59:02 +00:00
|
|
|
return VIR_DOMAIN_NET_MODEL_VIRTIO;
|
2015-09-07 06:51:00 +00:00
|
|
|
|
|
|
|
/* We've had no luck detecting support for any network device,
|
|
|
|
* but we have to return something: might as well be rtl8139 */
|
2019-01-18 14:59:02 +00:00
|
|
|
return VIR_DOMAIN_NET_MODEL_RTL8139;
|
2013-07-30 22:51:30 +00:00
|
|
|
}
|
2013-03-11 11:12:08 +00:00
|
|
|
|
2016-07-08 15:25:03 +00:00
|
|
|
|
|
|
|
/*
|
2017-05-11 12:09:35 +00:00
|
|
|
* Clear auto generated unix socket paths:
|
|
|
|
*
|
|
|
|
* libvirt 1.2.18 and older:
|
|
|
|
* {cfg->channelTargetDir}/{dom-name}.{target-name}
|
|
|
|
*
|
|
|
|
* libvirt 1.2.19 - 1.3.2:
|
|
|
|
* {cfg->channelTargetDir}/domain-{dom-name}/{target-name}
|
|
|
|
*
|
|
|
|
* libvirt 1.3.3 and newer:
|
|
|
|
* {cfg->channelTargetDir}/domain-{dom-id}-{short-dom-name}/{target-name}
|
|
|
|
*
|
|
|
|
* The unix socket path was stored in config XML until libvirt 1.3.0.
|
|
|
|
* If someone specifies the same path as we generate, they shouldn't do it.
|
|
|
|
*
|
|
|
|
* This function clears the path for migration as well, so we need to clear
|
|
|
|
* the path even if we are not storing it in the XML.
|
2016-07-08 15:25:03 +00:00
|
|
|
*/
|
2020-01-09 18:33:47 +00:00
|
|
|
static void
|
2016-07-08 15:25:03 +00:00
|
|
|
qemuDomainChrDefDropDefaultPath(virDomainChrDefPtr chr,
|
|
|
|
virQEMUDriverPtr driver)
|
|
|
|
{
|
2020-01-09 18:33:45 +00:00
|
|
|
g_autoptr(virQEMUDriverConfig) cfg = NULL;
|
2020-07-02 22:26:41 +00:00
|
|
|
g_auto(virBuffer) buf = VIR_BUFFER_INITIALIZER;
|
2020-01-09 18:33:44 +00:00
|
|
|
g_autofree char *regexp = NULL;
|
2016-07-08 15:25:03 +00:00
|
|
|
|
2017-05-11 12:09:35 +00:00
|
|
|
if (chr->deviceType != VIR_DOMAIN_CHR_DEVICE_TYPE_CHANNEL ||
|
|
|
|
chr->targetType != VIR_DOMAIN_CHR_CHANNEL_TARGET_TYPE_VIRTIO ||
|
|
|
|
chr->source->type != VIR_DOMAIN_CHR_TYPE_UNIX ||
|
|
|
|
!chr->source->data.nix.path) {
|
2020-01-09 18:33:47 +00:00
|
|
|
return;
|
2016-07-08 15:25:03 +00:00
|
|
|
}
|
|
|
|
|
2017-05-11 12:09:35 +00:00
|
|
|
cfg = virQEMUDriverGetConfig(driver);
|
|
|
|
|
|
|
|
virBufferEscapeRegex(&buf, "^%s", cfg->channelTargetDir);
|
|
|
|
virBufferAddLit(&buf, "/([^/]+\\.)|(domain-[^/]+/)");
|
|
|
|
virBufferEscapeRegex(&buf, "%s$", chr->target.name);
|
|
|
|
|
|
|
|
regexp = virBufferContentAndReset(&buf);
|
|
|
|
|
|
|
|
if (virStringMatch(chr->source->data.nix.path, regexp))
|
|
|
|
VIR_FREE(chr->source->data.nix.path);
|
2016-07-08 15:25:03 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2016-08-03 15:34:51 +00:00
|
|
|
static int
|
|
|
|
qemuDomainShmemDefPostParse(virDomainShmemDefPtr shm)
|
|
|
|
{
|
|
|
|
/* This was the default since the introduction of this device. */
|
|
|
|
if (shm->model != VIR_DOMAIN_SHMEM_MODEL_IVSHMEM_DOORBELL && !shm->size)
|
|
|
|
shm->size = 4 << 20;
|
|
|
|
|
|
|
|
/* Nothing more to check/change for IVSHMEM */
|
|
|
|
if (shm->model == VIR_DOMAIN_SHMEM_MODEL_IVSHMEM)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (!shm->server.enabled) {
|
|
|
|
if (shm->model == VIR_DOMAIN_SHMEM_MODEL_IVSHMEM_DOORBELL) {
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
|
|
|
|
_("shmem model '%s' is supported "
|
|
|
|
"only with server option enabled"),
|
|
|
|
virDomainShmemModelTypeToString(shm->model));
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (shm->msi.enabled) {
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
|
|
|
|
_("shmem model '%s' doesn't support "
|
|
|
|
"msi"),
|
|
|
|
virDomainShmemModelTypeToString(shm->model));
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if (shm->model == VIR_DOMAIN_SHMEM_MODEL_IVSHMEM_PLAIN) {
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
|
|
|
|
_("shmem model '%s' is supported "
|
|
|
|
"only with server option disabled"),
|
|
|
|
virDomainShmemModelTypeToString(shm->model));
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (shm->size) {
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
|
|
|
|
_("shmem model '%s' does not support size setting"),
|
|
|
|
virDomainShmemModelTypeToString(shm->model));
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
shm->msi.enabled = true;
|
|
|
|
if (!shm->msi.ioeventfd)
|
|
|
|
shm->msi.ioeventfd = VIR_TRISTATE_SWITCH_ON;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2017-04-13 10:25:25 +00:00
|
|
|
#define QEMU_USB_XHCI_MAXPORTS 15
|
2016-11-28 16:37:39 +00:00
|
|
|
|
|
|
|
|
2017-03-01 18:31:19 +00:00
|
|
|
/**
 * qemuDomainControllerDefPostParse:
 * @cont: controller definition adjusted in place
 * @def: domain definition the controller belongs to (read-only)
 * @qemuCaps: emulator capabilities; may be NULL (e.g. when reloading saved
 *            configs), in which case capability-dependent defaults are skipped
 * @parseFlags: bitwise-OR of virDomainDefParseFlags
 *
 * Fill in QEMU-specific controller defaults and reject controller
 * configurations QEMU cannot support.
 *
 * Returns 0 on success, -1 (with an error reported) on failure.
 */
static int
qemuDomainControllerDefPostParse(virDomainControllerDefPtr cont,
                                 const virDomainDef *def,
                                 virQEMUCapsPtr qemuCaps,
                                 unsigned int parseFlags)
{
    switch ((virDomainControllerType)cont->type) {
    case VIR_DOMAIN_CONTROLLER_TYPE_SCSI:
        /* Set the default SCSI controller model if not already set */
        if (qemuDomainSetSCSIControllerModel(def, cont, qemuCaps) < 0)
            return -1;
        break;

    case VIR_DOMAIN_CONTROLLER_TYPE_USB:
        if (cont->model == VIR_DOMAIN_CONTROLLER_MODEL_USB_DEFAULT && qemuCaps) {
            /* Pick a suitable default model for the USB controller if none
             * has been selected by the user and we have the qemuCaps for
             * figuring out which controllers are supported.
             *
             * We rely on device availability instead of setting the model
             * unconditionally because, for some machine types, there's a
             * chance we will get away with using the legacy USB controller
             * when the relevant device is not available.
             *
             * See qemuBuildControllerDevCommandLine() */

            /* Default USB controller is piix3-uhci if available. */
            if (virQEMUCapsGet(qemuCaps, QEMU_CAPS_PIIX3_USB_UHCI))
                cont->model = VIR_DOMAIN_CONTROLLER_MODEL_USB_PIIX3_UHCI;

            if (ARCH_IS_S390(def->os.arch)) {
                if (cont->info.type == VIR_DOMAIN_DEVICE_ADDRESS_TYPE_NONE) {
                    /* set the default USB model to none for s390 unless an
                     * address is found */
                    cont->model = VIR_DOMAIN_CONTROLLER_MODEL_USB_NONE;
                }
            } else if (ARCH_IS_PPC64(def->os.arch)) {
                /* To not break migration we need to set default USB controller
                 * for ppc64 to pci-ohci if we cannot change ABI of the VM.
                 * The nec-usb-xhci or qemu-xhci controller is used as default
                 * only for newly defined domains or devices. */
                if ((parseFlags & VIR_DOMAIN_DEF_PARSE_ABI_UPDATE) &&
                    virQEMUCapsGet(qemuCaps, QEMU_CAPS_DEVICE_QEMU_XHCI)) {
                    cont->model = VIR_DOMAIN_CONTROLLER_MODEL_USB_QEMU_XHCI;
                } else if ((parseFlags & VIR_DOMAIN_DEF_PARSE_ABI_UPDATE) &&
                           virQEMUCapsGet(qemuCaps, QEMU_CAPS_NEC_USB_XHCI)) {
                    cont->model = VIR_DOMAIN_CONTROLLER_MODEL_USB_NEC_XHCI;
                } else if (virQEMUCapsGet(qemuCaps, QEMU_CAPS_PCI_OHCI)) {
                    cont->model = VIR_DOMAIN_CONTROLLER_MODEL_USB_PCI_OHCI;
                } else {
                    /* Explicitly fallback to legacy USB controller for PPC64. */
                    cont->model = -1;
                }
            } else if (def->os.arch == VIR_ARCH_AARCH64) {
                if (virQEMUCapsGet(qemuCaps, QEMU_CAPS_DEVICE_QEMU_XHCI))
                    cont->model = VIR_DOMAIN_CONTROLLER_MODEL_USB_QEMU_XHCI;
                else if (virQEMUCapsGet(qemuCaps, QEMU_CAPS_NEC_USB_XHCI))
                    cont->model = VIR_DOMAIN_CONTROLLER_MODEL_USB_NEC_XHCI;
            }
        }
        /* forbid usb model 'qusb1' and 'qusb2' in this kind of hypervisor */
        if (cont->model == VIR_DOMAIN_CONTROLLER_MODEL_USB_QUSB1 ||
            cont->model == VIR_DOMAIN_CONTROLLER_MODEL_USB_QUSB2) {
            virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
                           _("USB controller model type 'qusb1' or 'qusb2' "
                             "is not supported in %s"),
                           virDomainVirtTypeToString(def->virtType));
            return -1;
        }
        if ((cont->model == VIR_DOMAIN_CONTROLLER_MODEL_USB_NEC_XHCI ||
             cont->model == VIR_DOMAIN_CONTROLLER_MODEL_USB_QEMU_XHCI) &&
            cont->opts.usbopts.ports > QEMU_USB_XHCI_MAXPORTS) {
            virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
                           _("'%s' controller only supports up to '%u' ports"),
                           virDomainControllerModelUSBTypeToString(cont->model),
                           QEMU_USB_XHCI_MAXPORTS);
            return -1;
        }
        break;

    case VIR_DOMAIN_CONTROLLER_TYPE_PCI:

        /* pSeries guests can have multiple pci-root controllers,
         * but other machine types only support a single one */
        if (!qemuDomainIsPSeries(def) &&
            (cont->model == VIR_DOMAIN_CONTROLLER_MODEL_PCI_ROOT ||
             cont->model == VIR_DOMAIN_CONTROLLER_MODEL_PCIE_ROOT) &&
            cont->idx != 0) {
            virReportError(VIR_ERR_XML_ERROR, "%s",
                           _("pci-root and pcie-root controllers "
                             "should have index 0"));
            return -1;
        }

        if (cont->model == VIR_DOMAIN_CONTROLLER_MODEL_PCI_EXPANDER_BUS &&
            !qemuDomainIsI440FX(def)) {
            virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                           _("pci-expander-bus controllers are only supported "
                             "on 440fx-based machinetypes"));
            return -1;
        }
        if (cont->model == VIR_DOMAIN_CONTROLLER_MODEL_PCIE_EXPANDER_BUS &&
            !qemuDomainIsQ35(def)) {
            virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                           _("pcie-expander-bus controllers are only supported "
                             "on q35-based machinetypes"));
            return -1;
        }

        /* if a PCI expander bus or pci-root on Pseries has a NUMA node
         * set, make sure that NUMA node is configured in the guest
         * <cpu><numa> array. NUMA cell id's in this array are numbered
         * from 0 .. size-1.
         */
        if (cont->opts.pciopts.numaNode >= 0 &&
            cont->opts.pciopts.numaNode >=
            (int)virDomainNumaGetNodeCount(def->numa)) {
            virReportError(VIR_ERR_XML_ERROR,
                           _("%s with index %d is "
                             "configured for a NUMA node (%d) "
                             "not present in the domain's "
                             "<cpu><numa> array (%zu)"),
                           virDomainControllerModelPCITypeToString(cont->model),
                           cont->idx, cont->opts.pciopts.numaNode,
                           virDomainNumaGetNodeCount(def->numa));
            return -1;
        }
        break;

    case VIR_DOMAIN_CONTROLLER_TYPE_SATA:
    case VIR_DOMAIN_CONTROLLER_TYPE_VIRTIO_SERIAL:
    case VIR_DOMAIN_CONTROLLER_TYPE_CCID:
    case VIR_DOMAIN_CONTROLLER_TYPE_IDE:
    case VIR_DOMAIN_CONTROLLER_TYPE_FDC:
    case VIR_DOMAIN_CONTROLLER_TYPE_XENBUS:
    case VIR_DOMAIN_CONTROLLER_TYPE_ISA:
    case VIR_DOMAIN_CONTROLLER_TYPE_LAST:
        /* no QEMU-specific defaults or checks for these controller types */
        break;
    }

    return 0;
}
|
|
|
|
|
2017-11-08 14:40:42 +00:00
|
|
|
/**
 * qemuDomainChrDefPostParse:
 * @chr: character device definition adjusted in place
 * @def: domain definition (read-only), used to pick arch/machine defaults
 * @driver: QEMU driver (used to detect auto-generated socket paths)
 * @parseFlags: bitwise-OR of virDomainDefParseFlags
 *
 * Fill in the default serial target type and model for the guest
 * architecture/machine type, and normalize auto-generated UNIX socket
 * chardev paths for inactive definitions.
 *
 * Returns 0 (this function currently cannot fail).
 */
static int
qemuDomainChrDefPostParse(virDomainChrDefPtr chr,
                          const virDomainDef *def,
                          virQEMUDriverPtr driver,
                          unsigned int parseFlags)
{
    /* Historically, isa-serial and the default matched, so in order to
     * maintain backwards compatibility we map them here. The actual default
     * will be picked below based on the architecture and machine type. */
    if (chr->deviceType == VIR_DOMAIN_CHR_DEVICE_TYPE_SERIAL &&
        chr->targetType == VIR_DOMAIN_CHR_SERIAL_TARGET_TYPE_ISA) {
        chr->targetType = VIR_DOMAIN_CHR_SERIAL_TARGET_TYPE_NONE;
    }

    /* Set the default serial type (per architecture / machine type) */
    if (chr->deviceType == VIR_DOMAIN_CHR_DEVICE_TYPE_SERIAL &&
        chr->targetType == VIR_DOMAIN_CHR_SERIAL_TARGET_TYPE_NONE) {
        if (ARCH_IS_X86(def->os.arch)) {
            chr->targetType = VIR_DOMAIN_CHR_SERIAL_TARGET_TYPE_ISA;
        } else if (qemuDomainIsPSeries(def)) {
            chr->targetType = VIR_DOMAIN_CHR_SERIAL_TARGET_TYPE_SPAPR_VIO;
        } else if (qemuDomainIsARMVirt(def) || qemuDomainIsRISCVVirt(def)) {
            chr->targetType = VIR_DOMAIN_CHR_SERIAL_TARGET_TYPE_SYSTEM;
        } else if (ARCH_IS_S390(def->os.arch)) {
            chr->targetType = VIR_DOMAIN_CHR_SERIAL_TARGET_TYPE_SCLP;
        }
    }

    /* Set the default target model matching the (possibly just chosen)
     * target type. */
    if (chr->deviceType == VIR_DOMAIN_CHR_DEVICE_TYPE_SERIAL &&
        chr->targetModel == VIR_DOMAIN_CHR_SERIAL_TARGET_MODEL_NONE) {
        switch ((virDomainChrSerialTargetType)chr->targetType) {
        case VIR_DOMAIN_CHR_SERIAL_TARGET_TYPE_ISA:
            chr->targetModel = VIR_DOMAIN_CHR_SERIAL_TARGET_MODEL_ISA_SERIAL;
            break;
        case VIR_DOMAIN_CHR_SERIAL_TARGET_TYPE_USB:
            chr->targetModel = VIR_DOMAIN_CHR_SERIAL_TARGET_MODEL_USB_SERIAL;
            break;
        case VIR_DOMAIN_CHR_SERIAL_TARGET_TYPE_PCI:
            chr->targetModel = VIR_DOMAIN_CHR_SERIAL_TARGET_MODEL_PCI_SERIAL;
            break;
        case VIR_DOMAIN_CHR_SERIAL_TARGET_TYPE_SPAPR_VIO:
            chr->targetModel = VIR_DOMAIN_CHR_SERIAL_TARGET_MODEL_SPAPR_VTY;
            break;
        case VIR_DOMAIN_CHR_SERIAL_TARGET_TYPE_SYSTEM:
            /* The 'system' target maps to a different UART per machine */
            if (qemuDomainIsARMVirt(def)) {
                chr->targetModel = VIR_DOMAIN_CHR_SERIAL_TARGET_MODEL_PL011;
            } else if (qemuDomainIsRISCVVirt(def)) {
                chr->targetModel = VIR_DOMAIN_CHR_SERIAL_TARGET_MODEL_16550A;
            }
            break;
        case VIR_DOMAIN_CHR_SERIAL_TARGET_TYPE_SCLP:
            chr->targetModel = VIR_DOMAIN_CHR_SERIAL_TARGET_MODEL_SCLPCONSOLE;
            break;
        case VIR_DOMAIN_CHR_SERIAL_TARGET_TYPE_NONE:
        case VIR_DOMAIN_CHR_SERIAL_TARGET_TYPE_LAST:
            /* Nothing to do */
            break;
        }
    }

    /* clear auto generated unix socket path for inactive definitions */
    if (parseFlags & VIR_DOMAIN_DEF_PARSE_INACTIVE) {
        qemuDomainChrDefDropDefaultPath(chr, driver);

        /* For UNIX chardev if no path is provided we generate one.
         * This also implies that the mode is 'bind'. */
        if (chr->source &&
            chr->source->type == VIR_DOMAIN_CHR_TYPE_UNIX &&
            !chr->source->data.nix.path) {
            chr->source->data.nix.listen = true;
        }
    }

    return 0;
}
|
2017-03-01 18:31:19 +00:00
|
|
|
|
2018-05-24 14:55:20 +00:00
|
|
|
|
2018-05-24 16:24:13 +00:00
|
|
|
/**
 * qemuDomainDeviceDiskDefPostParseRestoreSecAlias:
 * @disk: disk definition being parsed
 * @qemuCaps: emulator capabilities; may be NULL, in which case nothing is done
 * @parseFlags: bitwise-OR of virDomainDefParseFlags
 *
 * Re-generate aliases for objects related to the storage source if they
 * were not stored in the status XML by an older libvirt.
 *
 * Note that qemuCaps should be always present for a status XML.
 */
static int
qemuDomainDeviceDiskDefPostParseRestoreSecAlias(virDomainDiskDefPtr disk,
                                                virQEMUCapsPtr qemuCaps,
                                                unsigned int parseFlags)
{
    qemuDomainStorageSourcePrivatePtr priv = QEMU_DOMAIN_STORAGE_SOURCE_PRIVATE(disk->src);
    bool restoreAuthSecret = false;
    bool restoreEncSecret = false;
    g_autofree char *authalias = NULL;
    g_autofree char *encalias = NULL;

    /* Only relevant when parsing a status XML produced by a libvirt that
     * already supported secret objects. */
    if (!(parseFlags & VIR_DOMAIN_DEF_PARSE_STATUS) ||
        !qemuCaps ||
        virStorageSourceIsEmpty(disk->src) ||
        !virQEMUCapsGet(qemuCaps, QEMU_CAPS_OBJECT_SECRET))
        return 0;

    /* network storage authentication secret */
    if (disk->src->auth &&
        (!priv || !priv->secinfo)) {

        /* only RBD and iSCSI (with capability) were supporting authentication
         * using secret object at the time we did not format the alias into the
         * status XML */
        if (virStorageSourceGetActualType(disk->src) == VIR_STORAGE_TYPE_NETWORK &&
            (disk->src->protocol == VIR_STORAGE_NET_PROTOCOL_RBD ||
             (disk->src->protocol == VIR_STORAGE_NET_PROTOCOL_ISCSI &&
              virQEMUCapsGet(qemuCaps, QEMU_CAPS_ISCSI_PASSWORD_SECRET))))
            restoreAuthSecret = true;
    }

    /* disk encryption secret */
    if (disk->src->encryption &&
        disk->src->encryption->format == VIR_STORAGE_ENCRYPTION_FORMAT_LUKS &&
        (!priv || !priv->encinfo))
        restoreEncSecret = true;

    if (!restoreAuthSecret && !restoreEncSecret)
        return 0;

    /* allocate the private data holder lazily, only when needed */
    if (!priv) {
        if (!(disk->src->privateData = qemuDomainStorageSourcePrivateNew()))
            return -1;

        priv = QEMU_DOMAIN_STORAGE_SOURCE_PRIVATE(disk->src);
    }

    if (restoreAuthSecret) {
        /* reconstruct the alias the old libvirt would have generated */
        authalias = g_strdup_printf("%s-secret0", disk->info.alias);

        if (qemuStorageSourcePrivateDataAssignSecinfo(&priv->secinfo, &authalias) < 0)
            return -1;
    }

    if (restoreEncSecret) {
        encalias = g_strdup_printf("%s-luks-secret0", disk->info.alias);

        if (qemuStorageSourcePrivateDataAssignSecinfo(&priv->encinfo, &encalias) < 0)
            return -1;
    }

    return 0;
}
|
|
|
|
|
|
|
|
|
2018-05-24 14:55:20 +00:00
|
|
|
static int
|
|
|
|
qemuDomainDeviceDiskDefPostParse(virDomainDiskDefPtr disk,
|
2018-05-24 16:24:13 +00:00
|
|
|
virQEMUCapsPtr qemuCaps,
|
2018-06-04 07:00:27 +00:00
|
|
|
unsigned int parseFlags)
|
2018-05-24 14:55:20 +00:00
|
|
|
{
|
|
|
|
/* set default disk types and drivers */
|
2020-11-06 03:32:37 +00:00
|
|
|
if (!virDomainDiskGetDriver(disk))
|
|
|
|
virDomainDiskSetDriver(disk, "qemu");
|
2018-05-24 14:55:20 +00:00
|
|
|
|
2018-06-04 07:00:27 +00:00
|
|
|
/* default disk format for drives */
|
|
|
|
if (virDomainDiskGetFormat(disk) == VIR_STORAGE_FILE_NONE &&
|
2018-10-04 12:43:46 +00:00
|
|
|
virDomainDiskGetType(disk) != VIR_STORAGE_TYPE_VOLUME)
|
2018-06-04 07:00:27 +00:00
|
|
|
virDomainDiskSetFormat(disk, VIR_STORAGE_FILE_RAW);
|
2018-05-24 14:55:20 +00:00
|
|
|
|
2018-06-04 07:00:27 +00:00
|
|
|
/* default disk format for mirrored drive */
|
|
|
|
if (disk->mirror &&
|
|
|
|
disk->mirror->format == VIR_STORAGE_FILE_NONE)
|
|
|
|
disk->mirror->format = VIR_STORAGE_FILE_RAW;
|
2018-05-24 14:55:20 +00:00
|
|
|
|
2018-05-24 16:24:13 +00:00
|
|
|
if (qemuDomainDeviceDiskDefPostParseRestoreSecAlias(disk, qemuCaps,
|
|
|
|
parseFlags) < 0)
|
|
|
|
return -1;
|
|
|
|
|
2018-05-30 10:18:37 +00:00
|
|
|
/* regenerate TLS alias for old status XMLs */
|
|
|
|
if (parseFlags & VIR_DOMAIN_DEF_PARSE_STATUS &&
|
|
|
|
disk->src->haveTLS == VIR_TRISTATE_BOOL_YES &&
|
|
|
|
!disk->src->tlsAlias &&
|
|
|
|
!(disk->src->tlsAlias = qemuAliasTLSObjFromSrcAlias(disk->info.alias)))
|
|
|
|
return -1;
|
|
|
|
|
2018-05-24 14:55:20 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2018-05-24 15:01:55 +00:00
|
|
|
static int
|
|
|
|
qemuDomainDeviceNetDefPostParse(virDomainNetDefPtr net,
|
|
|
|
const virDomainDef *def,
|
|
|
|
virQEMUCapsPtr qemuCaps)
|
|
|
|
{
|
2020-10-14 17:08:27 +00:00
|
|
|
if (net->type == VIR_DOMAIN_NET_TYPE_VDPA &&
|
|
|
|
!virDomainNetGetModelString(net))
|
|
|
|
net->model = VIR_DOMAIN_NET_MODEL_VIRTIO;
|
|
|
|
else if (net->type != VIR_DOMAIN_NET_TYPE_HOSTDEV &&
|
2020-06-25 07:27:30 +00:00
|
|
|
!virDomainNetGetModelString(net) &&
|
|
|
|
virDomainNetResolveActualType(net) != VIR_DOMAIN_NET_TYPE_HOSTDEV)
|
2019-01-18 14:59:02 +00:00
|
|
|
net->model = qemuDomainDefaultNetModel(def, qemuCaps);
|
2018-05-24 15:01:55 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2019-11-25 10:54:23 +00:00
|
|
|
static int
|
2019-11-25 10:54:25 +00:00
|
|
|
qemuDomainDefaultVideoDevice(const virDomainDef *def,
|
|
|
|
virQEMUCapsPtr qemuCaps)
|
2019-11-25 10:54:23 +00:00
|
|
|
{
|
2019-12-06 09:45:27 +00:00
|
|
|
if (ARCH_IS_PPC64(def->os.arch))
|
2019-11-25 10:54:23 +00:00
|
|
|
return VIR_DOMAIN_VIDEO_TYPE_VGA;
|
2019-12-06 09:45:27 +00:00
|
|
|
if (qemuDomainIsARMVirt(def) ||
|
|
|
|
qemuDomainIsRISCVVirt(def) ||
|
|
|
|
ARCH_IS_S390(def->os.arch)) {
|
2019-11-25 10:54:23 +00:00
|
|
|
return VIR_DOMAIN_VIDEO_TYPE_VIRTIO;
|
2019-11-25 10:54:25 +00:00
|
|
|
}
|
2019-12-06 09:45:27 +00:00
|
|
|
if (virQEMUCapsGet(qemuCaps, QEMU_CAPS_DEVICE_CIRRUS_VGA))
|
|
|
|
return VIR_DOMAIN_VIDEO_TYPE_CIRRUS;
|
|
|
|
if (virQEMUCapsGet(qemuCaps, QEMU_CAPS_DEVICE_VGA))
|
|
|
|
return VIR_DOMAIN_VIDEO_TYPE_VGA;
|
|
|
|
return VIR_DOMAIN_VIDEO_TYPE_DEFAULT;
|
2019-11-25 10:54:23 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2018-05-24 15:01:55 +00:00
|
|
|
static int
|
|
|
|
qemuDomainDeviceVideoDefPostParse(virDomainVideoDefPtr video,
|
2019-11-25 10:54:25 +00:00
|
|
|
const virDomainDef *def,
|
|
|
|
virQEMUCapsPtr qemuCaps)
|
2018-05-24 15:01:55 +00:00
|
|
|
{
|
2019-11-25 10:54:23 +00:00
|
|
|
if (video->type == VIR_DOMAIN_VIDEO_TYPE_DEFAULT)
|
2019-11-25 10:54:25 +00:00
|
|
|
video->type = qemuDomainDefaultVideoDevice(def, qemuCaps);
|
2018-05-24 15:01:55 +00:00
|
|
|
|
|
|
|
if (video->type == VIR_DOMAIN_VIDEO_TYPE_QXL &&
|
|
|
|
!video->vgamem) {
|
|
|
|
video->vgamem = QEMU_QXL_VGAMEM_DEFAULT;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2018-05-24 15:01:55 +00:00
|
|
|
static int
|
|
|
|
qemuDomainDevicePanicDefPostParse(virDomainPanicDefPtr panic,
|
|
|
|
const virDomainDef *def)
|
|
|
|
{
|
|
|
|
if (panic->model == VIR_DOMAIN_PANIC_MODEL_DEFAULT) {
|
|
|
|
if (qemuDomainIsPSeries(def))
|
|
|
|
panic->model = VIR_DOMAIN_PANIC_MODEL_PSERIES;
|
|
|
|
else if (ARCH_IS_S390(def->os.arch))
|
|
|
|
panic->model = VIR_DOMAIN_PANIC_MODEL_S390;
|
|
|
|
else
|
|
|
|
panic->model = VIR_DOMAIN_PANIC_MODEL_ISA;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2018-05-22 09:21:15 +00:00
|
|
|
static int
|
|
|
|
qemuDomainVsockDefPostParse(virDomainVsockDefPtr vsock)
|
|
|
|
{
|
|
|
|
if (vsock->model == VIR_DOMAIN_VSOCK_MODEL_DEFAULT)
|
|
|
|
vsock->model = VIR_DOMAIN_VSOCK_MODEL_VIRTIO;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2020-06-17 07:29:13 +00:00
|
|
|
/**
 * qemuDomainDeviceHostdevDefPostParseRestoreSecAlias:
 * @hostdev: host device definition being parsed
 * @qemuCaps: emulator capabilities; may be NULL, in which case nothing is done
 * @parseFlags: bitwise-OR of virDomainDefParseFlags
 *
 * Re-generate the authentication secret alias for an iSCSI SCSI hostdev
 * if it was not stored in the status XML by an older libvirt.
 *
 * Note that qemuCaps should be always present for a status XML.
 */
static int
qemuDomainDeviceHostdevDefPostParseRestoreSecAlias(virDomainHostdevDefPtr hostdev,
                                                   virQEMUCapsPtr qemuCaps,
                                                   unsigned int parseFlags)
{
    qemuDomainStorageSourcePrivatePtr priv;
    virDomainHostdevSubsysSCSIPtr scsisrc = &hostdev->source.subsys.u.scsi;
    virDomainHostdevSubsysSCSIiSCSIPtr iscsisrc = &scsisrc->u.iscsi;
    g_autofree char *authalias = NULL;

    /* only status XMLs from a secret-object-capable qemu are affected */
    if (!(parseFlags & VIR_DOMAIN_DEF_PARSE_STATUS) ||
        !qemuCaps ||
        !virQEMUCapsGet(qemuCaps, QEMU_CAPS_OBJECT_SECRET))
        return 0;

    /* only iSCSI SCSI hostdevs with authentication need a secret alias */
    if (hostdev->mode != VIR_DOMAIN_HOSTDEV_MODE_SUBSYS ||
        hostdev->source.subsys.type != VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_SCSI ||
        scsisrc->protocol != VIR_DOMAIN_HOSTDEV_SCSI_PROTOCOL_TYPE_ISCSI ||
        !virQEMUCapsGet(qemuCaps, QEMU_CAPS_ISCSI_PASSWORD_SECRET) ||
        !qemuDomainStorageSourceHasAuth(iscsisrc->src))
        return 0;

    if (!(priv = qemuDomainStorageSourcePrivateFetch(iscsisrc->src)))
        return -1;

    /* alias already present in the status XML; nothing to restore */
    if (priv->secinfo)
        return 0;

    /* reconstruct the alias the old libvirt would have generated */
    authalias = g_strdup_printf("%s-secret0", hostdev->info->alias);

    if (qemuStorageSourcePrivateDataAssignSecinfo(&priv->secinfo, &authalias) < 0)
        return -1;

    return 0;
}
|
|
|
|
|
|
|
|
|
2020-09-09 15:58:34 +00:00
|
|
|
/**
 * qemuDomainDeviceHostdevDefPostParseRestoreBackendAlias:
 * @hostdev: host device definition being parsed
 * @qemuCaps: emulator capabilities; may be NULL, in which case nothing is done
 * @parseFlags: bitwise-OR of virDomainDefParseFlags
 *
 * Re-generate backend alias if it wasn't stored in the status XML by an older
 * libvirtd.
 *
 * Note that qemuCaps should be always present for a status XML.
 */
static int
qemuDomainDeviceHostdevDefPostParseRestoreBackendAlias(virDomainHostdevDefPtr hostdev,
                                                       virQEMUCapsPtr qemuCaps,
                                                       unsigned int parseFlags)
{
    virDomainHostdevSubsysSCSIPtr scsisrc = &hostdev->source.subsys.u.scsi;
    virStorageSourcePtr src;

    if (!(parseFlags & VIR_DOMAIN_DEF_PARSE_STATUS))
        return 0;

    /* only SCSI hostdevs using -blockdev need a backend node name */
    if (!qemuCaps ||
        hostdev->mode != VIR_DOMAIN_HOSTDEV_MODE_SUBSYS ||
        hostdev->source.subsys.type != VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_SCSI ||
        !virQEMUCapsGet(qemuCaps, QEMU_CAPS_BLOCKDEV_HOSTDEV_SCSI))
        return 0;

    /* locate (or lazily allocate) the storage source for the protocol */
    switch ((virDomainHostdevSCSIProtocolType) scsisrc->protocol) {
    case VIR_DOMAIN_HOSTDEV_SCSI_PROTOCOL_TYPE_NONE:
        if (!scsisrc->u.host.src)
            scsisrc->u.host.src = virStorageSourceNew();

        src = scsisrc->u.host.src;
        break;

    case VIR_DOMAIN_HOSTDEV_SCSI_PROTOCOL_TYPE_ISCSI:
        src = scsisrc->u.iscsi.src;
        break;

    case VIR_DOMAIN_HOSTDEV_SCSI_PROTOCOL_TYPE_LAST:
    default:
        virReportEnumRangeError(virDomainHostdevSCSIProtocolType, scsisrc->protocol);
        return -1;
    }

    /* reconstruct the node name the old libvirtd would have generated */
    if (!src->nodestorage)
        src->nodestorage = g_strdup_printf("libvirt-%s-backend", hostdev->info->alias);

    return 0;
}
|
|
|
|
|
|
|
|
|
2018-05-24 14:04:26 +00:00
|
|
|
static int
|
|
|
|
qemuDomainHostdevDefMdevPostParse(virDomainHostdevSubsysMediatedDevPtr mdevsrc,
|
|
|
|
virQEMUCapsPtr qemuCaps)
|
|
|
|
{
|
|
|
|
/* QEMU 2.12 added support for vfio-pci display type, we default to
|
|
|
|
* 'display=off' to stay safe from future changes */
|
|
|
|
if (virQEMUCapsGet(qemuCaps, QEMU_CAPS_VFIO_PCI_DISPLAY) &&
|
2018-08-30 17:07:34 +00:00
|
|
|
mdevsrc->model == VIR_MDEV_MODEL_TYPE_VFIO_PCI &&
|
2018-05-24 14:04:26 +00:00
|
|
|
mdevsrc->display == VIR_TRISTATE_SWITCH_ABSENT)
|
|
|
|
mdevsrc->display = VIR_TRISTATE_SWITCH_OFF;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
qemuDomainHostdevDefPostParse(virDomainHostdevDefPtr hostdev,
|
2020-06-17 07:29:13 +00:00
|
|
|
virQEMUCapsPtr qemuCaps,
|
|
|
|
unsigned int parseFlags)
|
2018-05-24 14:04:26 +00:00
|
|
|
{
|
|
|
|
virDomainHostdevSubsysPtr subsys = &hostdev->source.subsys;
|
|
|
|
|
2020-06-17 07:29:13 +00:00
|
|
|
if (qemuDomainDeviceHostdevDefPostParseRestoreSecAlias(hostdev, qemuCaps,
|
|
|
|
parseFlags) < 0)
|
|
|
|
return -1;
|
|
|
|
|
2020-09-09 15:58:34 +00:00
|
|
|
if (qemuDomainDeviceHostdevDefPostParseRestoreBackendAlias(hostdev, qemuCaps,
|
|
|
|
parseFlags) < 0)
|
|
|
|
return -1;
|
|
|
|
|
2018-05-24 14:04:26 +00:00
|
|
|
if (hostdev->mode == VIR_DOMAIN_HOSTDEV_MODE_SUBSYS &&
|
|
|
|
hostdev->source.subsys.type == VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_MDEV &&
|
|
|
|
qemuDomainHostdevDefMdevPostParse(&subsys->u.mdev, qemuCaps) < 0)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2020-02-05 18:35:45 +00:00
|
|
|
static int
|
2020-02-05 18:35:46 +00:00
|
|
|
qemuDomainTPMDefPostParse(virDomainTPMDefPtr tpm,
|
|
|
|
virArch arch)
|
2020-02-05 18:35:45 +00:00
|
|
|
{
|
2020-02-05 18:35:46 +00:00
|
|
|
if (tpm->model == VIR_DOMAIN_TPM_MODEL_DEFAULT) {
|
|
|
|
if (ARCH_IS_PPC64(arch))
|
|
|
|
tpm->model = VIR_DOMAIN_TPM_MODEL_SPAPR;
|
|
|
|
else
|
|
|
|
tpm->model = VIR_DOMAIN_TPM_MODEL_TIS;
|
|
|
|
}
|
|
|
|
|
2020-02-05 18:35:45 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2013-02-19 16:33:52 +00:00
|
|
|
/**
 * qemuDomainDeviceDefPostParse:
 * @dev: the device being post-processed
 * @def: domain definition the device belongs to (read-only)
 * @parseFlags: bitwise-OR of virDomainDefParseFlags
 * @opaque: the virQEMUDriverPtr
 * @parseOpaque: virQEMUCapsPtr for the emulator, or NULL
 *
 * Per-device post-parse dispatcher: forwards each device type to its
 * QEMU-specific post-parse handler.
 *
 * Returns 0 on success, -1 (with an error reported) on failure.
 */
static int
qemuDomainDeviceDefPostParse(virDomainDeviceDefPtr dev,
                             const virDomainDef *def,
                             unsigned int parseFlags,
                             void *opaque,
                             void *parseOpaque)
{
    virQEMUDriverPtr driver = opaque;
    /* Note that qemuCaps may be NULL when this function is called. This
     * function shall not fail in that case. It will be re-run on VM startup
     * with the capabilities populated. */
    virQEMUCapsPtr qemuCaps = parseOpaque;
    int ret = -1;

    switch ((virDomainDeviceType) dev->type) {
    case VIR_DOMAIN_DEVICE_NET:
        ret = qemuDomainDeviceNetDefPostParse(dev->data.net, def, qemuCaps);
        break;

    case VIR_DOMAIN_DEVICE_DISK:
        ret = qemuDomainDeviceDiskDefPostParse(dev->data.disk, qemuCaps,
                                               parseFlags);
        break;

    case VIR_DOMAIN_DEVICE_VIDEO:
        ret = qemuDomainDeviceVideoDefPostParse(dev->data.video, def, qemuCaps);
        break;

    case VIR_DOMAIN_DEVICE_PANIC:
        ret = qemuDomainDevicePanicDefPostParse(dev->data.panic, def);
        break;

    case VIR_DOMAIN_DEVICE_CONTROLLER:
        ret = qemuDomainControllerDefPostParse(dev->data.controller, def,
                                               qemuCaps, parseFlags);
        break;

    case VIR_DOMAIN_DEVICE_SHMEM:
        ret = qemuDomainShmemDefPostParse(dev->data.shmem);
        break;

    case VIR_DOMAIN_DEVICE_CHR:
        ret = qemuDomainChrDefPostParse(dev->data.chr, def, driver, parseFlags);
        break;

    case VIR_DOMAIN_DEVICE_VSOCK:
        ret = qemuDomainVsockDefPostParse(dev->data.vsock);
        break;

    case VIR_DOMAIN_DEVICE_HOSTDEV:
        ret = qemuDomainHostdevDefPostParse(dev->data.hostdev, qemuCaps, parseFlags);
        break;

    case VIR_DOMAIN_DEVICE_TPM:
        ret = qemuDomainTPMDefPostParse(dev->data.tpm, def->os.arch);
        break;

    /* device types with no QEMU-specific post-parse processing */
    case VIR_DOMAIN_DEVICE_LEASE:
    case VIR_DOMAIN_DEVICE_FS:
    case VIR_DOMAIN_DEVICE_INPUT:
    case VIR_DOMAIN_DEVICE_SOUND:
    case VIR_DOMAIN_DEVICE_WATCHDOG:
    case VIR_DOMAIN_DEVICE_GRAPHICS:
    case VIR_DOMAIN_DEVICE_HUB:
    case VIR_DOMAIN_DEVICE_REDIRDEV:
    case VIR_DOMAIN_DEVICE_SMARTCARD:
    case VIR_DOMAIN_DEVICE_MEMBALLOON:
    case VIR_DOMAIN_DEVICE_NVRAM:
    case VIR_DOMAIN_DEVICE_RNG:
    case VIR_DOMAIN_DEVICE_MEMORY:
    case VIR_DOMAIN_DEVICE_IOMMU:
    case VIR_DOMAIN_DEVICE_AUDIO:
        ret = 0;
        break;

    case VIR_DOMAIN_DEVICE_NONE:
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("unexpected VIR_DOMAIN_DEVICE_NONE"));
        break;

    case VIR_DOMAIN_DEVICE_LAST:
    default:
        virReportEnumRangeError(virDomainDeviceType, dev->type);
        break;
    }

    return ret;
}
|
|
|
|
|
|
|
|
|
2016-05-14 18:52:45 +00:00
|
|
|
static int
|
|
|
|
qemuDomainDefAssignAddresses(virDomainDef *def,
|
2019-10-14 12:45:33 +00:00
|
|
|
unsigned int parseFlags G_GNUC_UNUSED,
|
2016-09-23 09:04:39 +00:00
|
|
|
void *opaque,
|
2016-09-17 05:05:03 +00:00
|
|
|
void *parseOpaque)
|
2016-05-14 18:52:45 +00:00
|
|
|
{
|
|
|
|
virQEMUDriverPtr driver = opaque;
|
2017-08-16 14:00:25 +00:00
|
|
|
/* Note that qemuCaps may be NULL when this function is called. This
|
|
|
|
* function shall not fail in that case. It will be re-run on VM startup
|
|
|
|
* with the capabilities populated. */
|
2016-09-17 05:05:03 +00:00
|
|
|
virQEMUCapsPtr qemuCaps = parseOpaque;
|
2016-06-16 15:15:53 +00:00
|
|
|
bool newDomain = parseFlags & VIR_DOMAIN_DEF_PARSE_ABI_UPDATE;
|
2016-05-14 18:52:45 +00:00
|
|
|
|
2017-08-16 14:00:25 +00:00
|
|
|
/* Skip address assignment if @qemuCaps is not present. In such case devices
|
|
|
|
* which are automatically added may be missing. Additionally @qemuCaps should
|
|
|
|
* only be missing when reloading configs, thus addresses were already
|
|
|
|
* assigned. */
|
|
|
|
if (!qemuCaps)
|
|
|
|
return 1;
|
|
|
|
|
2017-08-15 13:25:23 +00:00
|
|
|
return qemuDomainAssignAddresses(def, qemuCaps, driver, NULL, newDomain);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
qemuDomainPostParseDataAlloc(const virDomainDef *def,
|
2019-10-14 12:45:33 +00:00
|
|
|
unsigned int parseFlags G_GNUC_UNUSED,
|
2017-08-15 13:25:23 +00:00
|
|
|
void *opaque,
|
|
|
|
void **parseOpaque)
|
|
|
|
{
|
|
|
|
virQEMUDriverPtr driver = opaque;
|
|
|
|
|
|
|
|
if (!(*parseOpaque = virQEMUCapsCacheLookup(driver->qemuCapsCache,
|
2020-07-15 14:53:11 +00:00
|
|
|
def->emulator)))
|
2017-08-16 14:00:25 +00:00
|
|
|
return 1;
|
2016-05-14 18:52:45 +00:00
|
|
|
|
2017-08-15 13:25:23 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
qemuDomainPostParseDataFree(void *parseOpaque)
|
|
|
|
{
|
|
|
|
virQEMUCapsPtr qemuCaps = parseOpaque;
|
2016-05-14 18:52:45 +00:00
|
|
|
|
|
|
|
virObjectUnref(qemuCaps);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2013-02-19 16:33:52 +00:00
|
|
|
/* Parser configuration hooking the QEMU-specific callbacks into the
 * generic domain XML parser. */
virDomainDefParserConfig virQEMUDriverDomainDefParserConfig = {
    /* runs before device post-parse, without capabilities */
    .domainPostParseBasicCallback = qemuDomainDefPostParseBasic,
    /* allocate/free the per-parse virQEMUCapsPtr passed as parseOpaque */
    .domainPostParseDataAlloc = qemuDomainPostParseDataAlloc,
    .domainPostParseDataFree = qemuDomainPostParseDataFree,
    /* per-device fixups (see the switch in qemuDomainDeviceDefPostParse) */
    .devicesPostParseCallback = qemuDomainDeviceDefPostParse,
    .domainPostParseCallback = qemuDomainDefPostParse,
    /* device address assignment; skipped when capabilities are missing */
    .assignAddressesCallback = qemuDomainDefAssignAddresses,
    .domainValidateCallback = qemuValidateDomainDef,
    .deviceValidateCallback = qemuValidateDomainDeviceDef,

    /* generic parser features this driver supports */
    .features = VIR_DOMAIN_DEF_FEATURE_MEMORY_HOTPLUG |
                VIR_DOMAIN_DEF_FEATURE_OFFLINE_VCPUPIN |
                VIR_DOMAIN_DEF_FEATURE_INDIVIDUAL_VCPUS |
                VIR_DOMAIN_DEF_FEATURE_USER_ALIAS |
                VIR_DOMAIN_DEF_FEATURE_FW_AUTOSELECT |
                VIR_DOMAIN_DEF_FEATURE_NET_MODEL_STRING,
};
|
|
|
|
|
|
|
|
|
2020-06-24 10:45:23 +00:00
|
|
|
void
|
2019-06-27 13:40:39 +00:00
|
|
|
qemuDomainObjSaveStatus(virQEMUDriverPtr driver,
|
|
|
|
virDomainObjPtr obj)
|
2011-06-06 08:34:33 +00:00
|
|
|
{
|
2020-01-09 18:33:45 +00:00
|
|
|
g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
|
2013-01-10 21:03:14 +00:00
|
|
|
|
|
|
|
if (virDomainObjIsActive(obj)) {
|
2019-11-27 12:53:10 +00:00
|
|
|
if (virDomainObjSave(obj, driver->xmlopt, cfg->stateDir) < 0)
|
2013-01-10 21:03:14 +00:00
|
|
|
VIR_WARN("Failed to save status on vm %s", obj->def->name);
|
2011-06-06 08:28:38 +00:00
|
|
|
}
|
2011-06-06 08:34:33 +00:00
|
|
|
}
|
|
|
|
|
2019-06-27 13:40:39 +00:00
|
|
|
|
|
|
|
void
|
|
|
|
qemuDomainSaveStatus(virDomainObjPtr obj)
|
|
|
|
{
|
|
|
|
qemuDomainObjSaveStatus(QEMU_DOMAIN_PRIVATE(obj)->driver, obj);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2019-07-04 16:51:36 +00:00
|
|
|
void
|
|
|
|
qemuDomainSaveConfig(virDomainObjPtr obj)
|
|
|
|
{
|
|
|
|
virQEMUDriverPtr driver = QEMU_DOMAIN_PRIVATE(obj)->driver;
|
2019-10-15 12:47:50 +00:00
|
|
|
g_autoptr(virQEMUDriverConfig) cfg = NULL;
|
2019-07-04 16:51:36 +00:00
|
|
|
virDomainDefPtr def = NULL;
|
|
|
|
|
|
|
|
if (virDomainObjIsActive(obj))
|
|
|
|
def = obj->newDef;
|
|
|
|
else
|
|
|
|
def = obj->def;
|
|
|
|
|
|
|
|
if (!def)
|
|
|
|
return;
|
|
|
|
|
|
|
|
cfg = virQEMUDriverGetConfig(driver);
|
|
|
|
|
2019-11-27 12:53:10 +00:00
|
|
|
if (virDomainDefSave(def, driver->xmlopt, cfg->configDir) < 0)
|
2019-07-04 16:51:36 +00:00
|
|
|
VIR_WARN("Failed to save config of vm %s", obj->def->name);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2013-02-06 18:17:20 +00:00
|
|
|
/*
 * obj must be locked before calling
 *
 * To be called immediately before any QEMU monitor API call
 * Must have already called qemuDomainObjBeginJob() and checked
 * that the VM is still active; may not be used for nested async
 * jobs.
 *
 * To be followed with qemuDomainObjExitMonitor() once complete
 *
 * On success @obj is unlocked and the monitor is locked+referenced;
 * qemuDomainObjExitMonitorInternal() restores the original state.
 */
static int
qemuDomainObjEnterMonitorInternal(virQEMUDriverPtr driver,
                                  virDomainObjPtr obj,
                                  qemuDomainAsyncJob asyncJob)
{
    qemuDomainObjPrivatePtr priv = obj->privateData;

    if (asyncJob != QEMU_ASYNC_JOB_NONE) {
        int ret;
        /* Async-job callers must acquire a nested job before touching the
         * monitor; this may wait and can fail (-1) or time out (-2). */
        if ((ret = qemuDomainObjBeginNestedJob(driver, obj, asyncJob)) < 0)
            return ret;
        /* The VM may have exited while we waited for the nested job. */
        if (!virDomainObjIsActive(obj)) {
            virReportError(VIR_ERR_OPERATION_FAILED, "%s",
                           _("domain is no longer running"));
            qemuDomainObjEndJob(driver, obj);
            return -1;
        }
    } else if (priv->job.asyncOwner == virThreadSelfID()) {
        /* Diagnostic only: an async job owner should have requested a
         * nested job instead of entering the monitor directly. */
        VIR_WARN("This thread seems to be the async job owner; entering"
                 " monitor without asking for a nested job is dangerous");
    } else if (priv->job.owner != virThreadSelfID()) {
        /* Diagnostic only: entering the monitor without holding the job
         * indicates a locking bug in the caller. */
        VIR_WARN("Entering a monitor without owning a job. "
                 "Job %s owner %s (%llu)",
                 qemuDomainJobTypeToString(priv->job.active),
                 priv->job.ownerAPI, priv->job.owner);
    }

    VIR_DEBUG("Entering monitor (mon=%p vm=%p name=%s)",
              priv->mon, obj, obj->def->name);
    /* Lock order: take the monitor lock (and a reference so it cannot be
     * freed under us) before dropping the domain object lock. */
    virObjectLock(priv->mon);
    virObjectRef(priv->mon);
    /* Record when we entered the monitor for stall diagnostics. */
    ignore_value(virTimeMillisNow(&priv->monStart));
    virObjectUnlock(obj);

    return 0;
}
|
|
|
|
|
2011-06-06 08:28:38 +00:00
|
|
|
/* Counterpart of qemuDomainObjEnterMonitorInternal(): drops the monitor
 * reference/lock taken there, re-locks @obj and ends a nested job if one
 * was started on entry. */
static void ATTRIBUTE_NONNULL(1)
qemuDomainObjExitMonitorInternal(virQEMUDriverPtr driver,
                                 virDomainObjPtr obj)
{
    qemuDomainObjPrivatePtr priv = obj->privateData;
    bool hasRefs;

    /* Arm the dispose watch so we can tell whether the following unref
     * destroys the monitor object. */
    qemuMonitorWatchDispose();
    virObjectUnref(priv->mon);

    hasRefs = !qemuMonitorWasDisposed();
    if (hasRefs)
        /* Monitor still alive: release the lock taken on entry. */
        virObjectUnlock(priv->mon);

    virObjectLock(obj);
    VIR_DEBUG("Exited monitor (mon=%p vm=%p name=%s)",
              priv->mon, obj, obj->def->name);

    priv->monStart = 0;
    if (!hasRefs)
        /* Our unref destroyed the monitor; forget the dangling pointer. */
        priv->mon = NULL;

    /* A nested job started by qemuDomainObjEnterMonitorInternal() must be
     * ended here, now that @obj is locked again. */
    if (priv->job.active == QEMU_JOB_ASYNC_NESTED)
        qemuDomainObjEndJob(driver, obj);
}
|
|
|
|
|
2012-11-28 16:43:10 +00:00
|
|
|
void qemuDomainObjEnterMonitor(virQEMUDriverPtr driver,
|
qemu: fix crash when mixing sync and async monitor jobs
Currently, we attempt to run sync job and async job at the same time. It
means that the monitor commands for two jobs can be run in any order.
In the function qemuDomainObjEnterMonitorInternal():
if (priv->job.active == QEMU_JOB_NONE && priv->job.asyncJob) {
if (qemuDomainObjBeginNestedJob(driver, obj) < 0)
We check whether the caller is an async job by priv->job.active and
priv->job.asynJob. But when an async job is running, and a sync job is
also running at the time of the check, then priv->job.active is not
QEMU_JOB_NONE. So we cannot check whether the caller is an async job
in the function qemuDomainObjEnterMonitorInternal(), and must instead
put the burden on the caller to tell us when an async command wants
to do a nested job.
Once the burden is on the caller, then only async monitor enters need
to worry about whether the VM is still running; for sync monitor enter,
the internal return is always 0, so lots of ignore_value can be dropped.
* src/qemu/THREADS.txt: Reflect new rules.
* src/qemu/qemu_domain.h (qemuDomainObjEnterMonitorAsync): New
prototype.
* src/qemu/qemu_process.h (qemuProcessStartCPUs)
(qemuProcessStopCPUs): Add parameter.
* src/qemu/qemu_migration.h (qemuMigrationToFile): Likewise.
(qemuMigrationWaitForCompletion): Make static.
* src/qemu/qemu_domain.c (qemuDomainObjEnterMonitorInternal): Add
parameter.
(qemuDomainObjEnterMonitorAsync): New function.
(qemuDomainObjEnterMonitor, qemuDomainObjEnterMonitorWithDriver):
Update callers.
* src/qemu/qemu_driver.c (qemuDomainSaveInternal)
(qemudDomainCoreDump, doCoreDump, processWatchdogEvent)
(qemudDomainSuspend, qemudDomainResume, qemuDomainSaveImageStartVM)
(qemuDomainSnapshotCreateActive, qemuDomainRevertToSnapshot):
Likewise.
* src/qemu/qemu_process.c (qemuProcessStopCPUs)
(qemuProcessFakeReboot, qemuProcessRecoverMigration)
(qemuProcessRecoverJob, qemuProcessStart): Likewise.
* src/qemu/qemu_migration.c (qemuMigrationToFile)
(qemuMigrationWaitForCompletion, qemuMigrationUpdateJobStatus)
(qemuMigrationJobStart, qemuDomainMigrateGraphicsRelocate)
(doNativeMigrate, doTunnelMigrate, qemuMigrationPerformJob)
(qemuMigrationPerformPhase, qemuMigrationFinish)
(qemuMigrationConfirm): Likewise.
* src/qemu/qemu_hotplug.c: Drop unneeded ignore_value.
2011-07-28 23:18:24 +00:00
|
|
|
virDomainObjPtr obj)
|
2011-07-03 21:55:47 +00:00
|
|
|
{
|
2013-02-06 18:17:20 +00:00
|
|
|
ignore_value(qemuDomainObjEnterMonitorInternal(driver, obj,
|
qemu: fix crash when mixing sync and async monitor jobs
Currently, we attempt to run sync job and async job at the same time. It
means that the monitor commands for two jobs can be run in any order.
In the function qemuDomainObjEnterMonitorInternal():
if (priv->job.active == QEMU_JOB_NONE && priv->job.asyncJob) {
if (qemuDomainObjBeginNestedJob(driver, obj) < 0)
We check whether the caller is an async job by priv->job.active and
priv->job.asynJob. But when an async job is running, and a sync job is
also running at the time of the check, then priv->job.active is not
QEMU_JOB_NONE. So we cannot check whether the caller is an async job
in the function qemuDomainObjEnterMonitorInternal(), and must instead
put the burden on the caller to tell us when an async command wants
to do a nested job.
Once the burden is on the caller, then only async monitor enters need
to worry about whether the VM is still running; for sync monitor enter,
the internal return is always 0, so lots of ignore_value can be dropped.
* src/qemu/THREADS.txt: Reflect new rules.
* src/qemu/qemu_domain.h (qemuDomainObjEnterMonitorAsync): New
prototype.
* src/qemu/qemu_process.h (qemuProcessStartCPUs)
(qemuProcessStopCPUs): Add parameter.
* src/qemu/qemu_migration.h (qemuMigrationToFile): Likewise.
(qemuMigrationWaitForCompletion): Make static.
* src/qemu/qemu_domain.c (qemuDomainObjEnterMonitorInternal): Add
parameter.
(qemuDomainObjEnterMonitorAsync): New function.
(qemuDomainObjEnterMonitor, qemuDomainObjEnterMonitorWithDriver):
Update callers.
* src/qemu/qemu_driver.c (qemuDomainSaveInternal)
(qemudDomainCoreDump, doCoreDump, processWatchdogEvent)
(qemudDomainSuspend, qemudDomainResume, qemuDomainSaveImageStartVM)
(qemuDomainSnapshotCreateActive, qemuDomainRevertToSnapshot):
Likewise.
* src/qemu/qemu_process.c (qemuProcessStopCPUs)
(qemuProcessFakeReboot, qemuProcessRecoverMigration)
(qemuProcessRecoverJob, qemuProcessStart): Likewise.
* src/qemu/qemu_migration.c (qemuMigrationToFile)
(qemuMigrationWaitForCompletion, qemuMigrationUpdateJobStatus)
(qemuMigrationJobStart, qemuDomainMigrateGraphicsRelocate)
(doNativeMigrate, doTunnelMigrate, qemuMigrationPerformJob)
(qemuMigrationPerformPhase, qemuMigrationFinish)
(qemuMigrationConfirm): Likewise.
* src/qemu/qemu_hotplug.c: Drop unneeded ignore_value.
2011-07-28 23:18:24 +00:00
|
|
|
QEMU_ASYNC_JOB_NONE));
|
2011-07-03 21:55:47 +00:00
|
|
|
}
|
|
|
|
|
2013-02-06 18:17:20 +00:00
|
|
|
/* obj must NOT be locked before calling
|
2011-07-03 21:55:47 +00:00
|
|
|
*
|
|
|
|
* Should be paired with an earlier qemuDomainObjEnterMonitor() call
|
2014-12-12 15:57:21 +00:00
|
|
|
*
|
|
|
|
* Returns -1 if the domain is no longer alive after exiting the monitor.
|
|
|
|
* In that case, the caller should be careful when using obj's data,
|
|
|
|
* e.g. the live definition in vm->def has been freed by qemuProcessStop
|
|
|
|
* and replaced by the persistent definition, so pointers stolen
|
|
|
|
* from the live definition could no longer be valid.
|
2011-07-03 21:55:47 +00:00
|
|
|
*/
|
2014-12-12 15:57:21 +00:00
|
|
|
int qemuDomainObjExitMonitor(virQEMUDriverPtr driver,
|
|
|
|
virDomainObjPtr obj)
|
2011-07-03 21:55:47 +00:00
|
|
|
{
|
2013-02-06 18:17:20 +00:00
|
|
|
qemuDomainObjExitMonitorInternal(driver, obj);
|
2014-12-12 15:57:21 +00:00
|
|
|
if (!virDomainObjIsActive(obj)) {
|
2018-05-05 12:04:21 +00:00
|
|
|
if (virGetLastErrorCode() == VIR_ERR_OK)
|
2015-03-19 03:14:39 +00:00
|
|
|
virReportError(VIR_ERR_OPERATION_FAILED, "%s",
|
|
|
|
_("domain is no longer running"));
|
2014-12-12 15:57:21 +00:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
return 0;
|
2011-07-03 21:55:47 +00:00
|
|
|
}
|
2010-12-16 16:12:02 +00:00
|
|
|
|
|
|
|
/*
|
2013-02-06 18:17:20 +00:00
|
|
|
* obj must be locked before calling
|
qemu: fix crash when mixing sync and async monitor jobs
Currently, we attempt to run sync job and async job at the same time. It
means that the monitor commands for two jobs can be run in any order.
In the function qemuDomainObjEnterMonitorInternal():
if (priv->job.active == QEMU_JOB_NONE && priv->job.asyncJob) {
if (qemuDomainObjBeginNestedJob(driver, obj) < 0)
We check whether the caller is an async job by priv->job.active and
priv->job.asynJob. But when an async job is running, and a sync job is
also running at the time of the check, then priv->job.active is not
QEMU_JOB_NONE. So we cannot check whether the caller is an async job
in the function qemuDomainObjEnterMonitorInternal(), and must instead
put the burden on the caller to tell us when an async command wants
to do a nested job.
Once the burden is on the caller, then only async monitor enters need
to worry about whether the VM is still running; for sync monitor enter,
the internal return is always 0, so lots of ignore_value can be dropped.
* src/qemu/THREADS.txt: Reflect new rules.
* src/qemu/qemu_domain.h (qemuDomainObjEnterMonitorAsync): New
prototype.
* src/qemu/qemu_process.h (qemuProcessStartCPUs)
(qemuProcessStopCPUs): Add parameter.
* src/qemu/qemu_migration.h (qemuMigrationToFile): Likewise.
(qemuMigrationWaitForCompletion): Make static.
* src/qemu/qemu_domain.c (qemuDomainObjEnterMonitorInternal): Add
parameter.
(qemuDomainObjEnterMonitorAsync): New function.
(qemuDomainObjEnterMonitor, qemuDomainObjEnterMonitorWithDriver):
Update callers.
* src/qemu/qemu_driver.c (qemuDomainSaveInternal)
(qemudDomainCoreDump, doCoreDump, processWatchdogEvent)
(qemudDomainSuspend, qemudDomainResume, qemuDomainSaveImageStartVM)
(qemuDomainSnapshotCreateActive, qemuDomainRevertToSnapshot):
Likewise.
* src/qemu/qemu_process.c (qemuProcessStopCPUs)
(qemuProcessFakeReboot, qemuProcessRecoverMigration)
(qemuProcessRecoverJob, qemuProcessStart): Likewise.
* src/qemu/qemu_migration.c (qemuMigrationToFile)
(qemuMigrationWaitForCompletion, qemuMigrationUpdateJobStatus)
(qemuMigrationJobStart, qemuDomainMigrateGraphicsRelocate)
(doNativeMigrate, doTunnelMigrate, qemuMigrationPerformJob)
(qemuMigrationPerformPhase, qemuMigrationFinish)
(qemuMigrationConfirm): Likewise.
* src/qemu/qemu_hotplug.c: Drop unneeded ignore_value.
2011-07-28 23:18:24 +00:00
|
|
|
*
|
|
|
|
* To be called immediately before any QEMU monitor API call.
|
2013-02-06 18:17:20 +00:00
|
|
|
* Must have already either called qemuDomainObjBeginJob()
|
qemu: fix crash when mixing sync and async monitor jobs
Currently, we attempt to run sync job and async job at the same time. It
means that the monitor commands for two jobs can be run in any order.
In the function qemuDomainObjEnterMonitorInternal():
if (priv->job.active == QEMU_JOB_NONE && priv->job.asyncJob) {
if (qemuDomainObjBeginNestedJob(driver, obj) < 0)
We check whether the caller is an async job by priv->job.active and
priv->job.asynJob. But when an async job is running, and a sync job is
also running at the time of the check, then priv->job.active is not
QEMU_JOB_NONE. So we cannot check whether the caller is an async job
in the function qemuDomainObjEnterMonitorInternal(), and must instead
put the burden on the caller to tell us when an async command wants
to do a nested job.
Once the burden is on the caller, then only async monitor enters need
to worry about whether the VM is still running; for sync monitor enter,
the internal return is always 0, so lots of ignore_value can be dropped.
* src/qemu/THREADS.txt: Reflect new rules.
* src/qemu/qemu_domain.h (qemuDomainObjEnterMonitorAsync): New
prototype.
* src/qemu/qemu_process.h (qemuProcessStartCPUs)
(qemuProcessStopCPUs): Add parameter.
* src/qemu/qemu_migration.h (qemuMigrationToFile): Likewise.
(qemuMigrationWaitForCompletion): Make static.
* src/qemu/qemu_domain.c (qemuDomainObjEnterMonitorInternal): Add
parameter.
(qemuDomainObjEnterMonitorAsync): New function.
(qemuDomainObjEnterMonitor, qemuDomainObjEnterMonitorWithDriver):
Update callers.
* src/qemu/qemu_driver.c (qemuDomainSaveInternal)
(qemudDomainCoreDump, doCoreDump, processWatchdogEvent)
(qemudDomainSuspend, qemudDomainResume, qemuDomainSaveImageStartVM)
(qemuDomainSnapshotCreateActive, qemuDomainRevertToSnapshot):
Likewise.
* src/qemu/qemu_process.c (qemuProcessStopCPUs)
(qemuProcessFakeReboot, qemuProcessRecoverMigration)
(qemuProcessRecoverJob, qemuProcessStart): Likewise.
* src/qemu/qemu_migration.c (qemuMigrationToFile)
(qemuMigrationWaitForCompletion, qemuMigrationUpdateJobStatus)
(qemuMigrationJobStart, qemuDomainMigrateGraphicsRelocate)
(doNativeMigrate, doTunnelMigrate, qemuMigrationPerformJob)
(qemuMigrationPerformPhase, qemuMigrationFinish)
(qemuMigrationConfirm): Likewise.
* src/qemu/qemu_hotplug.c: Drop unneeded ignore_value.
2011-07-28 23:18:24 +00:00
|
|
|
* and checked that the VM is still active, with asyncJob of
|
|
|
|
* QEMU_ASYNC_JOB_NONE; or already called qemuDomainObjBeginAsyncJob,
|
|
|
|
* with the same asyncJob.
|
|
|
|
*
|
|
|
|
* Returns 0 if job was started, in which case this must be followed with
|
2014-05-13 12:45:31 +00:00
|
|
|
* qemuDomainObjExitMonitor(); -2 if waiting for the nested job times out;
|
|
|
|
* or -1 if the job could not be started (probably because the vm exited
|
|
|
|
* in the meantime).
|
qemu: fix crash when mixing sync and async monitor jobs
Currently, we attempt to run sync job and async job at the same time. It
means that the monitor commands for two jobs can be run in any order.
In the function qemuDomainObjEnterMonitorInternal():
if (priv->job.active == QEMU_JOB_NONE && priv->job.asyncJob) {
if (qemuDomainObjBeginNestedJob(driver, obj) < 0)
We check whether the caller is an async job by priv->job.active and
priv->job.asynJob. But when an async job is running, and a sync job is
also running at the time of the check, then priv->job.active is not
QEMU_JOB_NONE. So we cannot check whether the caller is an async job
in the function qemuDomainObjEnterMonitorInternal(), and must instead
put the burden on the caller to tell us when an async command wants
to do a nested job.
Once the burden is on the caller, then only async monitor enters need
to worry about whether the VM is still running; for sync monitor enter,
the internal return is always 0, so lots of ignore_value can be dropped.
* src/qemu/THREADS.txt: Reflect new rules.
* src/qemu/qemu_domain.h (qemuDomainObjEnterMonitorAsync): New
prototype.
* src/qemu/qemu_process.h (qemuProcessStartCPUs)
(qemuProcessStopCPUs): Add parameter.
* src/qemu/qemu_migration.h (qemuMigrationToFile): Likewise.
(qemuMigrationWaitForCompletion): Make static.
* src/qemu/qemu_domain.c (qemuDomainObjEnterMonitorInternal): Add
parameter.
(qemuDomainObjEnterMonitorAsync): New function.
(qemuDomainObjEnterMonitor, qemuDomainObjEnterMonitorWithDriver):
Update callers.
* src/qemu/qemu_driver.c (qemuDomainSaveInternal)
(qemudDomainCoreDump, doCoreDump, processWatchdogEvent)
(qemudDomainSuspend, qemudDomainResume, qemuDomainSaveImageStartVM)
(qemuDomainSnapshotCreateActive, qemuDomainRevertToSnapshot):
Likewise.
* src/qemu/qemu_process.c (qemuProcessStopCPUs)
(qemuProcessFakeReboot, qemuProcessRecoverMigration)
(qemuProcessRecoverJob, qemuProcessStart): Likewise.
* src/qemu/qemu_migration.c (qemuMigrationToFile)
(qemuMigrationWaitForCompletion, qemuMigrationUpdateJobStatus)
(qemuMigrationJobStart, qemuDomainMigrateGraphicsRelocate)
(doNativeMigrate, doTunnelMigrate, qemuMigrationPerformJob)
(qemuMigrationPerformPhase, qemuMigrationFinish)
(qemuMigrationConfirm): Likewise.
* src/qemu/qemu_hotplug.c: Drop unneeded ignore_value.
2011-07-28 23:18:24 +00:00
|
|
|
*/
|
|
|
|
int
|
2012-11-28 16:43:10 +00:00
|
|
|
qemuDomainObjEnterMonitorAsync(virQEMUDriverPtr driver,
|
qemu: fix crash when mixing sync and async monitor jobs
Currently, we attempt to run sync job and async job at the same time. It
means that the monitor commands for two jobs can be run in any order.
In the function qemuDomainObjEnterMonitorInternal():
if (priv->job.active == QEMU_JOB_NONE && priv->job.asyncJob) {
if (qemuDomainObjBeginNestedJob(driver, obj) < 0)
We check whether the caller is an async job by priv->job.active and
priv->job.asynJob. But when an async job is running, and a sync job is
also running at the time of the check, then priv->job.active is not
QEMU_JOB_NONE. So we cannot check whether the caller is an async job
in the function qemuDomainObjEnterMonitorInternal(), and must instead
put the burden on the caller to tell us when an async command wants
to do a nested job.
Once the burden is on the caller, then only async monitor enters need
to worry about whether the VM is still running; for sync monitor enter,
the internal return is always 0, so lots of ignore_value can be dropped.
* src/qemu/THREADS.txt: Reflect new rules.
* src/qemu/qemu_domain.h (qemuDomainObjEnterMonitorAsync): New
prototype.
* src/qemu/qemu_process.h (qemuProcessStartCPUs)
(qemuProcessStopCPUs): Add parameter.
* src/qemu/qemu_migration.h (qemuMigrationToFile): Likewise.
(qemuMigrationWaitForCompletion): Make static.
* src/qemu/qemu_domain.c (qemuDomainObjEnterMonitorInternal): Add
parameter.
(qemuDomainObjEnterMonitorAsync): New function.
(qemuDomainObjEnterMonitor, qemuDomainObjEnterMonitorWithDriver):
Update callers.
* src/qemu/qemu_driver.c (qemuDomainSaveInternal)
(qemudDomainCoreDump, doCoreDump, processWatchdogEvent)
(qemudDomainSuspend, qemudDomainResume, qemuDomainSaveImageStartVM)
(qemuDomainSnapshotCreateActive, qemuDomainRevertToSnapshot):
Likewise.
* src/qemu/qemu_process.c (qemuProcessStopCPUs)
(qemuProcessFakeReboot, qemuProcessRecoverMigration)
(qemuProcessRecoverJob, qemuProcessStart): Likewise.
* src/qemu/qemu_migration.c (qemuMigrationToFile)
(qemuMigrationWaitForCompletion, qemuMigrationUpdateJobStatus)
(qemuMigrationJobStart, qemuDomainMigrateGraphicsRelocate)
(doNativeMigrate, doTunnelMigrate, qemuMigrationPerformJob)
(qemuMigrationPerformPhase, qemuMigrationFinish)
(qemuMigrationConfirm): Likewise.
* src/qemu/qemu_hotplug.c: Drop unneeded ignore_value.
2011-07-28 23:18:24 +00:00
|
|
|
virDomainObjPtr obj,
|
2014-06-15 16:32:56 +00:00
|
|
|
qemuDomainAsyncJob asyncJob)
|
2010-12-16 16:12:02 +00:00
|
|
|
{
|
2013-02-06 18:17:20 +00:00
|
|
|
return qemuDomainObjEnterMonitorInternal(driver, obj, asyncJob);
|
2010-12-16 16:12:02 +00:00
|
|
|
}
|
|
|
|
|
2011-10-05 17:31:54 +00:00
|
|
|
|
2013-02-06 18:17:20 +00:00
|
|
|
/*
 * obj must be locked before calling
 *
 * To be called immediately before any QEMU agent API call.
 * Must have already called qemuDomainObjBeginAgentJob() and
 * checked that the VM is still active.
 *
 * To be followed with qemuDomainObjExitAgent() once complete
 *
 * Returns the locked and referenced agent object; the caller must pass
 * the same pointer back to qemuDomainObjExitAgent().
 */
qemuAgentPtr
qemuDomainObjEnterAgent(virDomainObjPtr obj)
{
    qemuDomainObjPrivatePtr priv = obj->privateData;
    qemuAgentPtr agent = priv->agent;

    VIR_DEBUG("Entering agent (agent=%p vm=%p name=%s)",
              priv->agent, obj, obj->def->name);

    /* Lock order: lock+reference the agent before dropping the domain
     * object lock, mirroring the monitor enter/exit helpers. */
    virObjectLock(agent);
    virObjectRef(agent);
    virObjectUnlock(obj);

    return agent;
}
|
|
|
|
|
2013-02-06 18:17:20 +00:00
|
|
|
|
|
|
|
/* obj must NOT be locked before calling
 *
 * Should be paired with an earlier qemuDomainObjEnterAgent() call
 *
 * @agent must be the pointer returned by qemuDomainObjEnterAgent();
 * its lock and reference taken there are released here.
 */
void
qemuDomainObjExitAgent(virDomainObjPtr obj, qemuAgentPtr agent)
{
    /* Release the agent before re-locking @obj — reverse of the enter
     * path, avoiding lock-order inversion. */
    virObjectUnlock(agent);
    virObjectUnref(agent);
    virObjectLock(obj);

    VIR_DEBUG("Exited agent (agent=%p vm=%p name=%s)",
              agent, obj, obj->def->name);
}
|
|
|
|
|
2013-02-06 18:17:20 +00:00
|
|
|
/*
 * Drop the domain object lock before making a (potentially slow) call
 * to a remote libvirtd, e.g. during migration.  Must be paired with
 * qemuDomainObjExitRemote(), which re-acquires the lock.
 */
void qemuDomainObjEnterRemote(virDomainObjPtr obj)
{
    VIR_DEBUG("Entering remote (vm=%p name=%s)",
              obj, obj->def->name);
    virObjectUnlock(obj);
}
|
|
|
|
|
2018-06-28 09:38:52 +00:00
|
|
|
|
|
|
|
int
|
|
|
|
qemuDomainObjExitRemote(virDomainObjPtr obj,
|
|
|
|
bool checkActive)
|
2010-12-16 16:12:02 +00:00
|
|
|
{
|
2013-01-09 21:00:32 +00:00
|
|
|
virObjectLock(obj);
|
2013-10-31 11:27:10 +00:00
|
|
|
VIR_DEBUG("Exited remote (vm=%p name=%s)",
|
|
|
|
obj, obj->def->name);
|
2018-06-28 09:38:52 +00:00
|
|
|
|
|
|
|
if (checkActive && !virDomainObjIsActive(obj)) {
|
|
|
|
virReportError(VIR_ERR_OPERATION_FAILED,
|
|
|
|
_("domain '%s' is not running"),
|
|
|
|
obj->def->name);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
2010-12-16 16:12:02 +00:00
|
|
|
}
|
2011-01-31 10:47:03 +00:00
|
|
|
|
|
|
|
|
2017-06-14 11:42:16 +00:00
|
|
|
static virDomainDefPtr
|
|
|
|
qemuDomainDefFromXML(virQEMUDriverPtr driver,
|
2019-08-02 15:36:56 +00:00
|
|
|
virQEMUCapsPtr qemuCaps,
|
2017-06-14 11:42:16 +00:00
|
|
|
const char *xml)
|
|
|
|
{
|
|
|
|
virDomainDefPtr def;
|
|
|
|
|
2019-11-27 12:29:21 +00:00
|
|
|
def = virDomainDefParseString(xml, driver->xmlopt, qemuCaps,
|
2017-06-14 11:42:16 +00:00
|
|
|
VIR_DOMAIN_DEF_PARSE_INACTIVE |
|
|
|
|
VIR_DOMAIN_DEF_PARSE_SKIP_VALIDATE);
|
|
|
|
|
|
|
|
return def;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2013-06-11 13:03:17 +00:00
|
|
|
/*
 * Deep-copy a domain definition by formatting @src to XML and parsing
 * it back.  The XML round trip (rather than a structural copy) means
 * the qemu-specific formatting tweaks in qemuDomainDefFormatXML() are
 * applied to the result as well.
 *
 * Returns a newly allocated definition (caller owns it) or NULL on
 * error.
 */
virDomainDefPtr
qemuDomainDefCopy(virQEMUDriverPtr driver,
                  virQEMUCapsPtr qemuCaps,
                  virDomainDefPtr src,
                  unsigned int flags)
{
    g_autofree char *xml = NULL;

    if (!(xml = qemuDomainDefFormatXML(driver, qemuCaps, src, flags)))
        return NULL;

    return qemuDomainDefFromXML(driver, qemuCaps, xml);
}
|
|
|
|
|
2017-05-19 13:20:31 +00:00
|
|
|
|
2019-10-18 12:33:00 +00:00
|
|
|
int
|
|
|
|
qemuDomainMakeCPUMigratable(virCPUDefPtr cpu)
|
|
|
|
{
|
|
|
|
if (cpu->mode == VIR_CPU_MODE_CUSTOM &&
|
|
|
|
STREQ_NULLABLE(cpu->model, "Icelake-Server")) {
|
|
|
|
/* Originally Icelake-Server CPU model contained pconfig CPU feature.
|
|
|
|
* It was never actually enabled and thus it was removed. To enable
|
|
|
|
* migration to QEMU 3.1.0 (with both new and old libvirt), we
|
|
|
|
* explicitly disable pconfig in migration XML (otherwise old libvirt
|
|
|
|
* would think it was implicitly enabled on the source). New libvirt
|
|
|
|
* will drop it from the XML before starting the domain on new QEMU.
|
|
|
|
*/
|
|
|
|
if (virCPUDefUpdateFeature(cpu, "pconfig", VIR_CPU_FEATURE_DISABLE) < 0)
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2017-05-19 13:20:31 +00:00
|
|
|
/**
 * qemuDomainDefFormatBufInternal:
 *
 * Format @def into @buf, optionally massaging a *copy* of it first:
 * with VIR_DOMAIN_XML_UPDATE_CPU the guest CPU is updated against the
 * host model; with VIR_DOMAIN_XML_MIGRATABLE various qemu-added
 * defaults (USB/PCI controllers, panic devices, channel paths, serial
 * target types) are stripped or rewritten so the XML is accepted by
 * older libvirt on the migration destination.  @origCPU, when given,
 * replaces the live (updated) CPU definition.  @def itself is never
 * modified.
 *
 * Returns 0 on success, -1 on failure.
 */
static int
qemuDomainDefFormatBufInternal(virQEMUDriverPtr driver,
                               virQEMUCapsPtr qemuCaps,
                               virDomainDefPtr def,
                               virCPUDefPtr origCPU,
                               unsigned int flags,
                               virBuffer *buf)
{
    g_autoptr(virDomainDef) copy = NULL;

    virCheckFlags(VIR_DOMAIN_XML_COMMON_FLAGS | VIR_DOMAIN_XML_UPDATE_CPU, -1);

    /* No tweaks requested: format the caller's def as-is. */
    if (!(flags & (VIR_DOMAIN_XML_UPDATE_CPU | VIR_DOMAIN_XML_MIGRATABLE)))
        goto format;

    /* Work on a copy so the caller's definition stays untouched. */
    if (!(copy = virDomainDefCopy(def, driver->xmlopt, qemuCaps,
                                  flags & VIR_DOMAIN_XML_MIGRATABLE)))
        return -1;

    def = copy;

    /* Update guest CPU requirements according to host CPU */
    if ((flags & VIR_DOMAIN_XML_UPDATE_CPU) &&
        def->cpu &&
        (def->cpu->mode != VIR_CPU_MODE_CUSTOM ||
         def->cpu->model)) {
        g_autoptr(virQEMUCaps) qCaps = NULL;

        /* Prefer caller-supplied capabilities; fall back to a cache lookup. */
        if (qemuCaps) {
            qCaps = virObjectRef(qemuCaps);
        } else {
            if (!(qCaps = virQEMUCapsCacheLookupCopy(driver->qemuCapsCache,
                                                     def->virtType,
                                                     def->emulator,
                                                     def->os.machine)))
                return -1;
        }

        if (virCPUUpdate(def->os.arch, def->cpu,
                         virQEMUCapsGetHostModel(qCaps, def->virtType,
                                                 VIR_QEMU_CAPS_HOST_CPU_MIGRATABLE)) < 0)
            return -1;
    }

    if ((flags & VIR_DOMAIN_XML_MIGRATABLE)) {
        size_t i;
        int toremove = 0;
        virDomainControllerDefPtr usb = NULL, pci = NULL;

        /* If only the default USB controller is present, we can remove it
         * and make the XML compatible with older versions of libvirt which
         * didn't support USB controllers in the XML but always added the
         * default one to qemu anyway.
         */
        for (i = 0; i < def->ncontrollers; i++) {
            if (def->controllers[i]->type == VIR_DOMAIN_CONTROLLER_TYPE_USB) {
                if (usb) {
                    /* More than one USB controller: keep them all. */
                    usb = NULL;
                    break;
                }
                usb = def->controllers[i];
            }
        }

        /* In order to maintain compatibility with version of libvirt that
         * didn't support <controller type='usb'/> (<= 0.9.4), we need to
         * drop the default USB controller, ie. a USB controller at index
         * zero with no model or with the default piix3-ohci model.
         *
         * However, we only need to do so for x86 i440fx machine types,
         * because other architectures and machine types were introduced
         * when libvirt already supported <controller type='usb'/>.
         */
        if (qemuDomainIsI440FX(def) &&
            usb && usb->idx == 0 &&
            (usb->model == VIR_DOMAIN_CONTROLLER_MODEL_USB_DEFAULT ||
             usb->model == VIR_DOMAIN_CONTROLLER_MODEL_USB_PIIX3_UHCI) &&
            !virDomainDeviceAliasIsUserAlias(usb->info.alias)) {
            VIR_DEBUG("Removing default USB controller from domain '%s'"
                      " for migration compatibility", def->name);
            toremove++;
        } else {
            usb = NULL;
        }

        /* Remove the default PCI controller if there is only one present
         * and its model is pci-root */
        for (i = 0; i < def->ncontrollers; i++) {
            if (def->controllers[i]->type == VIR_DOMAIN_CONTROLLER_TYPE_PCI) {
                if (pci) {
                    /* More than one PCI controller: keep them all. */
                    pci = NULL;
                    break;
                }
                pci = def->controllers[i];
            }
        }

        if (pci && pci->idx == 0 &&
            pci->model == VIR_DOMAIN_CONTROLLER_MODEL_PCI_ROOT &&
            !virDomainDeviceAliasIsUserAlias(pci->info.alias) &&
            !pci->opts.pciopts.pcihole64) {
            VIR_DEBUG("Removing default pci-root from domain '%s'"
                      " for migration compatibility", def->name);
            toremove++;
        } else {
            pci = NULL;
        }

        if (toremove) {
            /* Rebuild the controller array without the flagged entries. */
            virDomainControllerDefPtr *controllers = def->controllers;
            int ncontrollers = def->ncontrollers;

            def->controllers = g_new0(virDomainControllerDefPtr, ncontrollers - toremove);
            def->ncontrollers = 0;

            for (i = 0; i < ncontrollers; i++) {
                if (controllers[i] != usb && controllers[i] != pci)
                    def->controllers[def->ncontrollers++] = controllers[i];
            }

            VIR_FREE(controllers);
            virDomainControllerDefFree(pci);
            virDomainControllerDefFree(usb);
        }

        /* Remove the panic device for selected models if present */
        for (i = 0; i < def->npanics; i++) {
            if (def->panics[i]->model == VIR_DOMAIN_PANIC_MODEL_S390 ||
                def->panics[i]->model == VIR_DOMAIN_PANIC_MODEL_PSERIES) {
                VIR_DELETE_ELEMENT(def->panics, i, def->npanics);
                break;
            }
        }

        for (i = 0; i < def->nchannels; i++)
            qemuDomainChrDefDropDefaultPath(def->channels[i], driver);

        for (i = 0; i < def->nserials; i++) {
            virDomainChrDefPtr serial = def->serials[i];

            /* Historically, the native console type for some machine types
             * was not set at all, which means it defaulted to ISA even
             * though that was not even remotely accurate. To ensure migration
             * towards older libvirt versions works for such guests, we switch
             * it back to the default here */
            if (flags & VIR_DOMAIN_XML_MIGRATABLE) {
                switch ((virDomainChrSerialTargetType)serial->targetType) {
                case VIR_DOMAIN_CHR_SERIAL_TARGET_TYPE_SPAPR_VIO:
                case VIR_DOMAIN_CHR_SERIAL_TARGET_TYPE_SYSTEM:
                    serial->targetType = VIR_DOMAIN_CHR_SERIAL_TARGET_TYPE_NONE;
                    serial->targetModel = VIR_DOMAIN_CHR_SERIAL_TARGET_MODEL_NONE;
                    break;
                case VIR_DOMAIN_CHR_SERIAL_TARGET_TYPE_ISA:
                case VIR_DOMAIN_CHR_SERIAL_TARGET_TYPE_PCI:
                case VIR_DOMAIN_CHR_SERIAL_TARGET_TYPE_USB:
                case VIR_DOMAIN_CHR_SERIAL_TARGET_TYPE_SCLP:
                case VIR_DOMAIN_CHR_SERIAL_TARGET_TYPE_NONE:
                case VIR_DOMAIN_CHR_SERIAL_TARGET_TYPE_LAST:
                    /* Nothing to do */
                    break;
                }
            }
        }

        /* Replace the CPU definition updated according to QEMU with the one
         * used for starting the domain. The updated def will be sent
         * separately for backward compatibility.
         */
        if (origCPU) {
            virCPUDefFree(def->cpu);
            if (!(def->cpu = virCPUDefCopy(origCPU)))
                return -1;
        }

        if (def->cpu && qemuDomainMakeCPUMigratable(def->cpu) < 0)
            return -1;
    }

 format:
    return virDomainDefFormatInternal(def, driver->xmlopt, buf,
                                      virDomainDefFormatConvertXMLFlags(flags));
}
|
2011-05-04 10:59:20 +00:00
|
|
|
|
2017-05-19 13:20:31 +00:00
|
|
|
|
|
|
|
int
|
|
|
|
qemuDomainDefFormatBuf(virQEMUDriverPtr driver,
|
2019-08-05 14:05:20 +00:00
|
|
|
virQEMUCapsPtr qemuCaps,
|
2017-05-19 13:20:31 +00:00
|
|
|
virDomainDefPtr def,
|
|
|
|
unsigned int flags,
|
|
|
|
virBufferPtr buf)
|
|
|
|
{
|
2019-08-05 14:05:20 +00:00
|
|
|
return qemuDomainDefFormatBufInternal(driver, qemuCaps, def, NULL, flags, buf);
|
2017-05-19 13:20:31 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static char *
|
|
|
|
qemuDomainDefFormatXMLInternal(virQEMUDriverPtr driver,
|
2019-08-05 14:05:20 +00:00
|
|
|
virQEMUCapsPtr qemuCaps,
|
2017-05-19 13:20:31 +00:00
|
|
|
virDomainDefPtr def,
|
|
|
|
virCPUDefPtr origCPU,
|
|
|
|
unsigned int flags)
|
2012-05-04 19:00:13 +00:00
|
|
|
{
|
2020-07-02 22:26:41 +00:00
|
|
|
g_auto(virBuffer) buf = VIR_BUFFER_INITIALIZER;
|
2012-05-04 19:00:13 +00:00
|
|
|
|
2019-08-05 14:05:20 +00:00
|
|
|
if (qemuDomainDefFormatBufInternal(driver, qemuCaps, def, origCPU, flags, &buf) < 0)
|
2012-05-04 19:00:13 +00:00
|
|
|
return NULL;
|
|
|
|
|
|
|
|
return virBufferContentAndReset(&buf);
|
|
|
|
}
|
|
|
|
|
2017-05-19 13:20:31 +00:00
|
|
|
|
|
|
|
char *
|
|
|
|
qemuDomainDefFormatXML(virQEMUDriverPtr driver,
|
2019-08-05 14:05:20 +00:00
|
|
|
virQEMUCapsPtr qemuCaps,
|
2017-05-19 13:20:31 +00:00
|
|
|
virDomainDefPtr def,
|
|
|
|
unsigned int flags)
|
|
|
|
{
|
2019-08-05 14:05:20 +00:00
|
|
|
return qemuDomainDefFormatXMLInternal(driver, qemuCaps, def, NULL, flags);
|
2017-05-19 13:20:31 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2012-11-28 16:43:10 +00:00
|
|
|
char *qemuDomainFormatXML(virQEMUDriverPtr driver,
|
2011-05-27 10:30:26 +00:00
|
|
|
virDomainObjPtr vm,
|
2012-10-08 09:58:05 +00:00
|
|
|
unsigned int flags)
|
2011-05-27 10:30:26 +00:00
|
|
|
{
|
|
|
|
virDomainDefPtr def;
|
2017-05-19 13:20:31 +00:00
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
|
|
|
virCPUDefPtr origCPU = NULL;
|
2011-05-27 10:30:26 +00:00
|
|
|
|
2016-06-22 13:53:48 +00:00
|
|
|
if ((flags & VIR_DOMAIN_XML_INACTIVE) && vm->newDef) {
|
2011-05-27 10:30:26 +00:00
|
|
|
def = vm->newDef;
|
2016-06-22 13:53:48 +00:00
|
|
|
} else {
|
2011-05-27 10:30:26 +00:00
|
|
|
def = vm->def;
|
2017-05-19 13:20:31 +00:00
|
|
|
origCPU = priv->origCPU;
|
2016-06-22 13:53:48 +00:00
|
|
|
}
|
2011-05-27 10:30:26 +00:00
|
|
|
|
2019-08-05 14:05:20 +00:00
|
|
|
return qemuDomainDefFormatXMLInternal(driver, priv->qemuCaps, def, origCPU, flags);
|
2011-05-27 10:30:26 +00:00
|
|
|
}
|
|
|
|
|
2012-03-09 15:42:46 +00:00
|
|
|
char *
|
2012-11-28 16:43:10 +00:00
|
|
|
qemuDomainDefFormatLive(virQEMUDriverPtr driver,
|
2019-08-05 14:05:20 +00:00
|
|
|
virQEMUCapsPtr qemuCaps,
|
2012-03-09 15:42:46 +00:00
|
|
|
virDomainDefPtr def,
|
2017-05-19 13:20:31 +00:00
|
|
|
virCPUDefPtr origCPU,
|
2012-05-04 19:23:17 +00:00
|
|
|
bool inactive,
|
|
|
|
bool compatible)
|
2012-03-09 15:42:46 +00:00
|
|
|
{
|
|
|
|
unsigned int flags = QEMU_DOMAIN_FORMAT_LIVE_FLAGS;
|
|
|
|
|
|
|
|
if (inactive)
|
|
|
|
flags |= VIR_DOMAIN_XML_INACTIVE;
|
2012-10-08 09:58:05 +00:00
|
|
|
if (compatible)
|
|
|
|
flags |= VIR_DOMAIN_XML_MIGRATABLE;
|
2012-03-09 15:42:46 +00:00
|
|
|
|
2019-08-05 14:05:20 +00:00
|
|
|
return qemuDomainDefFormatXMLInternal(driver, qemuCaps, def, origCPU, flags);
|
2012-03-09 15:42:46 +00:00
|
|
|
}
|
|
|
|
|
2011-05-27 10:30:26 +00:00
|
|
|
|
2012-11-28 16:43:10 +00:00
|
|
|
/*
 * Record taint flag @taint on the domain (if not already set), emit a
 * warning, and append a note to the domain's log file — either via the
 * supplied @logCtxt or, when NULL, via qemuDomainLogAppendMessage().
 * Logging is best-effort: failures never surface to the caller.
 */
void qemuDomainObjTaint(virQEMUDriverPtr driver,
                        virDomainObjPtr obj,
                        virDomainTaintFlags taint,
                        qemuDomainLogContextPtr logCtxt)
{
    virErrorPtr orig_err = NULL;
    g_autofree char *timestamp = NULL;
    char uuidstr[VIR_UUID_STRING_BUFLEN];
    int rc;

    /* Already tainted with this flag: nothing to do. */
    if (!virDomainObjTaint(obj, taint))
        return;

    virUUIDFormat(obj->def->uuid, uuidstr);

    VIR_WARN("Domain id=%d name='%s' uuid=%s is tainted: %s",
             obj->def->id,
             obj->def->name,
             uuidstr,
             virDomainTaintTypeToString(taint));

    /* We don't care about errors logging taint info, so
     * preserve original error, and clear any error that
     * is raised */
    virErrorPreserveLast(&orig_err);

    if (!(timestamp = virTimeStringNow()))
        goto cleanup;

    if (logCtxt) {
        rc = qemuDomainLogContextWrite(logCtxt,
                                       "%s: Domain id=%d is tainted: %s\n",
                                       timestamp,
                                       obj->def->id,
                                       virDomainTaintTypeToString(taint));
    } else {
        rc = qemuDomainLogAppendMessage(driver, obj,
                                        "%s: Domain id=%d is tainted: %s\n",
                                        timestamp,
                                        obj->def->id,
                                        virDomainTaintTypeToString(taint));
    }

    if (rc < 0)
        virResetLastError();

 cleanup:
    /* Restore whatever error was pending before we started logging. */
    virErrorRestore(&orig_err);
}
|
|
|
|
|
|
|
|
|
2012-11-28 16:43:10 +00:00
|
|
|
/*
 * Inspect the domain configuration and apply every taint flag that
 * matters: high privileges, hook scripts, custom qemu:commandline
 * args/env, custom capability tweaks, host-passthrough CPU on incoming
 * migration, plus per-disk, per-hostdev and per-net checks and a
 * custom DTB.  Each hit is reported via qemuDomainObjTaint().
 */
void qemuDomainObjCheckTaint(virQEMUDriverPtr driver,
                             virDomainObjPtr obj,
                             qemuDomainLogContextPtr logCtxt,
                             bool incomingMigration)
{
    size_t i;
    g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
    qemuDomainObjPrivatePtr priv = obj->privateData;
    bool custom_hypervisor_feat = false;

    /* Privileged daemon running qemu as root is a taint. */
    if (driver->privileged &&
        (cfg->user == 0 ||
         cfg->group == 0))
        qemuDomainObjTaint(driver, obj, VIR_DOMAIN_TAINT_HIGH_PRIVILEGES, logCtxt);

    if (priv->hookRun)
        qemuDomainObjTaint(driver, obj, VIR_DOMAIN_TAINT_HOOK, logCtxt);

    if (obj->def->namespaceData) {
        qemuDomainXmlNsDefPtr qemuxmlns = obj->def->namespaceData;
        /* Extra qemu command line arguments or environment variables. */
        if (qemuxmlns->num_args || qemuxmlns->num_env)
            qemuDomainObjTaint(driver, obj, VIR_DOMAIN_TAINT_CUSTOM_ARGV, logCtxt);
        if (qemuxmlns->ncapsadd > 0 || qemuxmlns->ncapsdel > 0)
            custom_hypervisor_feat = true;
    }

    /* Per-domain capability overrides or a driver-wide capability
     * filter both count as custom hypervisor features. */
    if (custom_hypervisor_feat ||
        (cfg->capabilityfilters && *cfg->capabilityfilters)) {
        qemuDomainObjTaint(driver, obj,
                           VIR_DOMAIN_TAINT_CUSTOM_HYPERVISOR_FEATURE, logCtxt);
    }

    if (obj->def->cpu &&
        obj->def->cpu->mode == VIR_CPU_MODE_HOST_PASSTHROUGH &&
        incomingMigration)
        qemuDomainObjTaint(driver, obj, VIR_DOMAIN_TAINT_HOST_CPU, logCtxt);

    for (i = 0; i < obj->def->ndisks; i++)
        qemuDomainObjCheckDiskTaint(driver, obj, obj->def->disks[i], logCtxt);

    for (i = 0; i < obj->def->nhostdevs; i++)
        qemuDomainObjCheckHostdevTaint(driver, obj, obj->def->hostdevs[i],
                                       logCtxt);

    for (i = 0; i < obj->def->nnets; i++)
        qemuDomainObjCheckNetTaint(driver, obj, obj->def->nets[i], logCtxt);

    if (obj->def->os.dtb)
        qemuDomainObjTaint(driver, obj, VIR_DOMAIN_TAINT_CUSTOM_DTB, logCtxt);
}
|
|
|
|
|
|
|
|
|
2012-11-28 16:43:10 +00:00
|
|
|
void qemuDomainObjCheckDiskTaint(virQEMUDriverPtr driver,
|
2011-05-04 10:59:20 +00:00
|
|
|
virDomainObjPtr obj,
|
2011-05-05 11:48:07 +00:00
|
|
|
virDomainDiskDefPtr disk,
|
2015-11-12 12:56:30 +00:00
|
|
|
qemuDomainLogContextPtr logCtxt)
|
2011-05-04 10:59:20 +00:00
|
|
|
{
|
2014-09-18 17:54:18 +00:00
|
|
|
if (disk->rawio == VIR_TRISTATE_BOOL_YES)
|
|
|
|
qemuDomainObjTaint(driver, obj, VIR_DOMAIN_TAINT_HIGH_PRIVILEGES,
|
2015-11-12 12:56:30 +00:00
|
|
|
logCtxt);
|
2013-01-10 21:03:14 +00:00
|
|
|
|
2015-05-12 19:58:31 +00:00
|
|
|
if (disk->device == VIR_DOMAIN_DISK_DEVICE_CDROM &&
|
|
|
|
virStorageSourceGetActualType(disk->src) == VIR_STORAGE_TYPE_BLOCK &&
|
2018-05-07 15:19:27 +00:00
|
|
|
disk->src->path && virFileIsCDROM(disk->src->path) == 1)
|
2015-05-12 19:58:31 +00:00
|
|
|
qemuDomainObjTaint(driver, obj, VIR_DOMAIN_TAINT_CDROM_PASSTHROUGH,
|
2015-11-12 12:56:30 +00:00
|
|
|
logCtxt);
|
2011-05-04 10:59:20 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2014-09-09 22:51:02 +00:00
|
|
|
void qemuDomainObjCheckHostdevTaint(virQEMUDriverPtr driver,
|
|
|
|
virDomainObjPtr obj,
|
|
|
|
virDomainHostdevDefPtr hostdev,
|
2015-11-12 12:56:30 +00:00
|
|
|
qemuDomainLogContextPtr logCtxt)
|
2014-09-09 22:51:02 +00:00
|
|
|
{
|
2016-11-15 18:25:41 +00:00
|
|
|
if (!virHostdevIsSCSIDevice(hostdev))
|
2016-11-15 18:25:39 +00:00
|
|
|
return;
|
2014-09-09 22:51:02 +00:00
|
|
|
|
2016-11-15 18:25:39 +00:00
|
|
|
if (hostdev->source.subsys.u.scsi.rawio == VIR_TRISTATE_BOOL_YES)
|
|
|
|
qemuDomainObjTaint(driver, obj, VIR_DOMAIN_TAINT_HIGH_PRIVILEGES, logCtxt);
|
2014-09-09 22:51:02 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2012-11-28 16:43:10 +00:00
|
|
|
void qemuDomainObjCheckNetTaint(virQEMUDriverPtr driver,
|
2011-05-04 10:59:20 +00:00
|
|
|
virDomainObjPtr obj,
|
2011-05-05 11:48:07 +00:00
|
|
|
virDomainNetDefPtr net,
|
2015-11-12 12:56:30 +00:00
|
|
|
qemuDomainLogContextPtr logCtxt)
|
2011-05-04 10:59:20 +00:00
|
|
|
{
|
config: report error when script given for inappropriate interface type
This fixes https://bugzilla.redhat.com/show_bug.cgi?id=638633
Although scripts are not used by interfaces of type other than
"ethernet" in qemu, due to the fact that the parser stores the script
name in a union that is only valid when type is ethernet or bridge,
there is no way for anyone except the parser itself to catch the
problem of specifying an interface script for an inappropriate
interface type (by the time the parsed data gets back to the code that
called the parser, all evidence that a script was specified is
forgotten).
Since the parser itself should be agnostic to which type of interface
allows scripts (an example of why: a script specified for an interface
of type bridge is valid for xen domains, but not for qemu domains),
the solution here is to move the script out of the union(s) in the
DomainNetDef, always populate it when specified (regardless of
interface type), and let the driver decide whether or not it is
appropriate.
Currently the qemu, xen, libxml, and uml drivers recognize the script
parameter and do something with it (the uml driver only to report that
it isn't supported). Those drivers have been updated to log a
CONFIG_UNSUPPORTED error when a script is specified for an interface
type that's inappropriate for that particular hypervisor.
(NB: There was earlier discussion of solving this problem by adding a
VALIDATE flag to all libvirt APIs that accept XML, which would cause
the XML to be validated against the RNG files. One statement during
that discussion was that the RNG shouldn't contain hypervisor-specific
things, though, and a proper solution to this problem would require
that (again, because a script for an interface of type "bridge" is
accepted by xen, but not by qemu).
2012-01-06 17:59:47 +00:00
|
|
|
/* script is only useful for NET_TYPE_ETHERNET (qemu) and
|
|
|
|
* NET_TYPE_BRIDGE (xen), but could be (incorrectly) specified for
|
|
|
|
* any interface type. In any case, it's adding user sauce into
|
|
|
|
* the soup, so it should taint the domain.
|
|
|
|
*/
|
|
|
|
if (net->script != NULL)
|
2015-11-12 12:56:30 +00:00
|
|
|
qemuDomainObjTaint(driver, obj, VIR_DOMAIN_TAINT_SHELL_SCRIPTS, logCtxt);
|
2011-05-04 10:59:20 +00:00
|
|
|
}
|
2011-05-05 11:38:04 +00:00
|
|
|
|
|
|
|
|
2015-11-12 12:43:29 +00:00
|
|
|
/*
 * Create a log context for the domain's log file <logDir>/<name>.log.
 *
 * Two backends: when stdio_handler=logd the file is opened through
 * virtlogd (which also reports the current inode/offset); otherwise the
 * file is opened directly.  In the direct case, MODE_START additionally
 * truncates the file for unprivileged daemons and opens a read fd so
 * early qemu output can be reported on startup failure.
 *
 * Returns a new context (unref with g_object_unref) or NULL on error.
 */
qemuDomainLogContextPtr qemuDomainLogContextNew(virQEMUDriverPtr driver,
                                                virDomainObjPtr vm,
                                                qemuDomainLogContextMode mode)
{
    g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
    qemuDomainLogContextPtr ctxt = QEMU_DOMAIN_LOG_CONTEXT(g_object_new(QEMU_TYPE_DOMAIN_LOG_CONTEXT, NULL));

    VIR_DEBUG("Context new %p stdioLogD=%d", ctxt, cfg->stdioLogD);
    ctxt->writefd = -1;
    ctxt->readfd = -1;

    ctxt->path = g_strdup_printf("%s/%s.log", cfg->logDir, vm->def->name);

    if (cfg->stdioLogD) {
        /* Logging goes through the virtlogd daemon. */
        ctxt->manager = virLogManagerNew(driver->privileged);
        if (!ctxt->manager)
            goto error;

        ctxt->writefd = virLogManagerDomainOpenLogFile(ctxt->manager,
                                                       "qemu",
                                                       vm->def->uuid,
                                                       vm->def->name,
                                                       ctxt->path,
                                                       0,
                                                       &ctxt->inode,
                                                       &ctxt->pos);
        if (ctxt->writefd < 0)
            goto error;
    } else {
        if ((ctxt->writefd = open(ctxt->path, O_WRONLY | O_CREAT | O_APPEND, S_IRUSR | S_IWUSR)) < 0) {
            virReportSystemError(errno, _("failed to create logfile %s"),
                                 ctxt->path);
            goto error;
        }
        if (virSetCloseExec(ctxt->writefd) < 0) {
            virReportSystemError(errno, _("failed to set close-on-exec flag on %s"),
                                 ctxt->path);
            goto error;
        }

        /* For unprivileged startup we must truncate the file since
         * we can't rely on logrotate. We don't use O_TRUNC since
         * it is better for SELinux policy if we truncate afterwards */
        if (mode == QEMU_DOMAIN_LOG_CONTEXT_MODE_START &&
            !driver->privileged &&
            ftruncate(ctxt->writefd, 0) < 0) {
            virReportSystemError(errno, _("failed to truncate %s"),
                                 ctxt->path);
            goto error;
        }

        if (mode == QEMU_DOMAIN_LOG_CONTEXT_MODE_START) {
            /* Separate read fd so startup errors can be read back. */
            if ((ctxt->readfd = open(ctxt->path, O_RDONLY)) < 0) {
                virReportSystemError(errno, _("failed to open logfile %s"),
                                     ctxt->path);
                goto error;
            }
            if (virSetCloseExec(ctxt->readfd) < 0) {
                virReportSystemError(errno, _("failed to set close-on-exec flag on %s"),
                                     ctxt->path);
                goto error;
            }
        }

        /* Remember the current end of file as the message start offset. */
        if ((ctxt->pos = lseek(ctxt->writefd, 0, SEEK_END)) < 0) {
            virReportSystemError(errno, _("failed to seek in log file %s"),
                                 ctxt->path);
            goto error;
        }
    }

    return ctxt;

 error:
    g_clear_object(&ctxt);
    return NULL;
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
 * printf-style append of a message to the domain log via @ctxt.
 * Without virtlogd the write fd is first repositioned to the end of
 * the file (another writer may have appended meanwhile).
 *
 * Returns 0 on success, -1 on failure.
 */
int qemuDomainLogContextWrite(qemuDomainLogContextPtr ctxt,
                              const char *fmt, ...)
{
    va_list argptr;
    g_autofree char *message = NULL;
    int ret = -1;

    va_start(argptr, fmt);

    message = g_strdup_vprintf(fmt, argptr);
    if (!ctxt->manager &&
        lseek(ctxt->writefd, 0, SEEK_END) < 0) {
        virReportSystemError(errno, "%s",
                             _("Unable to seek to end of domain logfile"));
        goto cleanup;
    }
    if (safewrite(ctxt->writefd, message, strlen(message)) < 0) {
        virReportSystemError(errno, "%s",
                             _("Unable to write to domain logfile"));
        goto cleanup;
    }

    ret = 0;

 cleanup:
    va_end(argptr);
    return ret;
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
 * Read up to 128KiB of log content starting at the position recorded
 * in @ctxt, either through virtlogd or directly from the read fd.
 * On success *msg receives a NUL-terminated buffer owned by the
 * caller.
 *
 * Returns the number of bytes read, or -1 on error.
 */
ssize_t qemuDomainLogContextRead(qemuDomainLogContextPtr ctxt,
                                 char **msg)
{
    char *buf;
    size_t buflen;

    VIR_DEBUG("Context read %p manager=%p inode=%llu pos=%llu",
              ctxt, ctxt->manager,
              (unsigned long long)ctxt->inode,
              (unsigned long long)ctxt->pos);

    if (ctxt->manager) {
        /* virtlogd path: let the daemon read from the recorded offset. */
        buf = virLogManagerDomainReadLogFile(ctxt->manager,
                                             ctxt->path,
                                             ctxt->inode,
                                             ctxt->pos,
                                             1024 * 128,
                                             0);
        if (!buf)
            return -1;
        buflen = strlen(buf);
    } else {
        ssize_t got;

        buflen = 1024 * 128;

        /* Best effort jump to start of messages */
        ignore_value(lseek(ctxt->readfd, ctxt->pos, SEEK_SET));

        buf = g_new0(char, buflen);

        /* Leave room for the trailing NUL. */
        got = saferead(ctxt->readfd, buf, buflen - 1);
        if (got < 0) {
            VIR_FREE(buf);
            virReportSystemError(errno, "%s",
                                 _("Unable to read from log file"));
            return -1;
        }

        buf[got] = '\0';

        /* Shrink the buffer to what was actually read. */
        buf = g_renew(char, buf, got + 1);
        buflen = got;
    }

    *msg = buf;

    return buflen;
}
|
|
|
|
|
|
|
|
|
2016-06-07 14:19:03 +00:00
|
|
|
/**
|
|
|
|
* qemuDomainLogAppendMessage:
|
|
|
|
*
|
|
|
|
* This is a best-effort attempt to add a log message to the qemu log file
|
|
|
|
* either by using virtlogd or the legacy approach */
|
|
|
|
/*
 * Append a printf-formatted message to the domain's log file without
 * needing an existing log context — used for one-off notes.  Goes
 * through virtlogd when stdio_handler=logd, otherwise opens the file
 * directly for the duration of the write.
 *
 * Returns 0 on success, -1 on failure.
 */
int
qemuDomainLogAppendMessage(virQEMUDriverPtr driver,
                           virDomainObjPtr vm,
                           const char *fmt,
                           ...)
{
    g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
    virLogManagerPtr manager = NULL;
    va_list ap;
    g_autofree char *path = NULL;
    int writefd = -1;
    g_autofree char *message = NULL;
    int ret = -1;

    va_start(ap, fmt);

    message = g_strdup_vprintf(fmt, ap);

    VIR_DEBUG("Append log message (vm='%s' message='%s) stdioLogD=%d",
              vm->def->name, message, cfg->stdioLogD);

    path = g_strdup_printf("%s/%s.log", cfg->logDir, vm->def->name);

    if (cfg->stdioLogD) {
        /* Hand the message to virtlogd. */
        if (!(manager = virLogManagerNew(driver->privileged)))
            goto cleanup;

        if (virLogManagerDomainAppendMessage(manager, "qemu", vm->def->uuid,
                                             vm->def->name, path, message, 0) < 0)
            goto cleanup;
    } else {
        /* Direct file append. */
        if ((writefd = open(path, O_WRONLY | O_CREAT | O_APPEND, S_IRUSR | S_IWUSR)) < 0) {
            virReportSystemError(errno, _("failed to create logfile %s"),
                                 path);
            goto cleanup;
        }

        if (safewrite(writefd, message, strlen(message)) < 0)
            goto cleanup;
    }

    ret = 0;

 cleanup:
    va_end(ap);
    VIR_FORCE_CLOSE(writefd);
    virLogManagerFree(manager);

    return ret;
}
|
|
|
|
|
|
|
|
|
2015-11-12 12:43:29 +00:00
|
|
|
/* Return the file descriptor used for writing to the domain log. */
int qemuDomainLogContextGetWriteFD(qemuDomainLogContextPtr ctxt)
{
    return ctxt->writefd;
}
|
|
|
|
|
|
|
|
|
|
|
|
/* Record the current end-of-log position (and inode when virtlogd is in
 * use) in @ctxt, so later reads can start from this point. */
void qemuDomainLogContextMarkPosition(qemuDomainLogContextPtr ctxt)
{
    if (!ctxt->manager) {
        /* Plain file: current position is simply the end of the file */
        ctxt->pos = lseek(ctxt->writefd, 0, SEEK_END);
        return;
    }

    /* virtlogd tracks the position (and inode) for us */
    virLogManagerDomainGetLogFilePosition(ctxt->manager,
                                          ctxt->path,
                                          0,
                                          &ctxt->inode,
                                          &ctxt->pos);
}
|
|
|
|
|
|
|
|
|
2016-02-23 13:05:09 +00:00
|
|
|
/* Return the virtlogd manager associated with the log context, or NULL
 * when the legacy direct-file logging is in use. */
virLogManagerPtr qemuDomainLogContextGetManager(qemuDomainLogContextPtr ctxt)
{
    return ctxt->manager;
}
|
|
|
|
|
|
|
|
|
2011-09-21 19:08:51 +00:00
|
|
|
/* Locate an appropriate 'qemu-img' binary. */
|
|
|
|
const char *
|
2012-11-28 16:43:10 +00:00
|
|
|
qemuFindQemuImgBinary(virQEMUDriverPtr driver)
|
2011-09-21 19:08:51 +00:00
|
|
|
{
|
2013-01-23 12:19:15 +00:00
|
|
|
if (!driver->qemuImgBinary)
|
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
2016-04-15 21:00:40 +00:00
|
|
|
"%s", _("unable to find qemu-img"));
|
2011-09-21 19:08:51 +00:00
|
|
|
|
|
|
|
return driver->qemuImgBinary;
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
qemuDomainSnapshotWriteMetadata(virDomainObjPtr vm,
|
2019-03-22 04:45:25 +00:00
|
|
|
virDomainMomentObjPtr snapshot,
|
2017-06-01 22:27:33 +00:00
|
|
|
virDomainXMLOptionPtr xmlopt,
|
2019-03-05 02:25:33 +00:00
|
|
|
const char *snapshotDir)
|
2011-09-21 19:08:51 +00:00
|
|
|
{
|
2020-01-09 18:33:44 +00:00
|
|
|
g_autofree char *newxml = NULL;
|
|
|
|
g_autofree char *snapDir = NULL;
|
|
|
|
g_autofree char *snapFile = NULL;
|
2011-09-21 19:08:51 +00:00
|
|
|
char uuidstr[VIR_UUID_STRING_BUFLEN];
|
snapshot: Drop virDomainSnapshotDef.current
The only use for the 'current' member of virDomainSnapshotDef was with
the PARSE/FORMAT_INTERNAL flag for controlling an internal-use
<active> element marking whether a particular snapshot definition was
current, and even then, only by the qemu driver on output, and by qemu
and test driver on input. But this duplicates vm->snapshot_current,
and gets in the way of potential simplifications to have qemu store a
single file for all snapshots rather than one file per snapshot. Get
rid of the member by adding a bool* parameter during parse (ignored if
the PARSE_INTERNAL flag is not set), and by adding a new flag during
format (if FORMAT_INTERNAL is set, the value printed in <active>
depends on the new FORMAT_CURRENT).
Then update the qemu driver accordingly, which involves hoisting
assignments to vm->current_snapshot to occur prior to any point where
a snapshot XML file is written (although qemu kept
vm->current_snapshot and snapshot->def_current in sync by the end of
the function, they were not always identical in the middle of
functions, so the shuffling gets a bit interesting). Later patches
will clean up some of that confusing churn to vm->current_snapshot.
Note: even if later patches refactor qemu to no longer use
FORMAT_INTERNAL for output (by storing bulk snapshot XML instead), we
will always need PARSE_INTERNAL for input (because on upgrade, a new
libvirt still has to parse XML left from a previous libvirt).
Signed-off-by: Eric Blake <eblake@redhat.com>
Reviewed-by: John Ferlan <jferlan@redhat.com>
2019-03-19 03:56:19 +00:00
|
|
|
unsigned int flags = VIR_DOMAIN_SNAPSHOT_FORMAT_SECURE |
|
|
|
|
VIR_DOMAIN_SNAPSHOT_FORMAT_INTERNAL;
|
2019-03-18 21:13:50 +00:00
|
|
|
virDomainSnapshotDefPtr def = virDomainSnapshotObjGetDef(snapshot);
|
2011-09-21 19:08:51 +00:00
|
|
|
|
2019-03-21 20:00:08 +00:00
|
|
|
if (virDomainSnapshotGetCurrent(vm->snapshots) == snapshot)
|
snapshot: Drop virDomainSnapshotDef.current
The only use for the 'current' member of virDomainSnapshotDef was with
the PARSE/FORMAT_INTERNAL flag for controlling an internal-use
<active> element marking whether a particular snapshot definition was
current, and even then, only by the qemu driver on output, and by qemu
and test driver on input. But this duplicates vm->snapshot_current,
and gets in the way of potential simplifications to have qemu store a
single file for all snapshots rather than one file per snapshot. Get
rid of the member by adding a bool* parameter during parse (ignored if
the PARSE_INTERNAL flag is not set), and by adding a new flag during
format (if FORMAT_INTERNAL is set, the value printed in <active>
depends on the new FORMAT_CURRENT).
Then update the qemu driver accordingly, which involves hoisting
assignments to vm->current_snapshot to occur prior to any point where
a snapshot XML file is written (although qemu kept
vm->current_snapshot and snapshot->def_current in sync by the end of
the function, they were not always identical in the middle of
functions, so the shuffling gets a bit interesting). Later patches
will clean up some of that confusing churn to vm->current_snapshot.
Note: even if later patches refactor qemu to no longer use
FORMAT_INTERNAL for output (by storing bulk snapshot XML instead), we
will always need PARSE_INTERNAL for input (because on upgrade, a new
libvirt still has to parse XML left from a previous libvirt).
Signed-off-by: Eric Blake <eblake@redhat.com>
Reviewed-by: John Ferlan <jferlan@redhat.com>
2019-03-19 03:56:19 +00:00
|
|
|
flags |= VIR_DOMAIN_SNAPSHOT_FORMAT_CURRENT;
|
2011-09-21 19:08:51 +00:00
|
|
|
virUUIDFormat(vm->def->uuid, uuidstr);
|
2019-11-27 13:10:21 +00:00
|
|
|
newxml = virDomainSnapshotDefFormat(uuidstr, def, xmlopt, flags);
|
2012-03-09 15:42:46 +00:00
|
|
|
if (newxml == NULL)
|
2011-09-21 19:08:51 +00:00
|
|
|
return -1;
|
|
|
|
|
2019-10-22 13:26:14 +00:00
|
|
|
snapDir = g_strdup_printf("%s/%s", snapshotDir, vm->def->name);
|
2011-09-21 19:08:51 +00:00
|
|
|
if (virFileMakePath(snapDir) < 0) {
|
|
|
|
virReportSystemError(errno, _("cannot create snapshot directory '%s'"),
|
|
|
|
snapDir);
|
2020-01-09 18:33:46 +00:00
|
|
|
return -1;
|
2011-09-21 19:08:51 +00:00
|
|
|
}
|
|
|
|
|
2019-10-22 13:26:14 +00:00
|
|
|
snapFile = g_strdup_printf("%s/%s.xml", snapDir, def->parent.name);
|
2011-09-21 19:08:51 +00:00
|
|
|
|
2020-01-09 18:33:46 +00:00
|
|
|
return virXMLSaveFile(snapFile, NULL, "snapshot-edit", newxml);
|
2011-09-21 19:08:51 +00:00
|
|
|
}
|
|
|
|
|
2019-04-10 15:42:11 +00:00
|
|
|
|
2011-09-21 19:08:51 +00:00
|
|
|
/* The domain is expected to be locked and inactive. Return -1 on normal
|
|
|
|
* failure, 1 if we skipped a disk due to try_all. */
|
2012-03-17 15:54:44 +00:00
|
|
|
static int
|
2012-11-28 16:43:10 +00:00
|
|
|
qemuDomainSnapshotForEachQcow2Raw(virQEMUDriverPtr driver,
|
2012-03-17 15:54:44 +00:00
|
|
|
virDomainDefPtr def,
|
|
|
|
const char *name,
|
|
|
|
const char *op,
|
|
|
|
bool try_all,
|
|
|
|
int ndisks)
|
2011-09-21 19:08:51 +00:00
|
|
|
{
|
2020-04-22 14:25:24 +00:00
|
|
|
const char *qemuimgbin;
|
Convert 'int i' to 'size_t i' in src/qemu files
Convert the type of loop iterators named 'i', 'j', k',
'ii', 'jj', 'kk', to be 'size_t' instead of 'int' or
'unsigned int', also santizing 'ii', 'jj', 'kk' to use
the normal 'i', 'j', 'k' naming
Signed-off-by: Daniel P. Berrange <berrange@redhat.com>
2013-07-08 14:09:33 +00:00
|
|
|
size_t i;
|
2011-09-21 19:08:51 +00:00
|
|
|
bool skipped = false;
|
|
|
|
|
2020-04-22 14:25:24 +00:00
|
|
|
qemuimgbin = qemuFindQemuImgBinary(driver);
|
|
|
|
if (qemuimgbin == NULL) {
|
2011-09-21 19:08:51 +00:00
|
|
|
/* qemuFindQemuImgBinary set the error */
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2012-03-17 15:54:44 +00:00
|
|
|
for (i = 0; i < ndisks; i++) {
|
2020-04-22 14:25:24 +00:00
|
|
|
g_autoptr(virCommand) cmd = virCommandNewArgList(qemuimgbin, "snapshot",
|
|
|
|
op, name, NULL);
|
2020-11-23 09:22:30 +00:00
|
|
|
int format = virDomainDiskGetFormat(def->disks[i]);
|
2020-04-22 14:25:24 +00:00
|
|
|
|
2011-09-21 19:08:51 +00:00
|
|
|
/* FIXME: we also need to handle LVM here */
|
2020-11-23 09:22:30 +00:00
|
|
|
if (def->disks[i]->device != VIR_DOMAIN_DISK_DEVICE_DISK)
|
|
|
|
continue;
|
|
|
|
|
2020-11-23 09:30:31 +00:00
|
|
|
if (!virStorageSourceIsLocalStorage(def->disks[i]->src)) {
|
|
|
|
virReportError(VIR_ERR_OPERATION_INVALID,
|
|
|
|
_("can't manipulate inactive snapshots of disk '%s'"),
|
|
|
|
def->disks[i]->dst);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2020-11-23 09:22:30 +00:00
|
|
|
if (format > 0 && format != VIR_STORAGE_FILE_QCOW2) {
|
|
|
|
if (try_all) {
|
|
|
|
/* Continue on even in the face of error, since other
|
|
|
|
* disks in this VM may have the same snapshot name.
|
|
|
|
*/
|
|
|
|
VIR_WARN("skipping snapshot action on %s",
|
|
|
|
def->disks[i]->dst);
|
|
|
|
skipped = true;
|
|
|
|
continue;
|
|
|
|
} else if (STREQ(op, "-c") && i) {
|
|
|
|
/* We must roll back partial creation by deleting
|
|
|
|
* all earlier snapshots. */
|
|
|
|
qemuDomainSnapshotForEachQcow2Raw(driver, def, name,
|
|
|
|
"-d", false, i);
|
2011-09-21 19:08:51 +00:00
|
|
|
}
|
2020-11-23 09:22:30 +00:00
|
|
|
virReportError(VIR_ERR_OPERATION_INVALID,
|
|
|
|
_("Disk device '%s' does not support snapshotting"),
|
|
|
|
def->disks[i]->dst);
|
|
|
|
return -1;
|
|
|
|
}
|
2011-09-21 19:08:51 +00:00
|
|
|
|
2020-11-23 09:22:30 +00:00
|
|
|
virCommandAddArg(cmd, virDomainDiskGetSource(def->disks[i]));
|
|
|
|
|
|
|
|
if (virCommandRun(cmd, NULL) < 0) {
|
|
|
|
if (try_all) {
|
|
|
|
VIR_WARN("skipping snapshot action on %s",
|
|
|
|
def->disks[i]->dst);
|
|
|
|
skipped = true;
|
|
|
|
continue;
|
|
|
|
} else if (STREQ(op, "-c") && i) {
|
|
|
|
/* We must roll back partial creation by deleting
|
|
|
|
* all earlier snapshots. */
|
|
|
|
qemuDomainSnapshotForEachQcow2Raw(driver, def, name,
|
|
|
|
"-d", false, i);
|
2011-09-21 19:08:51 +00:00
|
|
|
}
|
2020-11-23 09:22:30 +00:00
|
|
|
return -1;
|
2011-09-21 19:08:51 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return skipped ? 1 : 0;
|
|
|
|
}
|
|
|
|
|
2012-03-17 15:54:44 +00:00
|
|
|
/* The domain is expected to be locked and inactive. Return -1 on normal
|
|
|
|
* failure, 1 if we skipped a disk due to try_all. */
|
|
|
|
int
|
2012-11-28 16:43:10 +00:00
|
|
|
qemuDomainSnapshotForEachQcow2(virQEMUDriverPtr driver,
|
2020-11-23 10:13:19 +00:00
|
|
|
virDomainDefPtr def,
|
2019-03-22 04:45:25 +00:00
|
|
|
virDomainMomentObjPtr snap,
|
2012-03-17 15:54:44 +00:00
|
|
|
const char *op,
|
|
|
|
bool try_all)
|
|
|
|
{
|
2019-03-22 04:44:33 +00:00
|
|
|
return qemuDomainSnapshotForEachQcow2Raw(driver, def, snap->def->name,
|
2012-03-17 15:54:44 +00:00
|
|
|
op, try_all, def->ndisks);
|
|
|
|
}
|
|
|
|
|
2011-09-21 19:08:51 +00:00
|
|
|
/* Discard one snapshot (or its metadata), without reparenting any children. */
|
|
|
|
int
|
2012-11-28 16:43:10 +00:00
|
|
|
qemuDomainSnapshotDiscard(virQEMUDriverPtr driver,
|
2011-09-21 19:08:51 +00:00
|
|
|
virDomainObjPtr vm,
|
2019-03-22 04:45:25 +00:00
|
|
|
virDomainMomentObjPtr snap,
|
2018-10-18 00:24:34 +00:00
|
|
|
bool update_parent,
|
2011-09-21 19:08:51 +00:00
|
|
|
bool metadata_only)
|
|
|
|
{
|
2020-01-09 18:33:44 +00:00
|
|
|
g_autofree char *snapFile = NULL;
|
2011-09-21 19:08:51 +00:00
|
|
|
qemuDomainObjPrivatePtr priv;
|
2019-03-22 04:45:25 +00:00
|
|
|
virDomainMomentObjPtr parentsnap = NULL;
|
2020-01-09 18:33:45 +00:00
|
|
|
g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
|
2011-09-21 19:08:51 +00:00
|
|
|
|
|
|
|
if (!metadata_only) {
|
|
|
|
if (!virDomainObjIsActive(vm)) {
|
|
|
|
/* Ignore any skipped disks */
|
2020-11-23 10:13:19 +00:00
|
|
|
|
|
|
|
/* Prefer action on the disks in use at the time the snapshot was
|
|
|
|
* created; but fall back to current definition if dealing with a
|
|
|
|
* snapshot created prior to libvirt 0.9.5. */
|
|
|
|
virDomainDefPtr def = snap->def->dom;
|
|
|
|
|
|
|
|
if (!def)
|
|
|
|
def = vm->def;
|
|
|
|
|
|
|
|
return qemuDomainSnapshotForEachQcow2(driver, def, snap, "-d", true);
|
2011-09-21 19:08:51 +00:00
|
|
|
} else {
|
|
|
|
priv = vm->privateData;
|
2013-02-06 18:17:20 +00:00
|
|
|
qemuDomainObjEnterMonitor(driver, vm);
|
2011-09-21 19:08:51 +00:00
|
|
|
/* we continue on even in the face of error */
|
2019-03-22 04:44:33 +00:00
|
|
|
qemuMonitorDeleteSnapshot(priv->mon, snap->def->name);
|
2014-12-16 09:40:58 +00:00
|
|
|
ignore_value(qemuDomainObjExitMonitor(driver, vm));
|
2011-09-21 19:08:51 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-10-22 13:26:14 +00:00
|
|
|
snapFile = g_strdup_printf("%s/%s/%s.xml", cfg->snapshotDir, vm->def->name,
|
|
|
|
snap->def->name);
|
2011-09-21 19:08:51 +00:00
|
|
|
|
2019-03-21 20:00:08 +00:00
|
|
|
if (snap == virDomainSnapshotGetCurrent(vm->snapshots)) {
|
|
|
|
virDomainSnapshotSetCurrent(vm->snapshots, NULL);
|
2019-05-08 16:39:13 +00:00
|
|
|
if (update_parent && snap->def->parent_name) {
|
snapshot: make virDomainSnapshotObjList opaque
We were failing to react to allocation failure when initializing
a snapshot object list. Changing things to store a pointer
instead of a complete object adds one more possible point of
allocation failure, but at the same time, will make it easier to
react to failure now, as well as making it easier for a future
patch to split all virDomainSnapshotPtr handling into a separate
file, as I continue to add even more snapshot code.
Luckily, there was only one client outside of domain_conf.c that
was actually peeking inside the object, and a new wrapper function
was easy.
* src/conf/domain_conf.h (_virDomainObj): Use a pointer.
(virDomainSnapshotObjListInit): Rename.
(virDomainSnapshotObjListFree, virDomainSnapshotForEach): New
declarations.
(_virDomainSnapshotObjList): Move definitions...
* src/conf/domain_conf.c: ...here.
(virDomainSnapshotObjListInit, virDomainSnapshotObjListDeinit):
Rename...
(virDomainSnapshotObjListNew, virDomainSnapshotObjListFree): ...to
these.
(virDomainSnapshotForEach): New function.
(virDomainObjDispose, virDomainListPopulate): Adjust callers.
* src/qemu/qemu_domain.c (qemuDomainSnapshotDiscard)
(qemuDomainSnapshotDiscardAllMetadata): Likewise.
* src/qemu/qemu_migration.c (qemuMigrationIsAllowed): Likewise.
* src/qemu/qemu_driver.c (qemuDomainSnapshotLoad)
(qemuDomainUndefineFlags, qemuDomainSnapshotCreateXML)
(qemuDomainSnapshotListNames, qemuDomainSnapshotNum)
(qemuDomainListAllSnapshots)
(qemuDomainSnapshotListChildrenNames)
(qemuDomainSnapshotNumChildren)
(qemuDomainSnapshotListAllChildren)
(qemuDomainSnapshotLookupByName, qemuDomainSnapshotGetParent)
(qemuDomainSnapshotGetXMLDesc, qemuDomainSnapshotIsCurrent)
(qemuDomainSnapshotHasMetadata, qemuDomainRevertToSnapshot)
(qemuDomainSnapshotDelete): Likewise.
* src/libvirt_private.syms (domain_conf.h): Export new function.
2012-08-14 06:22:39 +00:00
|
|
|
parentsnap = virDomainSnapshotFindByName(vm->snapshots,
|
2019-05-08 16:39:13 +00:00
|
|
|
snap->def->parent_name);
|
2011-09-21 19:08:51 +00:00
|
|
|
if (!parentsnap) {
|
|
|
|
VIR_WARN("missing parent snapshot matching name '%s'",
|
2019-05-08 16:39:13 +00:00
|
|
|
snap->def->parent_name);
|
2011-09-21 19:08:51 +00:00
|
|
|
} else {
|
2019-03-21 20:00:08 +00:00
|
|
|
virDomainSnapshotSetCurrent(vm->snapshots, parentsnap);
|
2019-11-27 13:19:09 +00:00
|
|
|
if (qemuDomainSnapshotWriteMetadata(vm, parentsnap,
|
2017-06-01 22:27:33 +00:00
|
|
|
driver->xmlopt,
|
2013-01-10 21:03:14 +00:00
|
|
|
cfg->snapshotDir) < 0) {
|
2011-09-21 19:08:51 +00:00
|
|
|
VIR_WARN("failed to set parent snapshot '%s' as current",
|
2019-05-08 16:39:13 +00:00
|
|
|
snap->def->parent_name);
|
2019-03-21 20:00:08 +00:00
|
|
|
virDomainSnapshotSetCurrent(vm->snapshots, NULL);
|
2011-09-21 19:08:51 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (unlink(snapFile) < 0)
|
|
|
|
VIR_WARN("Failed to unlink %s", snapFile);
|
2018-10-18 00:24:34 +00:00
|
|
|
if (update_parent)
|
2019-03-22 04:45:25 +00:00
|
|
|
virDomainMomentDropParent(snap);
|
snapshot: make virDomainSnapshotObjList opaque
We were failing to react to allocation failure when initializing
a snapshot object list. Changing things to store a pointer
instead of a complete object adds one more possible point of
allocation failure, but at the same time, will make it easier to
react to failure now, as well as making it easier for a future
patch to split all virDomainSnapshotPtr handling into a separate
file, as I continue to add even more snapshot code.
Luckily, there was only one client outside of domain_conf.c that
was actually peeking inside the object, and a new wrapper function
was easy.
* src/conf/domain_conf.h (_virDomainObj): Use a pointer.
(virDomainSnapshotObjListInit): Rename.
(virDomainSnapshotObjListFree, virDomainSnapshotForEach): New
declarations.
(_virDomainSnapshotObjList): Move definitions...
* src/conf/domain_conf.c: ...here.
(virDomainSnapshotObjListInit, virDomainSnapshotObjListDeinit):
Rename...
(virDomainSnapshotObjListNew, virDomainSnapshotObjListFree): ...to
these.
(virDomainSnapshotForEach): New function.
(virDomainObjDispose, virDomainListPopulate): Adjust callers.
* src/qemu/qemu_domain.c (qemuDomainSnapshotDiscard)
(qemuDomainSnapshotDiscardAllMetadata): Likewise.
* src/qemu/qemu_migration.c (qemuMigrationIsAllowed): Likewise.
* src/qemu/qemu_driver.c (qemuDomainSnapshotLoad)
(qemuDomainUndefineFlags, qemuDomainSnapshotCreateXML)
(qemuDomainSnapshotListNames, qemuDomainSnapshotNum)
(qemuDomainListAllSnapshots)
(qemuDomainSnapshotListChildrenNames)
(qemuDomainSnapshotNumChildren)
(qemuDomainSnapshotListAllChildren)
(qemuDomainSnapshotLookupByName, qemuDomainSnapshotGetParent)
(qemuDomainSnapshotGetXMLDesc, qemuDomainSnapshotIsCurrent)
(qemuDomainSnapshotHasMetadata, qemuDomainRevertToSnapshot)
(qemuDomainSnapshotDelete): Likewise.
* src/libvirt_private.syms (domain_conf.h): Export new function.
2012-08-14 06:22:39 +00:00
|
|
|
virDomainSnapshotObjListRemove(vm->snapshots, snap);
|
2011-09-21 19:08:51 +00:00
|
|
|
|
2020-01-09 18:33:46 +00:00
|
|
|
return 0;
|
2011-09-21 19:08:51 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Hash iterator callback to discard multiple snapshots. */
|
2019-03-27 07:12:37 +00:00
|
|
|
int qemuDomainMomentDiscardAll(void *payload,
|
2020-10-21 11:31:16 +00:00
|
|
|
const char *name G_GNUC_UNUSED,
|
2019-03-27 07:12:37 +00:00
|
|
|
void *data)
|
2011-09-21 19:08:51 +00:00
|
|
|
{
|
2019-03-27 07:12:37 +00:00
|
|
|
virDomainMomentObjPtr moment = payload;
|
|
|
|
virQEMUMomentRemovePtr curr = data;
|
2011-09-21 19:08:51 +00:00
|
|
|
int err;
|
|
|
|
|
2019-03-27 07:12:37 +00:00
|
|
|
if (!curr->found && curr->current == moment)
|
|
|
|
curr->found = true;
|
|
|
|
err = curr->momentDiscard(curr->driver, curr->vm, moment, false,
|
|
|
|
curr->metadata_only);
|
2011-09-21 19:08:51 +00:00
|
|
|
if (err && !curr->err)
|
|
|
|
curr->err = err;
|
2016-02-12 09:03:50 +00:00
|
|
|
return 0;
|
2011-09-21 19:08:51 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2012-11-28 16:43:10 +00:00
|
|
|
qemuDomainSnapshotDiscardAllMetadata(virQEMUDriverPtr driver,
|
2011-09-21 19:08:51 +00:00
|
|
|
virDomainObjPtr vm)
|
|
|
|
{
|
2019-03-27 07:12:37 +00:00
|
|
|
virQEMUMomentRemove rem = {
|
|
|
|
.driver = driver,
|
|
|
|
.vm = vm,
|
2019-05-10 14:38:31 +00:00
|
|
|
.metadata_only = true,
|
|
|
|
.momentDiscard = qemuDomainSnapshotDiscard,
|
2019-03-27 07:12:37 +00:00
|
|
|
};
|
2011-09-21 19:08:51 +00:00
|
|
|
|
2019-03-27 07:12:37 +00:00
|
|
|
virDomainSnapshotForEach(vm->snapshots, qemuDomainMomentDiscardAll, &rem);
|
2019-03-17 03:38:33 +00:00
|
|
|
virDomainSnapshotObjListRemoveAll(vm->snapshots);
|
2011-09-21 19:08:51 +00:00
|
|
|
|
|
|
|
return rem.err;
|
|
|
|
}
|
|
|
|
|
2017-08-15 07:12:43 +00:00
|
|
|
|
2018-09-21 04:35:08 +00:00
|
|
|
static void
|
|
|
|
qemuDomainRemoveInactiveCommon(virQEMUDriverPtr driver,
|
|
|
|
virDomainObjPtr vm)
|
2011-09-21 19:08:51 +00:00
|
|
|
{
|
2020-01-09 18:33:45 +00:00
|
|
|
g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
|
2019-10-15 13:16:31 +00:00
|
|
|
g_autofree char *snapDir = NULL;
|
|
|
|
g_autofree char *chkDir = NULL;
|
2015-09-22 13:25:00 +00:00
|
|
|
|
2011-09-21 19:08:51 +00:00
|
|
|
/* Remove any snapshot metadata prior to removing the domain */
|
|
|
|
if (qemuDomainSnapshotDiscardAllMetadata(driver, vm) < 0) {
|
|
|
|
VIR_WARN("unable to remove all snapshots for domain %s",
|
|
|
|
vm->def->name);
|
2019-10-22 13:26:14 +00:00
|
|
|
} else {
|
|
|
|
snapDir = g_strdup_printf("%s/%s", cfg->snapshotDir, vm->def->name);
|
|
|
|
|
|
|
|
if (rmdir(snapDir) < 0 && errno != ENOENT)
|
|
|
|
VIR_WARN("unable to remove snapshot directory %s", snapDir);
|
2011-09-21 22:08:42 +00:00
|
|
|
}
|
2019-04-10 15:42:11 +00:00
|
|
|
/* Remove any checkpoint metadata prior to removing the domain */
|
2019-09-20 11:47:04 +00:00
|
|
|
if (qemuCheckpointDiscardAllMetadata(driver, vm) < 0) {
|
2019-04-10 15:42:11 +00:00
|
|
|
VIR_WARN("unable to remove all checkpoints for domain %s",
|
|
|
|
vm->def->name);
|
2019-10-22 13:26:14 +00:00
|
|
|
} else {
|
|
|
|
chkDir = g_strdup_printf("%s/%s", cfg->checkpointDir,
|
|
|
|
vm->def->name);
|
|
|
|
if (rmdir(chkDir) < 0 && errno != ENOENT)
|
|
|
|
VIR_WARN("unable to remove checkpoint directory %s", chkDir);
|
2019-04-10 15:42:11 +00:00
|
|
|
}
|
2017-04-04 16:22:31 +00:00
|
|
|
qemuExtDevicesCleanupHost(driver, vm->def);
|
2017-08-15 07:12:43 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2018-09-21 04:35:08 +00:00
|
|
|
/**
|
|
|
|
* qemuDomainRemoveInactive:
|
|
|
|
*
|
|
|
|
* The caller must hold a lock to the vm.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
qemuDomainRemoveInactive(virQEMUDriverPtr driver,
|
|
|
|
virDomainObjPtr vm)
|
|
|
|
{
|
|
|
|
if (vm->persistent) {
|
|
|
|
/* Short-circuit, we don't want to remove a persistent domain */
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
qemuDomainRemoveInactiveCommon(driver, vm);
|
|
|
|
|
|
|
|
virDomainObjListRemove(driver->domains, vm);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2018-09-21 04:35:09 +00:00
|
|
|
/**
|
|
|
|
* qemuDomainRemoveInactiveLocked:
|
|
|
|
*
|
|
|
|
* The caller must hold a lock to the vm and must hold the
|
|
|
|
* lock on driver->domains in order to call the remove obj
|
|
|
|
* from locked list method.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
qemuDomainRemoveInactiveLocked(virQEMUDriverPtr driver,
|
|
|
|
virDomainObjPtr vm)
|
|
|
|
{
|
|
|
|
if (vm->persistent) {
|
|
|
|
/* Short-circuit, we don't want to remove a persistent domain */
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
qemuDomainRemoveInactiveCommon(driver, vm);
|
|
|
|
|
|
|
|
virDomainObjListRemoveLocked(driver->domains, vm);
|
|
|
|
}
|
|
|
|
|
2017-08-15 07:12:43 +00:00
|
|
|
/**
|
|
|
|
* qemuDomainRemoveInactiveJob:
|
|
|
|
*
|
|
|
|
* Just like qemuDomainRemoveInactive but it tries to grab a
|
|
|
|
* QEMU_JOB_MODIFY first. Even though it doesn't succeed in
|
|
|
|
* grabbing the job the control carries with
|
|
|
|
* qemuDomainRemoveInactive call.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
qemuDomainRemoveInactiveJob(virQEMUDriverPtr driver,
|
|
|
|
virDomainObjPtr vm)
|
|
|
|
{
|
|
|
|
bool haveJob;
|
|
|
|
|
|
|
|
haveJob = qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) >= 0;
|
|
|
|
|
|
|
|
qemuDomainRemoveInactive(driver, vm);
|
2014-10-30 13:38:35 +00:00
|
|
|
|
|
|
|
if (haveJob)
|
qemu: completely rework reference counting
There is one problem that causes various errors in the daemon. When
domain is waiting for a job, it is unlocked while waiting on the
condition. However, if that domain is for example transient and being
removed in another API (e.g. cancelling incoming migration), it get's
unref'd. If the first call, that was waiting, fails to get the job, it
unref's the domain object, and because it was the last reference, it
causes clearing of the whole domain object. However, when finishing the
call, the domain must be unlocked, but there is no way for the API to
know whether it was cleaned or not (unless there is some ugly temporary
variable, but let's scratch that).
The root cause is that our APIs don't ref the objects they are using and
all use the implicit reference that the object has when it is in the
domain list. That reference can be removed when the API is waiting for
a job. And because each domain doesn't do its ref'ing, it results in
the ugly checking of the return value of virObjectUnref() that we have
everywhere.
This patch changes qemuDomObjFromDomain() to ref the domain (using
virDomainObjListFindByUUIDRef()) and adds qemuDomObjEndAPI() which
should be the only function in which the return value of
virObjectUnref() is checked. This makes all reference counting
deterministic and makes the code a bit clearer.
Signed-off-by: Martin Kletzander <mkletzan@redhat.com>
2014-12-04 13:41:36 +00:00
|
|
|
qemuDomainObjEndJob(driver, vm);
|
2011-09-21 19:08:51 +00:00
|
|
|
}
|
2011-09-28 10:10:13 +00:00
|
|
|
|
2017-08-15 07:12:43 +00:00
|
|
|
|
2018-09-21 04:35:09 +00:00
|
|
|
/**
|
|
|
|
* qemuDomainRemoveInactiveJobLocked:
|
|
|
|
*
|
|
|
|
* Similar to qemuDomainRemoveInactiveJob, except that the caller must
|
|
|
|
* also hold the lock @driver->domains
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
qemuDomainRemoveInactiveJobLocked(virQEMUDriverPtr driver,
|
|
|
|
virDomainObjPtr vm)
|
|
|
|
{
|
|
|
|
bool haveJob;
|
|
|
|
|
|
|
|
haveJob = qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) >= 0;
|
|
|
|
|
|
|
|
qemuDomainRemoveInactiveLocked(driver, vm);
|
|
|
|
|
|
|
|
if (haveJob)
|
|
|
|
qemuDomainObjEndJob(driver, vm);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-09-28 10:10:13 +00:00
|
|
|
void
|
2012-11-28 16:43:10 +00:00
|
|
|
qemuDomainSetFakeReboot(virQEMUDriverPtr driver,
|
2011-09-28 10:10:13 +00:00
|
|
|
virDomainObjPtr vm,
|
|
|
|
bool value)
|
|
|
|
{
|
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
2020-01-09 18:33:45 +00:00
|
|
|
g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
|
2011-09-28 10:10:13 +00:00
|
|
|
|
|
|
|
if (priv->fakeReboot == value)
|
2020-01-09 18:33:45 +00:00
|
|
|
return;
|
2011-09-28 10:10:13 +00:00
|
|
|
|
|
|
|
priv->fakeReboot = value;
|
|
|
|
|
2019-11-27 12:53:10 +00:00
|
|
|
if (virDomainObjSave(vm, driver->xmlopt, cfg->stateDir) < 0)
|
2011-09-28 10:10:13 +00:00
|
|
|
VIR_WARN("Failed to save status on vm %s", vm->def->name);
|
|
|
|
}
|
2011-10-18 08:51:06 +00:00
|
|
|
|
2016-06-02 13:20:40 +00:00
|
|
|
static void
|
2013-08-07 07:11:15 +00:00
|
|
|
qemuDomainCheckRemoveOptionalDisk(virQEMUDriverPtr driver,
|
|
|
|
virDomainObjPtr vm,
|
2014-05-15 11:11:12 +00:00
|
|
|
size_t diskIndex)
|
2013-08-07 07:11:15 +00:00
|
|
|
{
|
|
|
|
char uuid[VIR_UUID_STRING_BUFLEN];
|
2013-11-22 14:38:05 +00:00
|
|
|
virObjectEventPtr event = NULL;
|
2014-05-15 11:11:12 +00:00
|
|
|
virDomainDiskDefPtr disk = vm->def->disks[diskIndex];
|
conf: use disk source accessors in qemu/
Part of a series of cleanups to use new accessor methods.
* src/qemu/qemu_conf.c (qemuCheckSharedDevice)
(qemuAddSharedDevice, qemuRemoveSharedDevice, qemuSetUnprivSGIO):
Use accessors.
* src/qemu/qemu_domain.c (qemuDomainDeviceDefPostParse)
(qemuDomainObjCheckDiskTaint, qemuDomainSnapshotForEachQcow2Raw)
(qemuDomainCheckRemoveOptionalDisk, qemuDomainCheckDiskPresence)
(qemuDiskChainCheckBroken, qemuDomainDetermineDiskChain):
Likewise.
* src/qemu/qemu_hotplug.c (qemuDomainChangeEjectableMedia)
(qemuDomainCheckEjectableMedia)
(qemuDomainAttachVirtioDiskDevice, qemuDomainAttachSCSIDisk)
(qemuDomainAttachUSBMassstorageDevice)
(qemuDomainAttachDeviceDiskLive, qemuDomainRemoveDiskDevice)
(qemuDomainDetachVirtioDiskDevice, qemuDomainDetachDiskDevice):
Likewise.
* src/qemu/qemu_migration.c (qemuMigrationStartNBDServer)
(qemuMigrationDriveMirror, qemuMigrationCancelDriveMirror)
(qemuMigrationIsSafe): Likewise.
* src/qemu/qemu_process.c (qemuProcessGetVolumeQcowPassphrase)
(qemuProcessHandleIOError, qemuProcessHandleBlockJob)
(qemuProcessInitPasswords): Likewise.
* src/qemu/qemu_driver.c (qemuDomainChangeDiskMediaLive)
(qemuDomainGetBlockInfo, qemuDiskPathToAlias): Likewise.
Signed-off-by: Eric Blake <eblake@redhat.com>
2014-03-18 19:16:47 +00:00
|
|
|
const char *src = virDomainDiskGetSource(disk);
|
2013-08-07 07:11:15 +00:00
|
|
|
|
|
|
|
virUUIDFormat(vm->def->uuid, uuid);
|
|
|
|
|
|
|
|
VIR_DEBUG("Dropping disk '%s' on domain '%s' (UUID '%s') "
|
|
|
|
"due to inaccessible source '%s'",
|
conf: use disk source accessors in qemu/
Part of a series of cleanups to use new accessor methods.
* src/qemu/qemu_conf.c (qemuCheckSharedDevice)
(qemuAddSharedDevice, qemuRemoveSharedDevice, qemuSetUnprivSGIO):
Use accessors.
* src/qemu/qemu_domain.c (qemuDomainDeviceDefPostParse)
(qemuDomainObjCheckDiskTaint, qemuDomainSnapshotForEachQcow2Raw)
(qemuDomainCheckRemoveOptionalDisk, qemuDomainCheckDiskPresence)
(qemuDiskChainCheckBroken, qemuDomainDetermineDiskChain):
Likewise.
* src/qemu/qemu_hotplug.c (qemuDomainChangeEjectableMedia)
(qemuDomainCheckEjectableMedia)
(qemuDomainAttachVirtioDiskDevice, qemuDomainAttachSCSIDisk)
(qemuDomainAttachUSBMassstorageDevice)
(qemuDomainAttachDeviceDiskLive, qemuDomainRemoveDiskDevice)
(qemuDomainDetachVirtioDiskDevice, qemuDomainDetachDiskDevice):
Likewise.
* src/qemu/qemu_migration.c (qemuMigrationStartNBDServer)
(qemuMigrationDriveMirror, qemuMigrationCancelDriveMirror)
(qemuMigrationIsSafe): Likewise.
* src/qemu/qemu_process.c (qemuProcessGetVolumeQcowPassphrase)
(qemuProcessHandleIOError, qemuProcessHandleBlockJob)
(qemuProcessInitPasswords): Likewise.
* src/qemu/qemu_driver.c (qemuDomainChangeDiskMediaLive)
(qemuDomainGetBlockInfo, qemuDiskPathToAlias): Likewise.
Signed-off-by: Eric Blake <eblake@redhat.com>
2014-03-18 19:16:47 +00:00
|
|
|
disk->dst, vm->def->name, uuid, src);
|
2013-08-07 07:11:15 +00:00
|
|
|
|
|
|
|
if (disk->device == VIR_DOMAIN_DISK_DEVICE_CDROM ||
|
|
|
|
disk->device == VIR_DOMAIN_DISK_DEVICE_FLOPPY) {
|
|
|
|
|
conf: use disk source accessors in qemu/
Part of a series of cleanups to use new accessor methods.
* src/qemu/qemu_conf.c (qemuCheckSharedDevice)
(qemuAddSharedDevice, qemuRemoveSharedDevice, qemuSetUnprivSGIO):
Use accessors.
* src/qemu/qemu_domain.c (qemuDomainDeviceDefPostParse)
(qemuDomainObjCheckDiskTaint, qemuDomainSnapshotForEachQcow2Raw)
(qemuDomainCheckRemoveOptionalDisk, qemuDomainCheckDiskPresence)
(qemuDiskChainCheckBroken, qemuDomainDetermineDiskChain):
Likewise.
* src/qemu/qemu_hotplug.c (qemuDomainChangeEjectableMedia)
(qemuDomainCheckEjectableMedia)
(qemuDomainAttachVirtioDiskDevice, qemuDomainAttachSCSIDisk)
(qemuDomainAttachUSBMassstorageDevice)
(qemuDomainAttachDeviceDiskLive, qemuDomainRemoveDiskDevice)
(qemuDomainDetachVirtioDiskDevice, qemuDomainDetachDiskDevice):
Likewise.
* src/qemu/qemu_migration.c (qemuMigrationStartNBDServer)
(qemuMigrationDriveMirror, qemuMigrationCancelDriveMirror)
(qemuMigrationIsSafe): Likewise.
* src/qemu/qemu_process.c (qemuProcessGetVolumeQcowPassphrase)
(qemuProcessHandleIOError, qemuProcessHandleBlockJob)
(qemuProcessInitPasswords): Likewise.
* src/qemu/qemu_driver.c (qemuDomainChangeDiskMediaLive)
(qemuDomainGetBlockInfo, qemuDiskPathToAlias): Likewise.
Signed-off-by: Eric Blake <eblake@redhat.com>
2014-03-18 19:16:47 +00:00
|
|
|
event = virDomainEventDiskChangeNewFromObj(vm, src, NULL,
|
2013-08-07 07:11:15 +00:00
|
|
|
disk->info.alias,
|
|
|
|
VIR_DOMAIN_EVENT_DISK_CHANGE_MISSING_ON_START);
|
2017-03-31 13:59:54 +00:00
|
|
|
virDomainDiskEmptySource(disk);
|
2016-09-05 13:50:18 +00:00
|
|
|
/* keeping the old startup policy would be invalid for new images */
|
|
|
|
disk->startupPolicy = VIR_DOMAIN_STARTUP_POLICY_DEFAULT;
|
2013-08-07 07:11:15 +00:00
|
|
|
} else {
|
conf: use disk source accessors in qemu/
Part of a series of cleanups to use new accessor methods.
* src/qemu/qemu_conf.c (qemuCheckSharedDevice)
(qemuAddSharedDevice, qemuRemoveSharedDevice, qemuSetUnprivSGIO):
Use accessors.
* src/qemu/qemu_domain.c (qemuDomainDeviceDefPostParse)
(qemuDomainObjCheckDiskTaint, qemuDomainSnapshotForEachQcow2Raw)
(qemuDomainCheckRemoveOptionalDisk, qemuDomainCheckDiskPresence)
(qemuDiskChainCheckBroken, qemuDomainDetermineDiskChain):
Likewise.
* src/qemu/qemu_hotplug.c (qemuDomainChangeEjectableMedia)
(qemuDomainCheckEjectableMedia)
(qemuDomainAttachVirtioDiskDevice, qemuDomainAttachSCSIDisk)
(qemuDomainAttachUSBMassstorageDevice)
(qemuDomainAttachDeviceDiskLive, qemuDomainRemoveDiskDevice)
(qemuDomainDetachVirtioDiskDevice, qemuDomainDetachDiskDevice):
Likewise.
* src/qemu/qemu_migration.c (qemuMigrationStartNBDServer)
(qemuMigrationDriveMirror, qemuMigrationCancelDriveMirror)
(qemuMigrationIsSafe): Likewise.
* src/qemu/qemu_process.c (qemuProcessGetVolumeQcowPassphrase)
(qemuProcessHandleIOError, qemuProcessHandleBlockJob)
(qemuProcessInitPasswords): Likewise.
* src/qemu/qemu_driver.c (qemuDomainChangeDiskMediaLive)
(qemuDomainGetBlockInfo, qemuDiskPathToAlias): Likewise.
Signed-off-by: Eric Blake <eblake@redhat.com>
2014-03-18 19:16:47 +00:00
|
|
|
event = virDomainEventDiskChangeNewFromObj(vm, src, NULL,
|
2013-08-07 07:11:15 +00:00
|
|
|
disk->info.alias,
|
|
|
|
VIR_DOMAIN_EVENT_DISK_DROP_MISSING_ON_START);
|
2014-05-15 11:11:12 +00:00
|
|
|
virDomainDiskRemove(vm->def, diskIndex);
|
|
|
|
virDomainDiskDefFree(disk);
|
2013-08-07 07:11:15 +00:00
|
|
|
}
|
|
|
|
|
2018-06-12 17:33:02 +00:00
|
|
|
virObjectEventStateQueue(driver->domainEventState, event);
|
2013-08-07 07:11:15 +00:00
|
|
|
}
|
|
|
|
|
2017-10-03 10:51:47 +00:00
|
|
|
|
|
|
|
/**
|
|
|
|
* qemuDomainCheckDiskStartupPolicy:
|
|
|
|
* @driver: qemu driver object
|
|
|
|
* @vm: domain object
|
|
|
|
* @disk: index of disk to check
|
|
|
|
* @cold_boot: true if a new VM is being started
|
|
|
|
*
|
|
|
|
* This function should be called when the source storage for a disk device is
|
|
|
|
* missing. The function checks whether the startup policy for the disk allows
|
|
|
|
* removal of the source (or disk) according to the state of the VM.
|
|
|
|
*
|
|
|
|
* The function returns 0 if the source or disk was dropped and -1 if the state
|
|
|
|
* of the VM does not allow this. This function does not report errors, but
|
|
|
|
* clears any reported error if 0 is returned.
|
|
|
|
*/
|
|
|
|
int
|
2013-07-26 12:37:21 +00:00
|
|
|
qemuDomainCheckDiskStartupPolicy(virQEMUDriverPtr driver,
|
|
|
|
virDomainObjPtr vm,
|
2014-05-15 11:11:12 +00:00
|
|
|
size_t diskIndex,
|
2013-07-26 12:37:21 +00:00
|
|
|
bool cold_boot)
|
|
|
|
{
|
2014-05-15 11:11:12 +00:00
|
|
|
int startupPolicy = vm->def->disks[diskIndex]->startupPolicy;
|
2014-06-27 14:34:07 +00:00
|
|
|
int device = vm->def->disks[diskIndex]->device;
|
2013-07-26 12:37:21 +00:00
|
|
|
|
2014-06-01 00:22:29 +00:00
|
|
|
switch ((virDomainStartupPolicy) startupPolicy) {
|
2013-07-26 12:37:21 +00:00
|
|
|
case VIR_DOMAIN_STARTUP_POLICY_OPTIONAL:
|
2014-06-27 14:34:07 +00:00
|
|
|
/* Once started with an optional disk, qemu saves its section
|
|
|
|
* in the migration stream, so later, when restoring from it
|
|
|
|
* we must make sure the sections match. */
|
|
|
|
if (!cold_boot &&
|
|
|
|
device != VIR_DOMAIN_DISK_DEVICE_FLOPPY &&
|
|
|
|
device != VIR_DOMAIN_DISK_DEVICE_CDROM)
|
2016-06-02 13:20:40 +00:00
|
|
|
return -1;
|
2013-07-26 12:37:21 +00:00
|
|
|
break;
|
|
|
|
|
2016-08-01 15:52:02 +00:00
|
|
|
case VIR_DOMAIN_STARTUP_POLICY_DEFAULT:
|
2013-07-26 12:37:21 +00:00
|
|
|
case VIR_DOMAIN_STARTUP_POLICY_MANDATORY:
|
2016-06-02 13:20:40 +00:00
|
|
|
return -1;
|
2013-07-26 12:37:21 +00:00
|
|
|
|
|
|
|
case VIR_DOMAIN_STARTUP_POLICY_REQUISITE:
|
2013-07-31 07:55:05 +00:00
|
|
|
if (cold_boot)
|
2016-06-02 13:20:40 +00:00
|
|
|
return -1;
|
2013-07-26 12:37:21 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case VIR_DOMAIN_STARTUP_POLICY_LAST:
|
|
|
|
/* this should never happen */
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2016-06-02 13:20:40 +00:00
|
|
|
qemuDomainCheckRemoveOptionalDisk(driver, vm, diskIndex);
|
2016-08-01 15:52:02 +00:00
|
|
|
virResetLastError();
|
2013-07-26 12:37:21 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2014-04-15 08:20:07 +00:00
|
|
|
|
2012-03-16 06:52:26 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* The vm must be locked when any of the following cleanup functions is
|
|
|
|
* called.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
qemuDomainCleanupAdd(virDomainObjPtr vm,
|
|
|
|
qemuDomainCleanupCallback cb)
|
|
|
|
{
|
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
Convert 'int i' to 'size_t i' in src/qemu files
Convert the type of loop iterators named 'i', 'j', k',
'ii', 'jj', 'kk', to be 'size_t' instead of 'int' or
'unsigned int', also santizing 'ii', 'jj', 'kk' to use
the normal 'i', 'j', 'k' naming
Signed-off-by: Daniel P. Berrange <berrange@redhat.com>
2013-07-08 14:09:33 +00:00
|
|
|
size_t i;
|
2012-03-16 06:52:26 +00:00
|
|
|
|
|
|
|
VIR_DEBUG("vm=%s, cb=%p", vm->def->name, cb);
|
|
|
|
|
|
|
|
for (i = 0; i < priv->ncleanupCallbacks; i++) {
|
|
|
|
if (priv->cleanupCallbacks[i] == cb)
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (VIR_RESIZE_N(priv->cleanupCallbacks,
|
|
|
|
priv->ncleanupCallbacks_max,
|
2013-07-04 10:14:12 +00:00
|
|
|
priv->ncleanupCallbacks, 1) < 0)
|
2012-03-16 06:52:26 +00:00
|
|
|
return -1;
|
|
|
|
|
|
|
|
priv->cleanupCallbacks[priv->ncleanupCallbacks++] = cb;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Unregister cleanup callback @cb previously added with
 * qemuDomainCleanupAdd(). The vm must be locked by the caller. */
void
qemuDomainCleanupRemove(virDomainObjPtr vm,
                        qemuDomainCleanupCallback cb)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    size_t i;

    VIR_DEBUG("vm=%s, cb=%p", vm->def->name, cb);

    for (i = 0; i < priv->ncleanupCallbacks; i++) {
        /* VIR_DELETE_ELEMENT_INPLACE shifts the following entries down and
         * decrements ncleanupCallbacks; the loop's i++ then skips the entry
         * that moved into slot i. This is harmless because
         * qemuDomainCleanupAdd() never stores duplicates, so at most one
         * entry can match @cb. */
        if (priv->cleanupCallbacks[i] == cb)
            VIR_DELETE_ELEMENT_INPLACE(priv->cleanupCallbacks,
                                       i, priv->ncleanupCallbacks);
    }

    /* give back the now-unused tail of the allocation */
    VIR_SHRINK_N(priv->cleanupCallbacks,
                 priv->ncleanupCallbacks_max,
                 priv->ncleanupCallbacks_max - priv->ncleanupCallbacks);
}
|
|
|
|
|
|
|
|
void
|
2012-11-28 16:43:10 +00:00
|
|
|
qemuDomainCleanupRun(virQEMUDriverPtr driver,
|
2012-03-16 06:52:26 +00:00
|
|
|
virDomainObjPtr vm)
|
|
|
|
{
|
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
|
|
|
|
|
|
|
VIR_DEBUG("driver=%p, vm=%s", driver, vm->def->name);
|
|
|
|
|
|
|
|
/* run cleanup callbacks in reverse order */
|
2020-05-05 11:45:44 +00:00
|
|
|
while (priv->ncleanupCallbacks)
|
|
|
|
priv->cleanupCallbacks[--priv->ncleanupCallbacks](driver, vm);
|
2012-03-16 06:52:26 +00:00
|
|
|
|
|
|
|
VIR_FREE(priv->cleanupCallbacks);
|
|
|
|
priv->ncleanupCallbacks_max = 0;
|
|
|
|
}
|
storage: cache backing chain while qemu domain is live
Technically, we should not be re-probing any file that qemu might
be currently writing to. As such, we should cache the backing
file chain prior to starting qemu. This patch adds the cache,
but does not use it until the next patch.
Ultimately, we want to also store the chain in domain XML, so that
it is remembered across libvirtd restarts, and so that the only
kosher way to modify the backing chain of an offline domain will be
through libvirt API calls, but we aren't there yet. So for now, we
merely invalidate the cache any time we do a live operation that
alters the chain (block-pull, block-commit, external disk snapshot),
as well as tear down the cache when the domain is not running.
* src/conf/domain_conf.h (_virDomainDiskDef): New field.
* src/conf/domain_conf.c (virDomainDiskDefFree): Clean new field.
* src/qemu/qemu_domain.h (qemuDomainDetermineDiskChain): New
prototype.
* src/qemu/qemu_domain.c (qemuDomainDetermineDiskChain): New
function.
* src/qemu/qemu_driver.c (qemuDomainAttachDeviceDiskLive)
(qemuDomainChangeDiskMediaLive): Pre-populate chain.
(qemuDomainSnapshotCreateSingleDiskActive): Uncache chain before
snapshot.
* src/qemu/qemu_process.c (qemuProcessHandleBlockJob): Update
chain after block pull.
2012-10-09 22:08:14 +00:00
|
|
|
|
2019-12-03 14:17:23 +00:00
|
|
|
void
|
2014-02-07 17:42:27 +00:00
|
|
|
qemuDomainGetImageIds(virQEMUDriverConfigPtr cfg,
|
|
|
|
virDomainObjPtr vm,
|
2014-06-30 13:40:57 +00:00
|
|
|
virStorageSourcePtr src,
|
2017-10-16 12:10:09 +00:00
|
|
|
virStorageSourcePtr parentSrc,
|
2014-02-07 17:42:27 +00:00
|
|
|
uid_t *uid, gid_t *gid)
|
|
|
|
{
|
|
|
|
virSecurityLabelDefPtr vmlabel;
|
|
|
|
virSecurityDeviceLabelDefPtr disklabel;
|
|
|
|
|
|
|
|
if (uid)
|
|
|
|
*uid = -1;
|
|
|
|
if (gid)
|
|
|
|
*gid = -1;
|
|
|
|
|
|
|
|
if (cfg) {
|
|
|
|
if (uid)
|
|
|
|
*uid = cfg->user;
|
|
|
|
|
|
|
|
if (gid)
|
|
|
|
*gid = cfg->group;
|
|
|
|
}
|
|
|
|
|
2014-06-12 08:50:43 +00:00
|
|
|
if (vm && (vmlabel = virDomainDefGetSecurityLabelDef(vm->def, "dac")) &&
|
|
|
|
vmlabel->label)
|
2014-02-07 17:42:27 +00:00
|
|
|
virParseOwnershipIds(vmlabel->label, uid, gid);
|
|
|
|
|
2017-10-16 12:10:09 +00:00
|
|
|
if (parentSrc &&
|
|
|
|
(disklabel = virStorageSourceGetSecurityLabelDef(parentSrc, "dac")) &&
|
|
|
|
disklabel->label)
|
|
|
|
virParseOwnershipIds(disklabel->label, uid, gid);
|
|
|
|
|
2014-06-30 13:40:57 +00:00
|
|
|
if ((disklabel = virStorageSourceGetSecurityLabelDef(src, "dac")) &&
|
qemuDomainGetImageIds: Skip <seclabel/> without label
It's easy to shed the daemon these days. With this XML snippet:
<disk type='file' device='disk'>
<driver name='qemu' type='raw'/>
<source file='/some/dummy/path/test.bin'>
<seclabel model='dac' relabel='no'/>
</source>
<target dev='vdb' bus='virtio'/>
<readonly/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x0'/>
</disk>
I get the SIGSEGV when starting the domain. The thing is, when
starting a domain, we check for its disk presence. For some reason,
when determining the disk chain, we parse the <seclabel/> (don't ask
me why). However, there's no label attribute in the XML, so we end up
calling virParseOwnershipIds() over NULL string:
[Switching to Thread 0x7ffff10c4700 (LWP 30956)]
__strchr_sse42 () at ../sysdeps/x86_64/multiarch/strchr.S:136
136 ../sysdeps/x86_64/multiarch/strchr.S: No such file or directory.
(gdb) bt
#0 __strchr_sse42 () at ../sysdeps/x86_64/multiarch/strchr.S:136
#1 0x00007ffff749f800 in virParseOwnershipIds (label=0x0, uidPtr=uidPtr@entry=0x7ffff10c2df0, gidPtr=gidPtr@entry=0x7ffff10c2df4) at util/virutil.c:2115
#2 0x00007fffe929f006 in qemuDomainGetImageIds (gid=0x7ffff10c2df4, uid=0x7ffff10c2df0, disk=0x7fffe40cb000, vm=0x7fffe40a6410, cfg=0x7fffe409ae00) at qemu/qemu_domain.c:2385
#3 qemuDomainDetermineDiskChain (driver=driver@entry=0x7fffe40120e0, vm=vm@entry=0x7fffe40a6410, disk=disk@entry=0x7fffe40cb000, force=force@entry=false) at qemu/qemu_domain.c:2414
#4 0x00007fffe929f128 in qemuDomainCheckDiskPresence (driver=driver@entry=0x7fffe40120e0, vm=vm@entry=0x7fffe40a6410, cold_boot=cold_boot@entry=true) at qemu/qemu_domain.c:2250
#5 0x00007fffe92b6fc8 in qemuProcessStart (conn=conn@entry=0x7fffd4000b60, driver=driver@entry=0x7fffe40120e0, vm=vm@entry=0x7fffe40a6410, migrateFrom=migrateFrom@entry=0x0, stdin_fd=stdin_fd@entry=-1, stdin_path=stdin_path@entry=0x0, snapshot=snapshot@entry=0x0,
vmop=vmop@entry=VIR_NETDEV_VPORT_PROFILE_OP_CREATE, flags=flags@entry=1) at qemu/qemu_process.c:3813
#6 0x00007fffe93087e8 in qemuDomainObjStart (conn=0x7fffd4000b60, driver=driver@entry=0x7fffe40120e0, vm=vm@entry=0x7fffe40a6410, flags=flags@entry=0) at qemu/qemu_driver.c:6051
#7 0x00007fffe9308e32 in qemuDomainCreateWithFlags (dom=0x7fffcc000d50, flags=0) at qemu/qemu_driver.c:6105
#8 0x00007ffff753c5cc in virDomainCreate (domain=domain@entry=0x7fffcc000d50) at libvirt.c:8861
Signed-off-by: Michal Privoznik <mprivozn@redhat.com>
2014-03-24 15:44:17 +00:00
|
|
|
disklabel->label)
|
2014-02-07 17:42:27 +00:00
|
|
|
virParseOwnershipIds(disklabel->label, uid, gid);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2014-06-30 13:48:45 +00:00
|
|
|
int
|
|
|
|
qemuDomainStorageFileInit(virQEMUDriverPtr driver,
|
|
|
|
virDomainObjPtr vm,
|
2017-10-16 12:10:09 +00:00
|
|
|
virStorageSourcePtr src,
|
|
|
|
virStorageSourcePtr parent)
|
2014-06-30 13:48:45 +00:00
|
|
|
{
|
2020-01-09 18:33:45 +00:00
|
|
|
g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
|
2014-06-30 13:48:45 +00:00
|
|
|
uid_t uid;
|
|
|
|
gid_t gid;
|
|
|
|
|
2017-10-16 12:10:09 +00:00
|
|
|
qemuDomainGetImageIds(cfg, vm, src, parent, &uid, &gid);
|
2014-06-30 13:48:45 +00:00
|
|
|
|
|
|
|
if (virStorageFileInitAs(src, uid, gid) < 0)
|
2020-01-09 18:33:46 +00:00
|
|
|
return -1;
|
2014-06-30 13:48:45 +00:00
|
|
|
|
2020-01-09 18:33:46 +00:00
|
|
|
return 0;
|
2014-06-30 13:48:45 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2014-12-12 16:53:33 +00:00
|
|
|
/* Build the legacy -drive alias for @device at backing-chain position
 * @depth (0 means the top image). Caller frees the returned string. */
char *
qemuDomainStorageAlias(const char *device, int depth)
{
    const char *stripped = qemuAliasDiskDriveSkipPrefix(device);

    if (depth == 0)
        return g_strdup(stripped);

    return g_strdup_printf("%s.%d", stripped, depth);
}
|
|
|
|
|
|
|
|
|
2019-09-04 14:58:08 +00:00
|
|
|
/**
|
|
|
|
* qemuDomainStorageSourceValidateDepth:
|
|
|
|
* @src: storage source chain to validate
|
|
|
|
* @add: offsets the calculated number of images
|
|
|
|
* @diskdst: optional disk target to use in error message
|
|
|
|
*
|
|
|
|
* The XML parser limits the maximum element nesting to 256 layers. As libvirt
|
|
|
|
* reports the chain into the status and in some cases the config XML we must
|
|
|
|
* validate that any user-provided chains will not exceed the XML nesting limit
|
|
|
|
* when formatted to the XML.
|
|
|
|
*
|
|
|
|
* This function validates that the storage source chain starting @src is at
|
|
|
|
* most 200 layers deep. @add modifies the calculated value to offset the number
|
|
|
|
* to allow checking cases when new layers are going to be added to the chain.
|
|
|
|
*
|
|
|
|
* Returns 0 on success and -1 if the chain is too deep. Error is reported.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
qemuDomainStorageSourceValidateDepth(virStorageSourcePtr src,
|
|
|
|
int add,
|
|
|
|
const char *diskdst)
|
|
|
|
{
|
|
|
|
virStorageSourcePtr n;
|
|
|
|
size_t nlayers = 0;
|
|
|
|
|
|
|
|
for (n = src; virStorageSourceIsBacking(n); n = n->backingStore)
|
|
|
|
nlayers++;
|
|
|
|
|
|
|
|
nlayers += add;
|
|
|
|
|
|
|
|
if (nlayers > 200) {
|
|
|
|
if (diskdst)
|
|
|
|
virReportError(VIR_ERR_OPERATION_UNSUPPORTED,
|
|
|
|
_("backing chains more than 200 layers deep are not "
|
|
|
|
"supported for disk '%s'"), diskdst);
|
|
|
|
else
|
|
|
|
virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
|
|
|
|
_("backing chains more than 200 layers deep are not "
|
|
|
|
"supported"));
|
|
|
|
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2019-10-08 12:58:48 +00:00
|
|
|
/**
|
|
|
|
* qemuDomainPrepareStorageSourceConfig:
|
|
|
|
* @src: storage source to configure
|
|
|
|
* @cfg: qemu driver config object
|
|
|
|
* @qemuCaps: capabilities of qemu
|
|
|
|
*
|
|
|
|
* Set properties of @src based on the qemu driver config @cfg.
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
qemuDomainPrepareStorageSourceConfig(virStorageSourcePtr src,
|
|
|
|
virQEMUDriverConfigPtr cfg,
|
|
|
|
virQEMUCapsPtr qemuCaps)
|
|
|
|
{
|
|
|
|
if (!cfg)
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (src->type == VIR_STORAGE_TYPE_NETWORK &&
|
|
|
|
src->protocol == VIR_STORAGE_NET_PROTOCOL_GLUSTER &&
|
|
|
|
virQEMUCapsGet(qemuCaps, QEMU_CAPS_GLUSTER_DEBUG_LEVEL)) {
|
|
|
|
src->debug = true;
|
|
|
|
src->debugLevel = cfg->glusterDebugLevel;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2019-01-16 14:33:07 +00:00
|
|
|
/**
 * qemuDomainDetermineDiskChain:
 * @driver: qemu driver object
 * @vm: domain object
 * @disk: disk definition
 * @disksrc: source to determine the chain for, may be NULL
 * @report_broken: report broken chain verbosely
 *
 * Prepares and initializes the backing chain of disk @disk. In cases where
 * a new source is to be associated with @disk the @disksrc parameter can be
 * used to override the source. If @report_broken is true missing images
 * in the backing chain are reported.
 *
 * Returns 0 on success (including the no-op cases of an empty source or an
 * already-terminated chain) and -1 on failure with the error reported.
 */
int
qemuDomainDetermineDiskChain(virQEMUDriverPtr driver,
                             virDomainObjPtr vm,
                             virDomainDiskDefPtr disk,
                             virStorageSourcePtr disksrc,
                             bool report_broken)
{
    g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
    virStorageSourcePtr src; /* iterator for the backing chain declared in XML */
    virStorageSourcePtr n; /* iterator for the backing chain detected from disk */
    qemuDomainObjPrivatePtr priv = vm->privateData;
    bool blockdev = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV);
    bool isSD = qemuDiskBusIsSD(disk->bus);
    uid_t uid;
    gid_t gid;

    /* without an explicit override, operate on the disk's own source */
    if (!disksrc)
        disksrc = disk->src;

    /* nothing to do for an empty (e.g. ejected cdrom) source */
    if (virStorageSourceIsEmpty(disksrc))
        return 0;

    /* There is no need to check the backing chain for disks without backing
     * support */
    if (virStorageSourceIsLocalStorage(disksrc) &&
        disksrc->format > VIR_STORAGE_FILE_NONE &&
        disksrc->format < VIR_STORAGE_FILE_BACKING) {

        if (!virFileExists(disksrc->path)) {
            if (report_broken)
                virStorageFileReportBrokenChain(errno, disksrc, disksrc);

            return -1;
        }

        /* terminate the chain for such images as the code below would do */
        if (!disksrc->backingStore)
            disksrc->backingStore = virStorageSourceNew();

        /* host cdrom requires special treatment in qemu, so we need to check
         * whether a block device is a cdrom */
        if (disk->device == VIR_DOMAIN_DISK_DEVICE_CDROM &&
            disksrc->format == VIR_STORAGE_FILE_RAW &&
            virStorageSourceIsBlockLocal(disksrc) &&
            virFileIsCDROM(disksrc->path) == 1)
            disksrc->hostcdrom = true;

        return 0;
    }

    src = disksrc;
    /* skip to the end of the chain if there is any */
    while (virStorageSourceHasBacking(src)) {
        if (report_broken) {
            /* probe each image declared in the XML for accessibility before
             * moving past it; the storage file backend is initialized and
             * torn down again for every image */
            int rv = virStorageFileSupportsAccess(src);

            if (rv < 0)
                return -1;

            if (rv > 0) {
                if (qemuDomainStorageFileInit(driver, vm, src, disksrc) < 0)
                    return -1;

                if (virStorageFileAccess(src, F_OK) < 0) {
                    virStorageFileReportBrokenChain(errno, src, disksrc);
                    /* deinit before bailing out so the backend isn't leaked */
                    virStorageFileDeinit(src);
                    return -1;
                }

                virStorageFileDeinit(src);
            }
        }
        src = src->backingStore;
    }

    /* We skipped to the end of the chain. Skip detection if there's the
     * terminator. (An allocated but empty backingStore) */
    if (src->backingStore) {
        if (qemuDomainStorageSourceValidateDepth(disksrc, 0, disk->dst) < 0)
            return -1;

        return 0;
    }

    /* determine the uid/gid to probe the remainder of the chain as */
    qemuDomainGetImageIds(cfg, vm, src, disksrc, &uid, &gid);

    if (virStorageFileGetMetadata(src, uid, gid, report_broken) < 0)
        return -1;

    /* post-process every newly detected image of the chain */
    for (n = src->backingStore; virStorageSourceIsBacking(n); n = n->backingStore) {
        /* convert detected ISO format to 'raw' as qemu would not understand it */
        if (n->format == VIR_STORAGE_FILE_ISO)
            n->format = VIR_STORAGE_FILE_RAW;

        /* mask-out blockdev for 'sd' disks */
        if (qemuDomainValidateStorageSource(n, priv->qemuCaps, isSD) < 0)
            return -1;

        qemuDomainPrepareStorageSourceConfig(n, cfg, priv->qemuCaps);
        qemuDomainPrepareDiskSourceData(disk, n);

        if (blockdev && !isSD &&
            qemuDomainPrepareStorageSourceBlockdev(disk, n, priv, cfg) < 0)
            return -1;
    }

    /* validate the depth of the full (declared + detected) chain */
    if (qemuDomainStorageSourceValidateDepth(disksrc, 0, disk->dst) < 0)
        return -1;

    return 0;
}
|
2013-06-28 14:16:44 +00:00
|
|
|
|
2015-03-13 16:22:04 +00:00
|
|
|
|
2020-01-17 13:28:14 +00:00
|
|
|
/**
|
|
|
|
* qemuDomainDiskGetTopNodename:
|
|
|
|
*
|
|
|
|
* @disk: disk definition object
|
|
|
|
*
|
|
|
|
* Returns the pointer to the node-name of the topmost layer used by @disk as
|
|
|
|
* backend. Currently returns the nodename of the copy-on-read filter if enabled
|
|
|
|
* or the nodename of the top image's format driver. Empty disks return NULL.
|
|
|
|
* This must be used only when VIR_QEMU_CAPS_BLOCKDEV is enabled.
|
|
|
|
*/
|
|
|
|
const char *
|
|
|
|
qemuDomainDiskGetTopNodename(virDomainDiskDefPtr disk)
|
|
|
|
{
|
|
|
|
qemuDomainDiskPrivatePtr priv = QEMU_DOMAIN_DISK_PRIVATE(disk);
|
|
|
|
|
|
|
|
if (virStorageSourceIsEmpty(disk->src))
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
if (disk->copy_on_read == VIR_TRISTATE_SWITCH_ON)
|
|
|
|
return priv->nodeCopyOnRead;
|
|
|
|
|
|
|
|
return disk->src->nodeformat;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2018-06-26 04:58:47 +00:00
|
|
|
/**
|
|
|
|
* qemuDomainDiskGetBackendAlias:
|
|
|
|
* @disk: disk definition
|
|
|
|
* @qemuCaps: emulator capabilities
|
|
|
|
* @backendAlias: filled with the alias of the disk storage backend
|
|
|
|
*
|
|
|
|
* Returns the correct alias for the disk backend. This may be the alias of
|
|
|
|
* -drive for legacy setup or the correct node name for -blockdev setups.
|
|
|
|
*
|
|
|
|
* @backendAlias may be NULL on success if the backend does not exist
|
|
|
|
* (disk is empty). Caller is responsible for freeing @backendAlias.
|
|
|
|
*
|
|
|
|
* Returns 0 on success, -1 on error with libvirt error reported.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
qemuDomainDiskGetBackendAlias(virDomainDiskDefPtr disk,
|
2018-07-18 11:58:59 +00:00
|
|
|
virQEMUCapsPtr qemuCaps,
|
2018-06-26 04:58:47 +00:00
|
|
|
char **backendAlias)
|
|
|
|
{
|
|
|
|
*backendAlias = NULL;
|
|
|
|
|
2020-05-06 11:48:35 +00:00
|
|
|
if (!virQEMUCapsGet(qemuCaps, QEMU_CAPS_BLOCKDEV) ||
|
|
|
|
qemuDiskBusIsSD(disk->bus)) {
|
2018-07-18 11:58:59 +00:00
|
|
|
if (!(*backendAlias = qemuAliasDiskDriveFromDisk(disk)))
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-01-17 13:28:14 +00:00
|
|
|
*backendAlias = g_strdup(qemuDomainDiskGetTopNodename(disk));
|
2018-06-26 04:58:47 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2019-04-18 11:02:43 +00:00
|
|
|
/* Modes of operation for qemuDomainStorageSourceAccessModify(); values are
 * combined as a bitmask via qemuDomainStorageSourceAccessFlags. */
typedef enum {
    /* revoke access to the image instead of allowing it */
    QEMU_DOMAIN_STORAGE_SOURCE_ACCESS_REVOKE = 1 << 0,
    /* operate on full backing chain rather than single image */
    QEMU_DOMAIN_STORAGE_SOURCE_ACCESS_CHAIN = 1 << 1,
    /* force permissions to read-only/read-write when allowing */
    /* currently does not properly work with QEMU_DOMAIN_STORAGE_SOURCE_ACCESS_CHAIN */
    QEMU_DOMAIN_STORAGE_SOURCE_ACCESS_FORCE_READ_ONLY = 1 << 2,
    QEMU_DOMAIN_STORAGE_SOURCE_ACCESS_FORCE_READ_WRITE = 1 << 3,
    /* don't revoke permissions when modification has failed */
    QEMU_DOMAIN_STORAGE_SOURCE_ACCESS_SKIP_REVOKE = 1 << 4,
    /* VM already has access to the source and we are just modifying it */
    QEMU_DOMAIN_STORAGE_SOURCE_ACCESS_MODIFY_ACCESS = 1 << 5,
    /* whether the image is the top image of the backing chain (e.g. disk source) */
    QEMU_DOMAIN_STORAGE_SOURCE_ACCESS_CHAIN_TOP = 1 << 6,
} qemuDomainStorageSourceAccessFlags;
|
|
|
|
|
|
|
|
|
2019-07-09 15:39:33 +00:00
|
|
|
static int
|
|
|
|
qemuDomainStorageSourceAccessModifyNVMe(virQEMUDriverPtr driver,
|
|
|
|
virDomainObjPtr vm,
|
|
|
|
virStorageSourcePtr src,
|
|
|
|
bool revoke)
|
|
|
|
{
|
|
|
|
bool revoke_maxmemlock = false;
|
|
|
|
bool revoke_hostdev = false;
|
|
|
|
int ret = -1;
|
|
|
|
|
|
|
|
if (!virStorageSourceChainHasNVMe(src))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
VIR_DEBUG("Modifying access for a NVMe disk src=%p revoke=%d",
|
|
|
|
src, revoke);
|
|
|
|
|
|
|
|
if (revoke) {
|
|
|
|
revoke_maxmemlock = true;
|
|
|
|
revoke_hostdev = true;
|
|
|
|
ret = 0;
|
|
|
|
goto revoke;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (qemuDomainAdjustMaxMemLock(vm, true) < 0)
|
|
|
|
goto revoke;
|
|
|
|
|
|
|
|
revoke_maxmemlock = true;
|
|
|
|
|
|
|
|
if (qemuHostdevPrepareOneNVMeDisk(driver, vm->def->name, src) < 0)
|
|
|
|
goto revoke;
|
|
|
|
|
|
|
|
revoke_hostdev = true;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
revoke:
|
|
|
|
if (revoke_maxmemlock) {
|
|
|
|
if (qemuDomainAdjustMaxMemLock(vm, false) < 0)
|
|
|
|
VIR_WARN("Unable to change max memlock limit");
|
|
|
|
}
|
|
|
|
|
|
|
|
if (revoke_hostdev)
|
|
|
|
qemuHostdevReAttachOneNVMeDisk(driver, vm->def->name, src);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2019-04-18 08:04:26 +00:00
|
|
|
/**
 * qemuDomainStorageSourceAccessModify:
 * @driver: qemu driver struct
 * @vm: domain object
 * @src: Source to prepare
 * @flags: bitwise or of qemuDomainStorageSourceAccessFlags
 *
 * Setup the locks, cgroups and security permissions on a disk source and its
 * backing chain.
 *
 * Returns 0 on success and -1 on error. Reports libvirt error.
 */
static int
qemuDomainStorageSourceAccessModify(virQEMUDriverPtr driver,
                                    virDomainObjPtr vm,
                                    virStorageSourcePtr src,
                                    qemuDomainStorageSourceAccessFlags flags)
{
    g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
    const char *srcstr = NULLSTR(src->path);
    int ret = -1;
    virErrorPtr orig_err = NULL;
    bool chain = flags & QEMU_DOMAIN_STORAGE_SOURCE_ACCESS_CHAIN;
    bool force_ro = flags & QEMU_DOMAIN_STORAGE_SOURCE_ACCESS_FORCE_READ_ONLY;
    bool force_rw = flags & QEMU_DOMAIN_STORAGE_SOURCE_ACCESS_FORCE_READ_WRITE;
    bool revoke = flags & QEMU_DOMAIN_STORAGE_SOURCE_ACCESS_REVOKE;
    bool chain_top = flags & QEMU_DOMAIN_STORAGE_SOURCE_ACCESS_CHAIN_TOP;
    int rc;
    /* src->readonly is temporarily overridden below; restore it on exit */
    bool was_readonly = src->readonly;
    /* per-resource rollback markers: only resources acquired so far (or all
     * of them in revoke mode) are torn down on the 'revoke:' path */
    bool revoke_cgroup = false;
    bool revoke_label = false;
    bool revoke_namespace = false;
    bool revoke_nvme = false;
    bool revoke_lockspace = false;

    VIR_DEBUG("src='%s' readonly=%d force_ro=%d force_rw=%d revoke=%d chain=%d",
              NULLSTR(src->path), src->readonly, force_ro, force_rw, revoke, chain);

    if (force_ro)
        src->readonly = true;

    if (force_rw)
        src->readonly = false;

    /* just tear down the disk access */
    if (revoke) {
        /* preserve any pre-existing error across the teardown calls */
        virErrorPreserveLast(&orig_err);
        revoke_cgroup = true;
        revoke_label = true;
        revoke_namespace = true;
        revoke_nvme = true;
        revoke_lockspace = true;
        ret = 0;
        goto revoke;
    }

    if (virDomainLockImageAttach(driver->lockManager, cfg->uri, vm, src) < 0)
        goto revoke;

    revoke_lockspace = true;

    /* NVMe setup and namespace population are needed only when the VM does
     * not already have access to @src */
    if (!(flags & QEMU_DOMAIN_STORAGE_SOURCE_ACCESS_MODIFY_ACCESS)) {
        if (qemuDomainStorageSourceAccessModifyNVMe(driver, vm, src, false) < 0)
            goto revoke;

        revoke_nvme = true;

        if (qemuDomainNamespaceSetupDisk(vm, src) < 0)
            goto revoke;

        revoke_namespace = true;
    }

    if (qemuSecuritySetImageLabel(driver, vm, src, chain, chain_top) < 0)
        goto revoke;

    revoke_label = true;

    if (chain)
        rc = qemuSetupImageChainCgroup(vm, src);
    else
        rc = qemuSetupImageCgroup(vm, src);

    if (rc < 0)
        goto revoke;

    revoke_cgroup = true;

    ret = 0;
    goto cleanup;

 revoke:
    if (flags & QEMU_DOMAIN_STORAGE_SOURCE_ACCESS_SKIP_REVOKE)
        goto cleanup;

    /* best-effort teardown: failures are logged but don't change @ret */
    if (revoke_cgroup) {
        if (chain)
            rc = qemuTeardownImageChainCgroup(vm, src);
        else
            rc = qemuTeardownImageCgroup(vm, src);

        if (rc < 0)
            VIR_WARN("Unable to tear down cgroup access on %s", srcstr);
    }

    if (revoke_label) {
        if (qemuSecurityRestoreImageLabel(driver, vm, src, chain) < 0)
            VIR_WARN("Unable to restore security label on %s", srcstr);
    }

    if (revoke_namespace) {
        if (qemuDomainNamespaceTeardownDisk(vm, src) < 0)
            VIR_WARN("Unable to remove /dev entry for %s", srcstr);
    }

    if (revoke_nvme)
        qemuDomainStorageSourceAccessModifyNVMe(driver, vm, src, true);

    if (revoke_lockspace) {
        if (virDomainLockImageDetach(driver->lockManager, vm, src) < 0)
            VIR_WARN("Unable to release lock on %s", srcstr);
    }

 cleanup:
    src->readonly = was_readonly;
    virErrorRestore(&orig_err);

    return ret;
}
|
|
|
|
|
|
|
|
|
2019-04-18 08:18:51 +00:00
|
|
|
int
|
|
|
|
qemuDomainStorageSourceChainAccessAllow(virQEMUDriverPtr driver,
|
|
|
|
virDomainObjPtr vm,
|
|
|
|
virStorageSourcePtr src)
|
|
|
|
{
|
2020-02-27 10:20:51 +00:00
|
|
|
qemuDomainStorageSourceAccessFlags flags = QEMU_DOMAIN_STORAGE_SOURCE_ACCESS_CHAIN |
|
|
|
|
QEMU_DOMAIN_STORAGE_SOURCE_ACCESS_CHAIN_TOP;
|
2019-04-18 11:02:43 +00:00
|
|
|
|
|
|
|
return qemuDomainStorageSourceAccessModify(driver, vm, src, flags);
|
2019-04-18 08:18:51 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
int
|
|
|
|
qemuDomainStorageSourceChainAccessRevoke(virQEMUDriverPtr driver,
|
|
|
|
virDomainObjPtr vm,
|
|
|
|
virStorageSourcePtr src)
|
|
|
|
{
|
2019-04-18 12:36:38 +00:00
|
|
|
qemuDomainStorageSourceAccessFlags flags = QEMU_DOMAIN_STORAGE_SOURCE_ACCESS_REVOKE |
|
2020-02-27 10:20:51 +00:00
|
|
|
QEMU_DOMAIN_STORAGE_SOURCE_ACCESS_CHAIN |
|
|
|
|
QEMU_DOMAIN_STORAGE_SOURCE_ACCESS_CHAIN_TOP;
|
2019-04-18 11:02:43 +00:00
|
|
|
|
|
|
|
return qemuDomainStorageSourceAccessModify(driver, vm, src, flags);
|
2019-04-18 08:18:51 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2016-02-29 14:39:57 +00:00
|
|
|
/**
|
2019-04-18 07:41:38 +00:00
|
|
|
* qemuDomainStorageSourceAccessRevoke:
|
2016-02-29 14:39:57 +00:00
|
|
|
*
|
|
|
|
* Revoke access to a single backing chain element. This restores the labels,
|
|
|
|
* removes cgroup ACLs for devices and removes locks.
|
|
|
|
*/
|
|
|
|
void
|
2019-04-18 07:41:38 +00:00
|
|
|
qemuDomainStorageSourceAccessRevoke(virQEMUDriverPtr driver,
|
|
|
|
virDomainObjPtr vm,
|
|
|
|
virStorageSourcePtr elem)
|
2016-02-29 14:39:57 +00:00
|
|
|
{
|
2019-04-18 12:40:27 +00:00
|
|
|
qemuDomainStorageSourceAccessFlags flags = QEMU_DOMAIN_STORAGE_SOURCE_ACCESS_REVOKE;
|
2017-02-03 16:01:31 +00:00
|
|
|
|
2019-04-18 12:40:27 +00:00
|
|
|
ignore_value(qemuDomainStorageSourceAccessModify(driver, vm, elem, flags));
|
2016-02-29 14:39:57 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/**
|
2019-04-18 07:41:38 +00:00
|
|
|
* qemuDomainStorageSourceAccessAllow:
|
2017-11-22 15:42:53 +00:00
|
|
|
* @driver: qemu driver data
|
|
|
|
* @vm: domain object
|
|
|
|
* @elem: source structure to set access for
|
|
|
|
* @readonly: setup read-only access if true
|
|
|
|
* @newSource: @elem describes a storage source which @vm can't access yet
|
2020-02-27 10:20:51 +00:00
|
|
|
* @chainTop: @elem is top parent of backing chain
|
2016-02-29 14:39:57 +00:00
|
|
|
*
|
|
|
|
* Allow a VM access to a single element of a disk backing chain; this helper
|
|
|
|
* ensures that the lock manager, cgroup device controller, and security manager
|
2017-11-22 15:42:53 +00:00
|
|
|
* labelling are all aware of each new file before it is added to a chain.
|
|
|
|
*
|
|
|
|
* When modifying permissions of @elem which @vm can already access (is in the
|
|
|
|
* backing chain) @newSource needs to be set to false.
|
2020-02-27 10:20:51 +00:00
|
|
|
*
|
|
|
|
* The @chainTop flag must be set if the @elem image is the topmost image of a
|
|
|
|
* given backing chain or meant to become the topmost image (for e.g.
|
|
|
|
* snapshots, or blockcopy or even in the end for active layer block commit,
|
|
|
|
* where we discard the top of the backing chain so one of the intermediates
|
|
|
|
* (the base) becomes the top of the chain).
|
2017-11-22 15:42:53 +00:00
|
|
|
*/
|
2016-02-29 14:39:57 +00:00
|
|
|
int
|
2019-04-18 07:41:38 +00:00
|
|
|
qemuDomainStorageSourceAccessAllow(virQEMUDriverPtr driver,
|
|
|
|
virDomainObjPtr vm,
|
|
|
|
virStorageSourcePtr elem,
|
|
|
|
bool readonly,
|
2020-02-27 10:20:51 +00:00
|
|
|
bool newSource,
|
|
|
|
bool chainTop)
|
2016-02-29 14:39:57 +00:00
|
|
|
{
|
2019-04-18 14:20:35 +00:00
|
|
|
qemuDomainStorageSourceAccessFlags flags = QEMU_DOMAIN_STORAGE_SOURCE_ACCESS_SKIP_REVOKE;
|
2016-02-29 14:39:57 +00:00
|
|
|
|
2019-04-18 14:20:35 +00:00
|
|
|
if (readonly)
|
2019-06-11 14:38:02 +00:00
|
|
|
flags |= QEMU_DOMAIN_STORAGE_SOURCE_ACCESS_FORCE_READ_ONLY;
|
|
|
|
else
|
|
|
|
flags |= QEMU_DOMAIN_STORAGE_SOURCE_ACCESS_FORCE_READ_WRITE;
|
2017-02-03 16:01:31 +00:00
|
|
|
|
2019-04-18 14:20:35 +00:00
|
|
|
if (!newSource)
|
2019-06-11 13:16:09 +00:00
|
|
|
flags |= QEMU_DOMAIN_STORAGE_SOURCE_ACCESS_MODIFY_ACCESS;
|
2016-02-29 14:39:57 +00:00
|
|
|
|
2020-02-27 10:20:51 +00:00
|
|
|
if (chainTop)
|
|
|
|
flags |= QEMU_DOMAIN_STORAGE_SOURCE_ACCESS_CHAIN_TOP;
|
|
|
|
|
2019-04-18 14:20:35 +00:00
|
|
|
return qemuDomainStorageSourceAccessModify(driver, vm, elem, flags);
|
2016-02-29 14:39:57 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2015-09-15 13:25:18 +00:00
|
|
|
/*
 * Makes sure the @disk differs from @orig_disk only by the source
 * path and nothing else. Fields that are being checked and the
 * information whether they are nullable (may not be specified) or is
 * taken from the virDomainDiskDefFormat() code.
 *
 * Returns true if the media-change is permissible, false (with a libvirt
 * error reported) if any field other than the source differs.
 */
bool
qemuDomainDiskChangeSupported(virDomainDiskDefPtr disk,
                              virDomainDiskDefPtr orig_disk)
{
/* Compare a scalar field; a 'nullable' field is skipped when unset in the
 * new definition. Returns false from the enclosing function on mismatch. */
#define CHECK_EQ(field, field_name, nullable) \
    do { \
        if (nullable && !disk->field) \
            break; \
        if (disk->field != orig_disk->field) { \
            virReportError(VIR_ERR_OPERATION_UNSUPPORTED, \
                           _("cannot modify field '%s' of the disk"), \
                           field_name); \
            return false; \
        } \
    } while (0)

/* Compare a string field; skipped when unset in the new definition. */
#define CHECK_STREQ_NULLABLE(field, field_name) \
    do { \
        if (!disk->field) \
            break; \
        if (STRNEQ_NULLABLE(disk->field, orig_disk->field)) { \
            virReportError(VIR_ERR_OPERATION_UNSUPPORTED, \
                           _("cannot modify field '%s' of the disk"), \
                           field_name); \
            return false; \
        } \
    } while (0)

    CHECK_EQ(device, "device", false);
    CHECK_EQ(bus, "bus", false);
    if (STRNEQ(disk->dst, orig_disk->dst)) {
        virReportError(VIR_ERR_OPERATION_UNSUPPORTED,
                       _("cannot modify field '%s' of the disk"),
                       "target");
        return false;
    }
    CHECK_EQ(tray_status, "tray", true);
    CHECK_EQ(removable, "removable", true);

    /* geometry is only meaningful when fully specified */
    if (disk->geometry.cylinders &&
        disk->geometry.heads &&
        disk->geometry.sectors) {
        CHECK_EQ(geometry.cylinders, "geometry cylinders", false);
        CHECK_EQ(geometry.heads, "geometry heads", false);
        CHECK_EQ(geometry.sectors, "geometry sectors", false);
        CHECK_EQ(geometry.trans, "BIOS-translation-modus", true);
    }

    CHECK_EQ(blockio.logical_block_size,
             "blockio logical_block_size", false);
    CHECK_EQ(blockio.physical_block_size,
             "blockio physical_block_size", false);

    CHECK_EQ(blkdeviotune.total_bytes_sec,
             "blkdeviotune total_bytes_sec",
             true);
    CHECK_EQ(blkdeviotune.read_bytes_sec,
             "blkdeviotune read_bytes_sec",
             true);
    CHECK_EQ(blkdeviotune.write_bytes_sec,
             "blkdeviotune write_bytes_sec",
             true);
    CHECK_EQ(blkdeviotune.total_iops_sec,
             "blkdeviotune total_iops_sec",
             true);
    CHECK_EQ(blkdeviotune.read_iops_sec,
             "blkdeviotune read_iops_sec",
             true);
    CHECK_EQ(blkdeviotune.write_iops_sec,
             "blkdeviotune write_iops_sec",
             true);
    CHECK_EQ(blkdeviotune.total_bytes_sec_max,
             "blkdeviotune total_bytes_sec_max",
             true);
    CHECK_EQ(blkdeviotune.read_bytes_sec_max,
             "blkdeviotune read_bytes_sec_max",
             true);
    CHECK_EQ(blkdeviotune.write_bytes_sec_max,
             "blkdeviotune write_bytes_sec_max",
             true);
    CHECK_EQ(blkdeviotune.total_iops_sec_max,
             "blkdeviotune total_iops_sec_max",
             true);
    CHECK_EQ(blkdeviotune.read_iops_sec_max,
             "blkdeviotune read_iops_sec_max",
             true);
    CHECK_EQ(blkdeviotune.write_iops_sec_max,
             "blkdeviotune write_iops_sec_max",
             true);
    CHECK_EQ(blkdeviotune.size_iops_sec,
             "blkdeviotune size_iops_sec",
             true);
    CHECK_STREQ_NULLABLE(blkdeviotune.group_name,
                         "blkdeviotune group name");

    CHECK_STREQ_NULLABLE(serial,
                         "serial");
    CHECK_STREQ_NULLABLE(wwn,
                         "wwn");
    CHECK_STREQ_NULLABLE(vendor,
                         "vendor");
    CHECK_STREQ_NULLABLE(product,
                         "product");

    CHECK_EQ(cachemode, "cache", true);
    CHECK_EQ(error_policy, "error_policy", true);
    CHECK_EQ(rerror_policy, "rerror_policy", true);
    CHECK_EQ(iomode, "io", true);
    CHECK_EQ(ioeventfd, "ioeventfd", true);
    CHECK_EQ(event_idx, "event_idx", true);
    CHECK_EQ(copy_on_read, "copy_on_read", true);
    /* "snapshot" is a libvirt internal field and thus can be changed */
    /* startupPolicy is allowed to be updated. Therefore not checked here. */
    CHECK_EQ(transient, "transient", true);

    /* Note: For some address types the address auto generation for
     * @disk has still not happened at this point (e.g. driver
     * specific addresses) therefore we can't catch these possible
     * address modifications here. */
    if (disk->info.type != VIR_DOMAIN_DEVICE_ADDRESS_TYPE_NONE &&
        !virDomainDeviceInfoAddressIsEqual(&disk->info, &orig_disk->info)) {
        virReportError(VIR_ERR_OPERATION_UNSUPPORTED,
                       _("cannot modify field '%s' of the disk"),
                       "address");
        return false;
    }

    /* device alias is checked already in virDomainDefCompatibleDevice */

    CHECK_EQ(info.bootIndex, "boot order", true);
    CHECK_EQ(rawio, "rawio", true);
    CHECK_EQ(sgio, "sgio", true);
    CHECK_EQ(discard, "discard", true);
    CHECK_EQ(iothread, "iothread", true);

    CHECK_STREQ_NULLABLE(domain_name,
                         "backenddomain");

    /* checks for fields stored in disk->src */
    /* unfortunately 'readonly' and 'shared' can't be converted to tristate
     * values thus we need to ignore the check if the new value is 'false' */
    CHECK_EQ(src->readonly, "readonly", true);
    CHECK_EQ(src->shared, "shared", true);

    if (!virStoragePRDefIsEqual(disk->src->pr,
                                orig_disk->src->pr)) {
        virReportError(VIR_ERR_OPERATION_UNSUPPORTED,
                       _("cannot modify field '%s' of the disk"),
                       "reservations");
        return false;
    }

#undef CHECK_EQ
#undef CHECK_STREQ_NULLABLE

    return true;
}
|
|
|
|
|
2019-07-22 16:22:05 +00:00
|
|
|
|
2015-03-13 16:22:04 +00:00
|
|
|
bool
|
|
|
|
qemuDomainDiskBlockJobIsActive(virDomainDiskDefPtr disk)
|
|
|
|
{
|
2015-05-13 09:20:36 +00:00
|
|
|
qemuDomainDiskPrivatePtr diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk);
|
|
|
|
|
2015-03-13 16:22:04 +00:00
|
|
|
if (disk->mirror) {
|
|
|
|
virReportError(VIR_ERR_BLOCK_COPY_ACTIVE,
|
|
|
|
_("disk '%s' already in active block job"),
|
|
|
|
disk->dst);
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2018-10-17 06:57:08 +00:00
|
|
|
if (diskPriv->blockjob &&
|
2019-01-17 15:34:11 +00:00
|
|
|
qemuBlockJobIsRunning(diskPriv->blockjob)) {
|
2015-03-13 16:22:04 +00:00
|
|
|
virReportError(VIR_ERR_OPERATION_UNSUPPORTED,
|
|
|
|
_("disk '%s' already in active block job"),
|
|
|
|
disk->dst);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2015-05-13 07:47:21 +00:00
|
|
|
/**
|
|
|
|
* qemuDomainHasBlockjob:
|
|
|
|
* @vm: domain object
|
|
|
|
* @copy_only: Reject only block copy job
|
|
|
|
*
|
|
|
|
* Return true if @vm has at least one disk involved in a current block
|
|
|
|
* copy/commit/pull job. If @copy_only is true this returns true only if the
|
|
|
|
* disk is involved in a block copy.
|
|
|
|
* */
|
|
|
|
bool
|
|
|
|
qemuDomainHasBlockjob(virDomainObjPtr vm,
|
|
|
|
bool copy_only)
|
|
|
|
{
|
|
|
|
size_t i;
|
|
|
|
for (i = 0; i < vm->def->ndisks; i++) {
|
2015-05-13 09:20:36 +00:00
|
|
|
virDomainDiskDefPtr disk = vm->def->disks[i];
|
|
|
|
qemuDomainDiskPrivatePtr diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk);
|
|
|
|
|
2019-01-17 15:34:11 +00:00
|
|
|
if (!copy_only && diskPriv->blockjob &&
|
|
|
|
qemuBlockJobIsRunning(diskPriv->blockjob))
|
2015-05-13 07:47:21 +00:00
|
|
|
return true;
|
|
|
|
|
2015-05-13 09:20:36 +00:00
|
|
|
if (disk->mirror && disk->mirrorJob == VIR_DOMAIN_BLOCK_JOB_TYPE_COPY)
|
2015-05-13 07:47:21 +00:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2013-07-19 13:08:29 +00:00
|
|
|
int
|
|
|
|
qemuDomainUpdateDeviceList(virQEMUDriverPtr driver,
|
2014-08-12 02:54:42 +00:00
|
|
|
virDomainObjPtr vm,
|
|
|
|
int asyncJob)
|
2013-07-19 13:08:29 +00:00
|
|
|
{
|
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
|
|
|
char **aliases;
|
2014-12-16 09:40:58 +00:00
|
|
|
int rc;
|
2013-07-19 13:08:29 +00:00
|
|
|
|
2014-08-12 02:54:42 +00:00
|
|
|
if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
|
|
|
|
return -1;
|
2014-12-16 09:40:58 +00:00
|
|
|
rc = qemuMonitorGetDeviceAliases(priv->mon, &aliases);
|
|
|
|
if (qemuDomainObjExitMonitor(driver, vm) < 0)
|
|
|
|
return -1;
|
|
|
|
if (rc < 0)
|
2013-07-19 13:08:29 +00:00
|
|
|
return -1;
|
|
|
|
|
2020-08-02 17:36:03 +00:00
|
|
|
g_strfreev(priv->qemuDevices);
|
2013-07-19 13:08:29 +00:00
|
|
|
priv->qemuDevices = aliases;
|
|
|
|
return 0;
|
|
|
|
}
|
qemu: Introduce qemuDomainDefCheckABIStability
https://bugzilla.redhat.com/show_bug.cgi?id=994364
Whenever we check for ABI stability, we have new xml (e.g. provided by
user, or obtained from snapshot, whatever) which we compare to old xml
and see if ABI won't break. However, if the new xml was produced via
virDomainGetXMLDesc(..., VIR_DOMAIN_XML_MIGRATABLE) it lacks some
devices, e.g. 'pci-root' controller. Hence, the ABI stability check
fails even though it is stable. Moreover, we can't simply fix
virDomainDefCheckABIStability because removing the correct devices is
task for the driver. For instance, qemu driver wants to remove the usb
controller too, while LXC driver doesn't. That's why we need special
qemu wrapper over virDomainDefCheckABIStability which removes the
correct devices from domain XML, produces MIGRATABLE xml and calls the
check ABI stability function.
Signed-off-by: Michal Privoznik <mprivozn@redhat.com>
2013-10-10 08:53:56 +00:00
|
|
|
|
2015-01-19 12:21:09 +00:00
|
|
|
|
|
|
|
/* Query qemu for the current state of memory devices (DIMMs) and update the
 * address/slot information in the domain definition accordingly.
 *
 * Returns 0 on success (including when qemu does not support the query),
 * -1 on failure. */
int
qemuDomainUpdateMemoryDeviceInfo(virQEMUDriverPtr driver,
                                 virDomainObjPtr vm,
                                 int asyncJob)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    GHashTable *meminfo = NULL;
    int rc;
    size_t i;

    /* nothing to update without memory devices */
    if (vm->def->nmems == 0)
        return 0;

    if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
        return -1;

    rc = qemuMonitorGetMemoryDeviceInfo(priv->mon, &meminfo);

    if (qemuDomainObjExitMonitor(driver, vm) < 0) {
        virHashFree(meminfo);
        return -1;
    }

    /* if qemu doesn't support the info request, just carry on */
    if (rc == -2)
        return 0;

    if (rc < 0)
        return -1;

    for (i = 0; i < vm->def->nmems; i++) {
        virDomainMemoryDefPtr mem = vm->def->mems[i];
        qemuMonitorMemoryDeviceInfoPtr dimm;

        /* devices without an alias can't be matched to monitor data */
        if (!mem->info.alias)
            continue;

        if (!(dimm = virHashLookup(meminfo, mem->info.alias)))
            continue;

        /* record the slot/base address qemu actually assigned */
        mem->info.type = VIR_DOMAIN_DEVICE_ADDRESS_TYPE_DIMM;
        mem->info.addr.dimm.slot = dimm->slot;
        mem->info.addr.dimm.base = dimm->address;
    }

    virHashFree(meminfo);
    return 0;
}
|
|
|
|
|
|
|
|
|
2017-05-19 14:46:27 +00:00
|
|
|
static bool
|
|
|
|
qemuDomainABIStabilityCheck(const virDomainDef *src,
|
|
|
|
const virDomainDef *dst)
|
|
|
|
{
|
2017-10-17 08:42:54 +00:00
|
|
|
size_t i;
|
|
|
|
|
2017-05-19 14:46:27 +00:00
|
|
|
if (src->mem.source != dst->mem.source) {
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
|
|
|
|
_("Target memoryBacking source '%s' doesn't "
|
|
|
|
"match source memoryBacking source'%s'"),
|
|
|
|
virDomainMemorySourceTypeToString(dst->mem.source),
|
|
|
|
virDomainMemorySourceTypeToString(src->mem.source));
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2017-10-17 08:42:54 +00:00
|
|
|
for (i = 0; i < src->nmems; i++) {
|
|
|
|
const char *srcAlias = src->mems[i]->info.alias;
|
|
|
|
const char *dstAlias = dst->mems[i]->info.alias;
|
|
|
|
|
|
|
|
if (STRNEQ_NULLABLE(srcAlias, dstAlias)) {
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
|
|
|
|
_("Target memory device alias '%s' doesn't "
|
|
|
|
"match source alias '%s'"),
|
|
|
|
NULLSTR(srcAlias), NULLSTR(dstAlias));
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-05-19 14:46:27 +00:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/* Driver-level ABI stability callback table; plugs the qemu-specific check
 * above into the generic virDomainDefCheckABIStability machinery. */
virDomainABIStability virQEMUDriverDomainABIStability = {
    .domain = qemuDomainABIStabilityCheck,
};
|
|
|
|
|
|
|
|
|
2017-06-14 11:43:12 +00:00
|
|
|
static bool
|
|
|
|
qemuDomainMigratableDefCheckABIStability(virQEMUDriverPtr driver,
|
|
|
|
virDomainDefPtr src,
|
|
|
|
virDomainDefPtr migratableSrc,
|
|
|
|
virDomainDefPtr dst,
|
|
|
|
virDomainDefPtr migratableDst)
|
|
|
|
{
|
|
|
|
if (!virDomainDefCheckABIStabilityFlags(migratableSrc,
|
|
|
|
migratableDst,
|
|
|
|
driver->xmlopt,
|
|
|
|
VIR_DOMAIN_DEF_ABI_CHECK_SKIP_VOLATILE))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
/* Force update any skipped values from the volatile flag */
|
|
|
|
dst->mem.cur_balloon = src->mem.cur_balloon;
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2017-06-14 11:43:37 +00:00
|
|
|
/* XML flags used to produce the migratable variant of a definition for
 * the ABI stability helpers below (undefined again after last use). */
#define COPY_FLAGS (VIR_DOMAIN_XML_SECURE | \
                    VIR_DOMAIN_XML_MIGRATABLE)
|
|
|
|
|
qemu: Introduce qemuDomainDefCheckABIStability
https://bugzilla.redhat.com/show_bug.cgi?id=994364
Whenever we check for ABI stability, we have new xml (e.g. provided by
user, or obtained from snapshot, whatever) which we compare to old xml
and see if ABI won't break. However, if the new xml was produced via
virDomainGetXMLDesc(..., VIR_DOMAIN_XML_MIGRATABLE) it lacks some
devices, e.g. 'pci-root' controller. Hence, the ABI stability check
fails even though it is stable. Moreover, we can't simply fix
virDomainDefCheckABIStability because removing the correct devices is
task for the driver. For instance, qemu driver wants to remove the usb
controller too, while LXC driver doesn't. That's why we need special
qemu wrapper over virDomainDefCheckABIStability which removes the
correct devices from domain XML, produces MIGRATABLE xml and calls the
check ABI stability function.
Signed-off-by: Michal Privoznik <mprivozn@redhat.com>
2013-10-10 08:53:56 +00:00
|
|
|
bool
|
|
|
|
qemuDomainDefCheckABIStability(virQEMUDriverPtr driver,
|
2019-08-02 15:36:56 +00:00
|
|
|
virQEMUCapsPtr qemuCaps,
|
qemu: Introduce qemuDomainDefCheckABIStability
https://bugzilla.redhat.com/show_bug.cgi?id=994364
Whenever we check for ABI stability, we have new xml (e.g. provided by
user, or obtained from snapshot, whatever) which we compare to old xml
and see if ABI won't break. However, if the new xml was produced via
virDomainGetXMLDesc(..., VIR_DOMAIN_XML_MIGRATABLE) it lacks some
devices, e.g. 'pci-root' controller. Hence, the ABI stability check
fails even though it is stable. Moreover, we can't simply fix
virDomainDefCheckABIStability because removing the correct devices is
task for the driver. For instance, qemu driver wants to remove the usb
controller too, while LXC driver doesn't. That's why we need special
qemu wrapper over virDomainDefCheckABIStability which removes the
correct devices from domain XML, produces MIGRATABLE xml and calls the
check ABI stability function.
Signed-off-by: Michal Privoznik <mprivozn@redhat.com>
2013-10-10 08:53:56 +00:00
|
|
|
virDomainDefPtr src,
|
|
|
|
virDomainDefPtr dst)
|
|
|
|
{
|
2020-11-12 20:27:04 +00:00
|
|
|
g_autoptr(virDomainDef) migratableDefSrc = NULL;
|
|
|
|
g_autoptr(virDomainDef) migratableDefDst = NULL;
|
qemu: Introduce qemuDomainDefCheckABIStability
https://bugzilla.redhat.com/show_bug.cgi?id=994364
Whenever we check for ABI stability, we have new xml (e.g. provided by
user, or obtained from snapshot, whatever) which we compare to old xml
and see if ABI won't break. However, if the new xml was produced via
virDomainGetXMLDesc(..., VIR_DOMAIN_XML_MIGRATABLE) it lacks some
devices, e.g. 'pci-root' controller. Hence, the ABI stability check
fails even though it is stable. Moreover, we can't simply fix
virDomainDefCheckABIStability because removing the correct devices is
task for the driver. For instance, qemu driver wants to remove the usb
controller too, while LXC driver doesn't. That's why we need special
qemu wrapper over virDomainDefCheckABIStability which removes the
correct devices from domain XML, produces MIGRATABLE xml and calls the
check ABI stability function.
Signed-off-by: Michal Privoznik <mprivozn@redhat.com>
2013-10-10 08:53:56 +00:00
|
|
|
|
2019-08-02 15:36:56 +00:00
|
|
|
if (!(migratableDefSrc = qemuDomainDefCopy(driver, qemuCaps, src, COPY_FLAGS)) ||
|
|
|
|
!(migratableDefDst = qemuDomainDefCopy(driver, qemuCaps, dst, COPY_FLAGS)))
|
2020-11-12 20:37:31 +00:00
|
|
|
return false;
|
qemu: Introduce qemuDomainDefCheckABIStability
https://bugzilla.redhat.com/show_bug.cgi?id=994364
Whenever we check for ABI stability, we have new xml (e.g. provided by
user, or obtained from snapshot, whatever) which we compare to old xml
and see if ABI won't break. However, if the new xml was produced via
virDomainGetXMLDesc(..., VIR_DOMAIN_XML_MIGRATABLE) it lacks some
devices, e.g. 'pci-root' controller. Hence, the ABI stability check
fails even though it is stable. Moreover, we can't simply fix
virDomainDefCheckABIStability because removing the correct devices is
task for the driver. For instance, qemu driver wants to remove the usb
controller too, while LXC driver doesn't. That's why we need special
qemu wrapper over virDomainDefCheckABIStability which removes the
correct devices from domain XML, produces MIGRATABLE xml and calls the
check ABI stability function.
Signed-off-by: Michal Privoznik <mprivozn@redhat.com>
2013-10-10 08:53:56 +00:00
|
|
|
|
2020-11-12 20:37:31 +00:00
|
|
|
return qemuDomainMigratableDefCheckABIStability(driver,
|
|
|
|
src, migratableDefSrc,
|
|
|
|
dst, migratableDefDst);
|
qemu: Introduce qemuDomainDefCheckABIStability
https://bugzilla.redhat.com/show_bug.cgi?id=994364
Whenever we check for ABI stability, we have new xml (e.g. provided by
user, or obtained from snapshot, whatever) which we compare to old xml
and see if ABI won't break. However, if the new xml was produced via
virDomainGetXMLDesc(..., VIR_DOMAIN_XML_MIGRATABLE) it lacks some
devices, e.g. 'pci-root' controller. Hence, the ABI stability check
fails even though it is stable. Moreover, we can't simply fix
virDomainDefCheckABIStability because removing the correct devices is
task for the driver. For instance, qemu driver wants to remove the usb
controller too, while LXC driver doesn't. That's why we need special
qemu wrapper over virDomainDefCheckABIStability which removes the
correct devices from domain XML, produces MIGRATABLE xml and calls the
check ABI stability function.
Signed-off-by: Michal Privoznik <mprivozn@redhat.com>
2013-10-10 08:53:56 +00:00
|
|
|
}
|
2014-02-27 10:45:13 +00:00
|
|
|
|
2017-06-14 11:43:37 +00:00
|
|
|
|
|
|
|
bool
|
|
|
|
qemuDomainCheckABIStability(virQEMUDriverPtr driver,
|
|
|
|
virDomainObjPtr vm,
|
|
|
|
virDomainDefPtr dst)
|
|
|
|
{
|
2019-08-02 15:36:56 +00:00
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
2020-11-12 20:27:04 +00:00
|
|
|
g_autoptr(virDomainDef) migratableSrc = NULL;
|
|
|
|
g_autoptr(virDomainDef) migratableDst = NULL;
|
2020-01-09 18:33:44 +00:00
|
|
|
g_autofree char *xml = NULL;
|
2017-06-14 11:43:37 +00:00
|
|
|
|
|
|
|
if (!(xml = qemuDomainFormatXML(driver, vm, COPY_FLAGS)) ||
|
2019-08-02 15:36:56 +00:00
|
|
|
!(migratableSrc = qemuDomainDefFromXML(driver, priv->qemuCaps, xml)) ||
|
|
|
|
!(migratableDst = qemuDomainDefCopy(driver, priv->qemuCaps, dst, COPY_FLAGS)))
|
2020-11-12 20:37:31 +00:00
|
|
|
return false;
|
2017-06-14 11:43:37 +00:00
|
|
|
|
2020-11-12 20:37:31 +00:00
|
|
|
return qemuDomainMigratableDefCheckABIStability(driver,
|
|
|
|
vm->def, migratableSrc,
|
|
|
|
dst, migratableDst);
|
2017-06-14 11:43:37 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
#undef COPY_FLAGS
|
|
|
|
|
|
|
|
|
2014-02-27 10:45:13 +00:00
|
|
|
bool
|
2015-02-27 13:06:47 +00:00
|
|
|
qemuDomainAgentAvailable(virDomainObjPtr vm,
|
2014-02-27 10:45:13 +00:00
|
|
|
bool reportError)
|
|
|
|
{
|
2015-02-27 13:06:47 +00:00
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
|
|
|
|
2015-07-03 06:58:05 +00:00
|
|
|
if (virDomainObjGetState(vm, NULL) != VIR_DOMAIN_RUNNING) {
|
|
|
|
if (reportError) {
|
|
|
|
virReportError(VIR_ERR_OPERATION_INVALID, "%s",
|
|
|
|
_("domain is not running"));
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
2014-02-27 10:45:13 +00:00
|
|
|
if (priv->agentError) {
|
|
|
|
if (reportError) {
|
|
|
|
virReportError(VIR_ERR_AGENT_UNRESPONSIVE, "%s",
|
|
|
|
_("QEMU guest agent is not "
|
|
|
|
"available due to an error"));
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
if (!priv->agent) {
|
2015-04-24 14:43:38 +00:00
|
|
|
if (qemuFindAgentConfig(vm->def)) {
|
|
|
|
if (reportError) {
|
|
|
|
virReportError(VIR_ERR_AGENT_UNRESPONSIVE, "%s",
|
|
|
|
_("QEMU guest agent is not connected"));
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
} else {
|
|
|
|
if (reportError) {
|
|
|
|
virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
|
|
|
|
_("QEMU guest agent is not configured"));
|
|
|
|
}
|
|
|
|
return false;
|
2014-02-27 10:45:13 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
qemu: completely rework reference counting
There is one problem that causes various errors in the daemon. When
domain is waiting for a job, it is unlocked while waiting on the
condition. However, if that domain is for example transient and being
removed in another API (e.g. cancelling incoming migration), it get's
unref'd. If the first call, that was waiting, fails to get the job, it
unref's the domain object, and because it was the last reference, it
causes clearing of the whole domain object. However, when finishing the
call, the domain must be unlocked, but there is no way for the API to
know whether it was cleaned or not (unless there is some ugly temporary
variable, but let's scratch that).
The root cause is that our APIs don't ref the objects they are using and
all use the implicit reference that the object has when it is in the
domain list. That reference can be removed when the API is waiting for
a job. And because each domain doesn't do its ref'ing, it results in
the ugly checking of the return value of virObjectUnref() that we have
everywhere.
This patch changes qemuDomObjFromDomain() to ref the domain (using
virDomainObjListFindByUUIDRef()) and adds qemuDomObjEndAPI() which
should be the only function in which the return value of
virObjectUnref() is checked. This makes all reference counting
deterministic and makes the code a bit clearer.
Signed-off-by: Martin Kletzander <mkletzan@redhat.com>
2014-12-04 13:41:36 +00:00
|
|
|
|
2015-02-18 13:31:47 +00:00
|
|
|
|
2020-09-15 02:42:56 +00:00
|
|
|
static unsigned long long
|
2020-08-24 16:29:44 +00:00
|
|
|
qemuDomainGetMemorySizeAlignment(const virDomainDef *def)
|
2015-07-31 14:00:20 +00:00
|
|
|
{
|
2015-09-21 16:10:55 +00:00
|
|
|
/* PPC requires the memory sizes to be rounded to 256MiB increments, so
|
|
|
|
* round them to the size always. */
|
|
|
|
if (ARCH_IS_PPC64(def->os.arch))
|
|
|
|
return 256 * 1024;
|
|
|
|
|
2015-07-31 14:00:20 +00:00
|
|
|
/* Align memory size. QEMU requires rounding to next 4KiB block.
|
2020-01-24 20:30:04 +00:00
|
|
|
* We'll take the "traditional" path and round it to 1MiB */
|
2015-07-31 14:00:20 +00:00
|
|
|
|
|
|
|
return 1024;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2015-09-23 11:07:03 +00:00
|
|
|
static unsigned long long
|
|
|
|
qemuDomainGetMemoryModuleSizeAlignment(const virDomainDef *def,
|
2019-10-14 12:45:33 +00:00
|
|
|
const virDomainMemoryDef *mem G_GNUC_UNUSED)
|
2015-09-23 11:07:03 +00:00
|
|
|
{
|
|
|
|
/* PPC requires the memory sizes to be rounded to 256MiB increments, so
|
|
|
|
* round them to the size always. */
|
|
|
|
if (ARCH_IS_PPC64(def->os.arch))
|
|
|
|
return 256 * 1024;
|
|
|
|
|
|
|
|
/* dimm memory modules require 2MiB alignment rather than the 1MiB we are
|
|
|
|
* using elsewhere. */
|
|
|
|
return 2048;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2015-02-18 13:31:47 +00:00
|
|
|
/**
 * qemuDomainAlignMemorySizes:
 * @def: domain definition (modified in place)
 *
 * Rounds up all memory sizes in @def (NUMA node sizes, initial memory,
 * maximum memory, and hotpluggable memory modules) to the alignment QEMU
 * requires for the guest architecture, then recomputes the total memory
 * size. Each rounded value is checked for overflow past the maximum
 * supported memory size.
 *
 * Returns 0 on success, -1 (with an error reported) when a rounded size
 * overflows.
 */
int
qemuDomainAlignMemorySizes(virDomainDefPtr def)
{
    /* sizes are stored in KiB, hence the >> 10 on the byte limits */
    unsigned long long maxmemkb = virMemoryMaxValue(false) >> 10;
    unsigned long long maxmemcapped = virMemoryMaxValue(true) >> 10;
    unsigned long long initialmem = 0;
    unsigned long long hotplugmem = 0;
    unsigned long long mem;
    unsigned long long align = qemuDomainGetMemorySizeAlignment(def);
    size_t ncells = virDomainNumaGetNodeCount(def->numa);
    size_t i;

    /* align NUMA cell sizes if relevant */
    for (i = 0; i < ncells; i++) {
        mem = VIR_ROUND_UP(virDomainNumaGetNodeMemorySize(def->numa, i), align);
        initialmem += mem;

        if (mem > maxmemkb) {
            virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
                           _("memory size of NUMA node '%zu' overflowed after "
                             "alignment"), i);
            return -1;
        }
        virDomainNumaSetNodeMemorySize(def->numa, i, mem);
    }

    /* align initial memory size, if NUMA is present calculate it as total of
     * individual aligned NUMA node sizes */
    if (initialmem == 0)
        initialmem = VIR_ROUND_UP(virDomainDefGetMemoryInitial(def), align);

    if (initialmem > maxmemcapped) {
        virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                       _("initial memory size overflowed after alignment"));
        return -1;
    }

    def->mem.max_memory = VIR_ROUND_UP(def->mem.max_memory, align);
    if (def->mem.max_memory > maxmemkb) {
        virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                       _("maximum memory size overflowed after alignment"));
        return -1;
    }

    /* Align memory module sizes */
    for (i = 0; i < def->nmems; i++) {
        /* NVDIMMs on pseries have their own alignment rules */
        if (def->mems[i]->model == VIR_DOMAIN_MEMORY_MODEL_NVDIMM &&
            ARCH_IS_PPC64(def->os.arch)) {
            if (virDomainNVDimmAlignSizePseries(def->mems[i]) < 0)
                return -1;
        } else {
            align = qemuDomainGetMemoryModuleSizeAlignment(def, def->mems[i]);
            def->mems[i]->size = VIR_ROUND_UP(def->mems[i]->size, align);
        }

        hotplugmem += def->mems[i]->size;

        if (def->mems[i]->size > maxmemkb) {
            virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
                           _("size of memory module '%zu' overflowed after "
                             "alignment"), i);
            return -1;
        }
    }

    /* total = aligned initial memory + aligned hotplug modules */
    virDomainDefSetMemoryTotal(def, initialmem + hotplugmem);

    return 0;
}
|
2015-01-09 09:40:37 +00:00
|
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
* qemuDomainMemoryDeviceAlignSize:
|
|
|
|
* @mem: memory device definition object
|
|
|
|
*
|
|
|
|
* Aligns the size of the memory module as qemu enforces it. The size is updated
|
2020-07-09 04:42:21 +00:00
|
|
|
* inplace. Default rounding is now to 1 MiB (qemu requires rounding to page,
|
2015-01-09 09:40:37 +00:00
|
|
|
* size so this should be safe).
|
|
|
|
*/
|
2020-09-15 02:42:56 +00:00
|
|
|
int
|
2015-07-31 14:00:20 +00:00
|
|
|
qemuDomainMemoryDeviceAlignSize(virDomainDefPtr def,
|
|
|
|
virDomainMemoryDefPtr mem)
|
2015-01-09 09:40:37 +00:00
|
|
|
{
|
2020-09-15 02:42:56 +00:00
|
|
|
if (mem->model == VIR_DOMAIN_MEMORY_MODEL_NVDIMM &&
|
|
|
|
ARCH_IS_PPC64(def->os.arch)) {
|
2020-09-15 01:02:47 +00:00
|
|
|
return virDomainNVDimmAlignSizePseries(mem);
|
2020-09-15 02:42:56 +00:00
|
|
|
} else {
|
2020-03-23 19:40:49 +00:00
|
|
|
mem->size = VIR_ROUND_UP(mem->size,
|
|
|
|
qemuDomainGetMemorySizeAlignment(def));
|
|
|
|
}
|
2020-09-15 02:42:56 +00:00
|
|
|
|
|
|
|
return 0;
|
2015-01-09 09:40:37 +00:00
|
|
|
}
|
2015-03-31 15:24:50 +00:00
|
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
* qemuDomainGetMonitor:
|
|
|
|
* @vm: domain object
|
|
|
|
*
|
|
|
|
* Returns the monitor pointer corresponding to the domain object @vm.
|
|
|
|
*/
|
|
|
|
qemuMonitorPtr
|
|
|
|
qemuDomainGetMonitor(virDomainObjPtr vm)
|
|
|
|
{
|
|
|
|
return ((qemuDomainObjPrivatePtr) vm->privateData)->mon;
|
|
|
|
}
|
2015-03-31 15:29:35 +00:00
|
|
|
|
|
|
|
|
2015-04-24 14:43:38 +00:00
|
|
|
/**
|
|
|
|
* qemuFindAgentConfig:
|
|
|
|
* @def: domain definition
|
|
|
|
*
|
|
|
|
* Returns the pointer to the channel definition that is used to access the
|
|
|
|
* guest agent if the agent is configured or NULL otherwise.
|
|
|
|
*/
|
2016-01-08 15:21:30 +00:00
|
|
|
virDomainChrDefPtr
|
2015-04-24 14:43:38 +00:00
|
|
|
qemuFindAgentConfig(virDomainDefPtr def)
|
|
|
|
{
|
|
|
|
size_t i;
|
|
|
|
|
|
|
|
for (i = 0; i < def->nchannels; i++) {
|
|
|
|
virDomainChrDefPtr channel = def->channels[i];
|
|
|
|
|
|
|
|
if (channel->targetType != VIR_DOMAIN_CHR_CHANNEL_TARGET_TYPE_VIRTIO)
|
|
|
|
continue;
|
|
|
|
|
2016-01-08 15:21:30 +00:00
|
|
|
if (STREQ_NULLABLE(channel->target.name, "org.qemu.guest_agent.0"))
|
|
|
|
return channel;
|
2015-04-24 14:43:38 +00:00
|
|
|
}
|
|
|
|
|
2016-01-08 15:21:30 +00:00
|
|
|
return NULL;
|
2015-04-24 14:43:38 +00:00
|
|
|
}
|
2015-04-28 09:21:52 +00:00
|
|
|
|
|
|
|
|
2018-11-28 16:34:07 +00:00
|
|
|
static bool
|
2018-11-28 15:48:41 +00:00
|
|
|
qemuDomainMachineIsQ35(const char *machine,
|
2018-11-28 15:56:35 +00:00
|
|
|
const virArch arch)
|
2015-04-28 09:21:52 +00:00
|
|
|
{
|
2018-11-28 15:56:35 +00:00
|
|
|
if (!ARCH_IS_X86(arch))
|
|
|
|
return false;
|
|
|
|
|
2019-02-06 11:37:54 +00:00
|
|
|
if (STREQ(machine, "q35") ||
|
|
|
|
STRPREFIX(machine, "pc-q35-")) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
2015-04-28 09:21:52 +00:00
|
|
|
}
|
2015-05-27 13:51:52 +00:00
|
|
|
|
|
|
|
|
2018-11-28 16:34:07 +00:00
|
|
|
static bool
|
2018-11-28 15:48:41 +00:00
|
|
|
qemuDomainMachineIsI440FX(const char *machine,
|
2018-11-28 15:56:35 +00:00
|
|
|
const virArch arch)
|
2017-04-18 10:43:58 +00:00
|
|
|
{
|
2018-11-28 15:56:35 +00:00
|
|
|
if (!ARCH_IS_X86(arch))
|
|
|
|
return false;
|
|
|
|
|
2019-02-06 11:37:54 +00:00
|
|
|
if (STREQ(machine, "pc") ||
|
|
|
|
STRPREFIX(machine, "pc-0.") ||
|
|
|
|
STRPREFIX(machine, "pc-1.") ||
|
|
|
|
STRPREFIX(machine, "pc-i440fx-") ||
|
|
|
|
STRPREFIX(machine, "rhel")) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
2017-04-18 10:43:58 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2018-11-28 16:34:07 +00:00
|
|
|
static bool
|
2018-11-28 15:48:41 +00:00
|
|
|
qemuDomainMachineIsS390CCW(const char *machine,
|
2018-11-28 15:56:35 +00:00
|
|
|
const virArch arch)
|
2016-09-03 01:41:43 +00:00
|
|
|
{
|
2018-11-28 15:56:35 +00:00
|
|
|
if (!ARCH_IS_S390(arch))
|
|
|
|
return false;
|
|
|
|
|
2019-02-06 11:37:54 +00:00
|
|
|
if (STRPREFIX(machine, "s390-ccw"))
|
|
|
|
return true;
|
|
|
|
|
|
|
|
return false;
|
2018-11-28 15:15:31 +00:00
|
|
|
}
|
2016-09-03 01:41:43 +00:00
|
|
|
|
2018-11-28 15:15:31 +00:00
|
|
|
|
2018-11-28 16:34:07 +00:00
|
|
|
/* You should normally avoid this function and use
|
|
|
|
* qemuDomainIsARMVirt() instead. */
|
2018-11-28 15:15:31 +00:00
|
|
|
bool
|
|
|
|
qemuDomainMachineIsARMVirt(const char *machine,
|
|
|
|
const virArch arch)
|
|
|
|
{
|
|
|
|
if (arch != VIR_ARCH_ARMV6L &&
|
|
|
|
arch != VIR_ARCH_ARMV7L &&
|
2019-02-06 11:37:54 +00:00
|
|
|
arch != VIR_ARCH_AARCH64) {
|
2016-09-03 01:41:43 +00:00
|
|
|
return false;
|
2019-02-06 11:37:54 +00:00
|
|
|
}
|
2016-09-03 01:41:43 +00:00
|
|
|
|
2019-02-06 11:37:54 +00:00
|
|
|
if (STREQ(machine, "virt") ||
|
|
|
|
STRPREFIX(machine, "virt-")) {
|
|
|
|
return true;
|
|
|
|
}
|
2016-09-03 01:41:43 +00:00
|
|
|
|
2019-02-06 11:37:54 +00:00
|
|
|
return false;
|
2016-09-03 01:41:43 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2018-11-28 16:34:07 +00:00
|
|
|
static bool
|
2018-11-28 15:15:31 +00:00
|
|
|
qemuDomainMachineIsRISCVVirt(const char *machine,
|
|
|
|
const virArch arch)
|
2016-09-03 01:41:43 +00:00
|
|
|
{
|
2018-11-28 15:15:31 +00:00
|
|
|
if (!ARCH_IS_RISCV(arch))
|
|
|
|
return false;
|
2016-09-03 01:41:43 +00:00
|
|
|
|
2019-02-06 11:37:54 +00:00
|
|
|
if (STREQ(machine, "virt") ||
|
|
|
|
STRPREFIX(machine, "virt-")) {
|
|
|
|
return true;
|
|
|
|
}
|
2016-09-03 01:41:43 +00:00
|
|
|
|
2019-02-06 11:37:54 +00:00
|
|
|
return false;
|
2018-11-28 15:15:31 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2018-11-28 16:34:07 +00:00
|
|
|
/* You should normally avoid this function and use
|
|
|
|
* qemuDomainIsPSeries() instead. */
|
2018-11-28 15:15:31 +00:00
|
|
|
bool
|
|
|
|
qemuDomainMachineIsPSeries(const char *machine,
|
|
|
|
const virArch arch)
|
|
|
|
{
|
|
|
|
if (!ARCH_IS_PPC64(arch))
|
|
|
|
return false;
|
|
|
|
|
2019-02-06 11:37:54 +00:00
|
|
|
if (STREQ(machine, "pseries") ||
|
|
|
|
STRPREFIX(machine, "pseries-")) {
|
|
|
|
return true;
|
|
|
|
}
|
2016-09-03 01:41:43 +00:00
|
|
|
|
2019-02-06 11:37:54 +00:00
|
|
|
return false;
|
2016-09-03 01:41:43 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2018-11-28 16:34:07 +00:00
|
|
|
/* You should normally avoid this function and use
|
|
|
|
* qemuDomainHasBuiltinIDE() instead. */
|
2015-06-22 13:20:55 +00:00
|
|
|
bool
|
2018-11-28 15:48:41 +00:00
|
|
|
qemuDomainMachineHasBuiltinIDE(const char *machine,
|
|
|
|
const virArch arch)
|
2015-06-22 13:20:55 +00:00
|
|
|
{
|
2018-11-28 15:48:41 +00:00
|
|
|
return qemuDomainMachineIsI440FX(machine, arch) ||
|
2018-11-28 15:15:31 +00:00
|
|
|
STREQ(machine, "malta") ||
|
|
|
|
STREQ(machine, "sun4u") ||
|
|
|
|
STREQ(machine, "g3beige");
|
2017-04-18 10:43:58 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2020-11-18 16:11:05 +00:00
|
|
|
bool qemuDomainHasBuiltinESP(const virDomainDef *def)
|
|
|
|
{
|
|
|
|
/* These machines use ncr53c90 (ESP) SCSI controller built-in */
|
|
|
|
if (def->os.arch == VIR_ARCH_SPARC) {
|
|
|
|
return true;
|
|
|
|
} else if (ARCH_IS_MIPS64(def->os.arch) &&
|
|
|
|
(STREQ(def->os.machine, "magnum") ||
|
|
|
|
STREQ(def->os.machine, "pica61"))) {
|
|
|
|
return true;
|
|
|
|
} else if (def->os.arch == VIR_ARCH_M68K &&
|
|
|
|
STREQ(def->os.machine, "q800")) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2018-11-28 16:34:07 +00:00
|
|
|
static bool
|
2018-11-28 15:48:41 +00:00
|
|
|
qemuDomainMachineNeedsFDC(const char *machine,
|
2018-11-28 15:56:35 +00:00
|
|
|
const virArch arch)
|
2017-04-18 10:43:58 +00:00
|
|
|
{
|
|
|
|
const char *p = STRSKIP(machine, "pc-q35-");
|
2015-06-22 13:20:55 +00:00
|
|
|
|
2018-11-28 15:56:35 +00:00
|
|
|
if (!ARCH_IS_X86(arch))
|
|
|
|
return false;
|
|
|
|
|
2019-02-06 11:37:54 +00:00
|
|
|
if (!p)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
if (STRPREFIX(p, "1.") ||
|
|
|
|
STREQ(p, "2.0") ||
|
|
|
|
STREQ(p, "2.1") ||
|
|
|
|
STREQ(p, "2.2") ||
|
|
|
|
STREQ(p, "2.3")) {
|
|
|
|
return false;
|
2015-06-22 13:20:55 +00:00
|
|
|
}
|
2019-02-06 11:37:54 +00:00
|
|
|
|
|
|
|
return true;
|
2015-06-22 13:20:55 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2015-09-02 20:02:14 +00:00
|
|
|
/* Returns true when @def uses an x86 q35 machine type. */
bool
qemuDomainIsQ35(const virDomainDef *def)
{
    return qemuDomainMachineIsQ35(def->os.machine, def->os.arch);
}
|
|
|
|
|
2015-06-22 13:20:55 +00:00
|
|
|
|
2016-05-03 08:49:40 +00:00
|
|
|
/* Returns true when @def uses an x86 i440fx ("pc") machine type. */
bool
qemuDomainIsI440FX(const virDomainDef *def)
{
    return qemuDomainMachineIsI440FX(def->os.machine, def->os.arch);
}
|
|
|
|
|
|
|
|
|
|
|
|
/* Returns true when @def uses an s390-ccw machine type. */
bool
qemuDomainIsS390CCW(const virDomainDef *def)
{
    return qemuDomainMachineIsS390CCW(def->os.machine, def->os.arch);
}
|
|
|
|
|
|
|
|
|
|
|
|
/* Returns true when @def uses an ARM "virt" machine type. */
bool
qemuDomainIsARMVirt(const virDomainDef *def)
{
    return qemuDomainMachineIsARMVirt(def->os.machine, def->os.arch);
}
|
|
|
|
|
|
|
|
|
2018-08-22 09:15:24 +00:00
|
|
|
/* Returns true when @def uses a RISC-V "virt" machine type. */
bool
qemuDomainIsRISCVVirt(const virDomainDef *def)
{
    return qemuDomainMachineIsRISCVVirt(def->os.machine, def->os.arch);
}
|
|
|
|
|
|
|
|
|
|
|
|
/* Returns true when @def uses a ppc64 pseries machine type. */
bool
qemuDomainIsPSeries(const virDomainDef *def)
{
    return qemuDomainMachineIsPSeries(def->os.machine, def->os.arch);
}
|
|
|
|
|
|
|
|
|
|
|
|
bool
|
|
|
|
qemuDomainHasPCIRoot(const virDomainDef *def)
|
|
|
|
{
|
|
|
|
int root = virDomainControllerFind(def, VIR_DOMAIN_CONTROLLER_TYPE_PCI, 0);
|
|
|
|
|
|
|
|
if (root < 0)
|
2018-08-22 09:15:24 +00:00
|
|
|
return false;
|
|
|
|
|
2018-11-28 15:15:31 +00:00
|
|
|
if (def->controllers[root]->model != VIR_DOMAIN_CONTROLLER_MODEL_PCI_ROOT)
|
2018-08-22 09:15:24 +00:00
|
|
|
return false;
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2016-06-23 08:19:05 +00:00
|
|
|
bool
|
2018-11-28 15:15:31 +00:00
|
|
|
qemuDomainHasPCIeRoot(const virDomainDef *def)
|
2016-06-23 08:19:05 +00:00
|
|
|
{
|
2018-11-28 15:15:31 +00:00
|
|
|
int root = virDomainControllerFind(def, VIR_DOMAIN_CONTROLLER_TYPE_PCI, 0);
|
|
|
|
|
|
|
|
if (root < 0)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
if (def->controllers[root]->model != VIR_DOMAIN_CONTROLLER_MODEL_PCIE_ROOT)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
return true;
|
2017-04-18 10:43:58 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/* Returns true when the machine type of @def has a built-in IDE
 * controller. */
bool
qemuDomainHasBuiltinIDE(const virDomainDef *def)
{
    return qemuDomainMachineHasBuiltinIDE(def->os.machine, def->os.arch);
}
|
2016-06-23 08:19:05 +00:00
|
|
|
|
|
|
|
|
2018-11-28 15:15:31 +00:00
|
|
|
/* Returns true when the machine type of @def requires an explicit
 * floppy controller to be created. */
bool
qemuDomainNeedsFDC(const virDomainDef *def)
{
    return qemuDomainMachineNeedsFDC(def->os.machine, def->os.arch);
}
|
|
|
|
|
|
|
|
|
2018-11-28 16:45:37 +00:00
|
|
|
bool
|
|
|
|
qemuDomainSupportsPCI(virDomainDefPtr def,
|
|
|
|
virQEMUCapsPtr qemuCaps)
|
|
|
|
{
|
2019-02-06 11:37:54 +00:00
|
|
|
if (def->os.arch != VIR_ARCH_ARMV6L &&
|
|
|
|
def->os.arch != VIR_ARCH_ARMV7L &&
|
|
|
|
def->os.arch != VIR_ARCH_AARCH64 &&
|
|
|
|
!ARCH_IS_RISCV(def->os.arch)) {
|
2018-11-28 16:45:37 +00:00
|
|
|
return true;
|
2019-02-06 11:37:54 +00:00
|
|
|
}
|
2018-11-28 16:45:37 +00:00
|
|
|
|
|
|
|
if (STREQ(def->os.machine, "versatilepb"))
|
|
|
|
return true;
|
|
|
|
|
|
|
|
if ((qemuDomainIsARMVirt(def) ||
|
|
|
|
qemuDomainIsRISCVVirt(def)) &&
|
2019-02-06 11:37:54 +00:00
|
|
|
virQEMUCapsGet(qemuCaps, QEMU_CAPS_OBJECT_GPEX)) {
|
2018-11-28 16:45:37 +00:00
|
|
|
return true;
|
2019-02-06 11:37:54 +00:00
|
|
|
}
|
2018-11-28 16:45:37 +00:00
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2015-10-08 05:25:32 +00:00
|
|
|
static bool
|
|
|
|
qemuCheckMemoryDimmConflict(const virDomainDef *def,
|
|
|
|
const virDomainMemoryDef *mem)
|
|
|
|
{
|
|
|
|
size_t i;
|
|
|
|
|
|
|
|
for (i = 0; i < def->nmems; i++) {
|
|
|
|
virDomainMemoryDefPtr tmp = def->mems[i];
|
|
|
|
|
|
|
|
if (tmp == mem ||
|
|
|
|
tmp->info.type != VIR_DOMAIN_DEVICE_ADDRESS_TYPE_DIMM)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (mem->info.addr.dimm.slot == tmp->info.addr.dimm.slot) {
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
|
|
|
|
_("memory device slot '%u' is already being "
|
|
|
|
"used by another memory device"),
|
|
|
|
mem->info.addr.dimm.slot);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (mem->info.addr.dimm.base != 0 &&
|
|
|
|
mem->info.addr.dimm.base == tmp->info.addr.dimm.base) {
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
|
|
|
|
_("memory device base '0x%llx' is already being "
|
|
|
|
"used by another memory device"),
|
|
|
|
mem->info.addr.dimm.base);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
2020-07-28 08:51:32 +00:00
|
|
|
|
|
|
|
|
2015-10-08 05:25:32 +00:00
|
|
|
/**
 * qemuDomainDefValidateMemoryHotplugDevice:
 * @mem: memory device definition to validate
 * @def: domain definition the device belongs (or will belong) to
 *
 * Validates the addressing of a single hotpluggable memory device:
 * dimm/nvdimm devices may only use 'dimm' (or no) addresses, must have a
 * target NUMA node when the domain is NUMA-enabled, and their slot must
 * be in range and not conflict with other devices.
 *
 * Returns 0 when valid, -1 otherwise (with an error reported for all
 * cases except the NONE/LAST models).
 */
static int
qemuDomainDefValidateMemoryHotplugDevice(const virDomainMemoryDef *mem,
                                         const virDomainDef *def)
{
    switch ((virDomainMemoryModel) mem->model) {
    case VIR_DOMAIN_MEMORY_MODEL_DIMM:
    case VIR_DOMAIN_MEMORY_MODEL_NVDIMM:
        if (mem->info.type != VIR_DOMAIN_DEVICE_ADDRESS_TYPE_DIMM &&
            mem->info.type != VIR_DOMAIN_DEVICE_ADDRESS_TYPE_NONE) {
            virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                           _("only 'dimm' addresses are supported for the "
                             "pc-dimm device"));
            return -1;
        }

        /* with NUMA configured each module must name its target node */
        if (virDomainNumaGetNodeCount(def->numa) != 0) {
            if (mem->targetNode == -1) {
                virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                               _("target NUMA node needs to be specified for "
                                 "memory device"));
                return -1;
            }
        }

        if (mem->info.type == VIR_DOMAIN_DEVICE_ADDRESS_TYPE_DIMM) {
            if (mem->info.addr.dimm.slot >= def->mem.memory_slots) {
                virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
                               _("memory device slot '%u' exceeds slots "
                                 "count '%u'"),
                               mem->info.addr.dimm.slot, def->mem.memory_slots);
                return -1;
            }

            /* qemuCheckMemoryDimmConflict reports its own error */
            if (qemuCheckMemoryDimmConflict(def, mem))
                return -1;
        }
        break;

    case VIR_DOMAIN_MEMORY_MODEL_NONE:
    case VIR_DOMAIN_MEMORY_MODEL_LAST:
        return -1;
    }

    return 0;
}
|
|
|
|
|
|
|
|
|
2015-10-08 04:06:15 +00:00
|
|
|
/**
|
|
|
|
* qemuDomainDefValidateMemoryHotplug:
|
|
|
|
* @def: domain definition
|
|
|
|
* @qemuCaps: qemu capabilities object
|
|
|
|
* @mem: definition of memory device that is to be added to @def with hotplug,
|
|
|
|
* NULL in case of regular VM startup
|
|
|
|
*
|
|
|
|
* Validates that the domain definition and memory modules have valid
|
|
|
|
* configuration and are possibly able to accept @mem via hotplug if it's
|
|
|
|
* non-NULL.
|
|
|
|
*
|
|
|
|
* Returns 0 on success; -1 and a libvirt error on error.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
qemuDomainDefValidateMemoryHotplug(const virDomainDef *def,
|
|
|
|
virQEMUCapsPtr qemuCaps,
|
|
|
|
const virDomainMemoryDef *mem)
|
|
|
|
{
|
|
|
|
unsigned int nmems = def->nmems;
|
|
|
|
unsigned long long hotplugSpace;
|
|
|
|
unsigned long long hotplugMemory = 0;
|
2016-07-29 09:02:25 +00:00
|
|
|
bool needPCDimmCap = false;
|
|
|
|
bool needNvdimmCap = false;
|
2015-10-08 04:06:15 +00:00
|
|
|
size_t i;
|
|
|
|
|
|
|
|
hotplugSpace = def->mem.max_memory - virDomainDefGetMemoryInitial(def);
|
|
|
|
|
|
|
|
if (mem) {
|
|
|
|
nmems++;
|
|
|
|
hotplugMemory = mem->size;
|
2015-10-08 05:25:32 +00:00
|
|
|
|
|
|
|
if (qemuDomainDefValidateMemoryHotplugDevice(mem, def) < 0)
|
|
|
|
return -1;
|
2015-10-08 04:06:15 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (!virDomainDefHasMemoryHotplug(def)) {
|
|
|
|
if (nmems) {
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
|
|
|
|
_("cannot use/hotplug a memory device when domain "
|
|
|
|
"'maxMemory' is not defined"));
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2015-10-13 17:12:23 +00:00
|
|
|
if (!ARCH_IS_PPC64(def->os.arch)) {
|
|
|
|
/* due to guest support, qemu would silently enable NUMA with one node
|
|
|
|
* once the memory hotplug backend is enabled. To avoid possible
|
|
|
|
* confusion we will enforce user originated numa configuration along
|
|
|
|
* with memory hotplug. */
|
|
|
|
if (virDomainNumaGetNodeCount(def->numa) == 0) {
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
|
|
|
|
_("At least one numa node has to be configured when "
|
|
|
|
"enabling memory hotplug"));
|
|
|
|
return -1;
|
|
|
|
}
|
2015-10-08 04:06:15 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (nmems > def->mem.memory_slots) {
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
|
|
|
|
_("memory device count '%u' exceeds slots count '%u'"),
|
|
|
|
nmems, def->mem.memory_slots);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2015-10-08 05:25:32 +00:00
|
|
|
for (i = 0; i < def->nmems; i++) {
|
2015-10-08 04:06:15 +00:00
|
|
|
hotplugMemory += def->mems[i]->size;
|
|
|
|
|
2016-07-29 09:02:25 +00:00
|
|
|
switch ((virDomainMemoryModel) def->mems[i]->model) {
|
|
|
|
case VIR_DOMAIN_MEMORY_MODEL_DIMM:
|
|
|
|
needPCDimmCap = true;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case VIR_DOMAIN_MEMORY_MODEL_NVDIMM:
|
|
|
|
needNvdimmCap = true;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case VIR_DOMAIN_MEMORY_MODEL_NONE:
|
|
|
|
case VIR_DOMAIN_MEMORY_MODEL_LAST:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2015-10-08 05:25:32 +00:00
|
|
|
/* already existing devices don't need to be checked on hotplug */
|
|
|
|
if (!mem &&
|
|
|
|
qemuDomainDefValidateMemoryHotplugDevice(def->mems[i], def) < 0)
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2016-07-29 09:02:25 +00:00
|
|
|
if (needPCDimmCap &&
|
|
|
|
!virQEMUCapsGet(qemuCaps, QEMU_CAPS_DEVICE_PC_DIMM)) {
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
|
|
|
|
_("memory hotplug isn't supported by this QEMU binary"));
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (needNvdimmCap &&
|
|
|
|
!virQEMUCapsGet(qemuCaps, QEMU_CAPS_DEVICE_NVDIMM)) {
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
|
|
|
|
_("nvdimm isn't supported by this QEMU binary"));
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2015-10-08 04:06:15 +00:00
|
|
|
if (hotplugMemory > hotplugSpace) {
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
|
|
|
|
_("memory device total size exceeds hotplug space"));
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2015-05-27 13:51:52 +00:00
|
|
|
/**
|
|
|
|
* qemuDomainUpdateCurrentMemorySize:
|
|
|
|
*
|
2019-02-07 09:36:01 +00:00
|
|
|
* In case when the balloon is not present for the domain, the function
|
|
|
|
* recalculates the maximum size to reflect possible changes.
|
2015-05-27 13:51:52 +00:00
|
|
|
*/
|
2019-02-07 09:46:20 +00:00
|
|
|
void
|
|
|
|
qemuDomainUpdateCurrentMemorySize(virDomainObjPtr vm)
|
2015-05-27 13:51:52 +00:00
|
|
|
{
|
|
|
|
/* inactive domain doesn't need size update */
|
|
|
|
if (!virDomainObjIsActive(vm))
|
2019-02-07 09:46:20 +00:00
|
|
|
return;
|
2015-05-27 13:51:52 +00:00
|
|
|
|
|
|
|
/* if no balloning is available, the current size equals to the current
|
|
|
|
* full memory size */
|
2019-02-07 09:46:20 +00:00
|
|
|
if (!virDomainDefHasMemballoon(vm->def))
|
2016-06-15 13:34:04 +00:00
|
|
|
vm->def->mem.cur_balloon = virDomainDefGetMemoryTotal(vm->def);
|
2015-05-27 13:51:52 +00:00
|
|
|
}
|
2015-11-06 14:51:33 +00:00
|
|
|
|
|
|
|
|
qemu_domain: NVLink2 bridge detection function for PPC64
The NVLink2 support in QEMU implements the detection of NVLink2
capable devices by verifying the attributes of the VFIO mem region
QEMU allocates for the NVIDIA GPUs. To properly allocate an
adequate amount of memLock, Libvirt needs this information before
a QEMU instance is even created, thus querying QEMU is not
possible and opening a VFIO window is too much.
An alternative is presented in this patch. Making the following
assumptions:
- if we want GPU RAM to be available in the guest, an NVLink2 bridge
must be passed through;
- an unknown PCI device can be classified as a NVLink2 bridge
if its device tree node has 'ibm,gpu', 'ibm,nvlink',
'ibm,nvlink-speed' and 'memory-region'.
This patch introduces a helper called @ppc64VFIODeviceIsNV2Bridge
that checks the device tree node of a given PCI device and
check if it meets the criteria to be a NVLink2 bridge. This
new function will be used in a follow-up patch that, using the
first assumption, will set up the rlimits of the guest
accordingly.
Signed-off-by: Daniel Henrique Barboza <danielhb413@gmail.com>
2019-04-04 13:40:38 +00:00
|
|
|
/**
|
|
|
|
* ppc64VFIODeviceIsNV2Bridge:
|
|
|
|
* @device: string with the PCI device address
|
|
|
|
*
|
|
|
|
* This function receives a string that represents a PCI device,
|
|
|
|
* such as '0004:04:00.0', and tells if the device is a NVLink2
|
|
|
|
* bridge.
|
|
|
|
*/
|
PPC64 support for NVIDIA V100 GPU with NVLink2 passthrough
The NVIDIA V100 GPU has an onboard RAM that is mapped into the
host memory and accessible as normal RAM via an NVLink2 bridge. When
passed through in a guest, QEMU puts the NVIDIA RAM window in a
non-contiguous area, above the PCI MMIO area that starts at 32TiB.
This means that the NVIDIA RAM window starts at 64TiB and go all the
way to 128TiB.
This means that the guest might request a 64-bit window, for each PCI
Host Bridge, that goes all the way to 128TiB. However, the NVIDIA RAM
window isn't counted as regular RAM, thus this window is considered
only for the allocation of the Translation and Control Entry (TCE).
For more information about how NVLink2 support works in QEMU,
refer to the accepted implementation [1].
This memory layout differs from the existing VFIO case, requiring its
own formula. This patch changes the PPC64 code of
@qemuDomainGetMemLockLimitBytes to:
- detect if we have a NVLink2 bridge being passed through to the
guest. This is done by using the @ppc64VFIODeviceIsNV2Bridge function
added in the previous patch. The existence of the NVLink2 bridge in
the guest means that we are dealing with the NVLink2 memory layout;
- if an IBM NVLink2 bridge exists, passthroughLimit is calculated in a
different way to account for the extra memory the TCE table can alloc.
The 64TiB..128TiB window is more than enough to fit all possible
GPUs, thus the memLimit is the same regardless of passing through 1 or
multiple V100 GPUs.
Further reading explaining the background
[1] https://lists.gnu.org/archive/html/qemu-devel/2019-03/msg03700.html
[2] https://www.redhat.com/archives/libvir-list/2019-March/msg00660.html
[3] https://www.redhat.com/archives/libvir-list/2019-April/msg00527.html
Signed-off-by: Daniel Henrique Barboza <danielhb413@gmail.com>
Reviewed-by: Erik Skultety <eskultet@redhat.com>
2019-04-04 13:40:39 +00:00
|
|
|
static bool
|
qemu_domain: NVLink2 bridge detection function for PPC64
The NVLink2 support in QEMU implements the detection of NVLink2
capable devices by verifying the attributes of the VFIO mem region
QEMU allocates for the NVIDIA GPUs. To properly allocate an
adequate amount of memLock, Libvirt needs this information before
a QEMU instance is even created, thus querying QEMU is not
possible and opening a VFIO window is too much.
An alternative is presented in this patch. Making the following
assumptions:
- if we want GPU RAM to be available in the guest, an NVLink2 bridge
must be passed through;
- an unknown PCI device can be classified as a NVLink2 bridge
if its device tree node has 'ibm,gpu', 'ibm,nvlink',
'ibm,nvlink-speed' and 'memory-region'.
This patch introduces a helper called @ppc64VFIODeviceIsNV2Bridge
that checks the device tree node of a given PCI device and
check if it meets the criteria to be a NVLink2 bridge. This
new function will be used in a follow-up patch that, using the
first assumption, will set up the rlimits of the guest
accordingly.
Signed-off-by: Daniel Henrique Barboza <danielhb413@gmail.com>
2019-04-04 13:40:38 +00:00
|
|
|
ppc64VFIODeviceIsNV2Bridge(const char *device)
|
|
|
|
{
|
|
|
|
const char *nvlink2Files[] = {"ibm,gpu", "ibm,nvlink",
|
|
|
|
"ibm,nvlink-speed", "memory-region"};
|
|
|
|
size_t i;
|
|
|
|
|
2019-10-15 11:55:26 +00:00
|
|
|
for (i = 0; i < G_N_ELEMENTS(nvlink2Files); i++) {
|
2019-10-15 13:16:31 +00:00
|
|
|
g_autofree char *file = NULL;
|
qemu_domain: NVLink2 bridge detection function for PPC64
The NVLink2 support in QEMU implements the detection of NVLink2
capable devices by verifying the attributes of the VFIO mem region
QEMU allocates for the NVIDIA GPUs. To properly allocate an
adequate amount of memLock, Libvirt needs this information before
a QEMU instance is even created, thus querying QEMU is not
possible and opening a VFIO window is too much.
An alternative is presented in this patch. Making the following
assumptions:
- if we want GPU RAM to be available in the guest, an NVLink2 bridge
must be passed through;
- an unknown PCI device can be classified as a NVLink2 bridge
if its device tree node has 'ibm,gpu', 'ibm,nvlink',
'ibm,nvlink-speed' and 'memory-region'.
This patch introduces a helper called @ppc64VFIODeviceIsNV2Bridge
that checks the device tree node of a given PCI device and
check if it meets the criteria to be a NVLink2 bridge. This
new function will be used in a follow-up patch that, using the
first assumption, will set up the rlimits of the guest
accordingly.
Signed-off-by: Daniel Henrique Barboza <danielhb413@gmail.com>
2019-04-04 13:40:38 +00:00
|
|
|
|
2019-10-22 13:26:14 +00:00
|
|
|
file = g_strdup_printf("/sys/bus/pci/devices/%s/of_node/%s",
|
|
|
|
device, nvlink2Files[i]);
|
qemu_domain: NVLink2 bridge detection function for PPC64
The NVLink2 support in QEMU implements the detection of NVLink2
capable devices by verifying the attributes of the VFIO mem region
QEMU allocates for the NVIDIA GPUs. To properly allocate an
adequate amount of memLock, Libvirt needs this information before
a QEMU instance is even created, thus querying QEMU is not
possible and opening a VFIO window is too much.
An alternative is presented in this patch. Making the following
assumptions:
- if we want GPU RAM to be available in the guest, an NVLink2 bridge
must be passed through;
- an unknown PCI device can be classified as a NVLink2 bridge
if its device tree node has 'ibm,gpu', 'ibm,nvlink',
'ibm,nvlink-speed' and 'memory-region'.
This patch introduces a helper called @ppc64VFIODeviceIsNV2Bridge
that checks the device tree node of a given PCI device and
check if it meets the criteria to be a NVLink2 bridge. This
new function will be used in a follow-up patch that, using the
first assumption, will set up the rlimits of the guest
accordingly.
Signed-off-by: Daniel Henrique Barboza <danielhb413@gmail.com>
2019-04-04 13:40:38 +00:00
|
|
|
|
|
|
|
if (!virFileExists(file))
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2019-03-05 12:46:07 +00:00
|
|
|
/**
|
|
|
|
* getPPC64MemLockLimitBytes:
|
|
|
|
* @def: domain definition
|
2019-09-26 11:15:47 +00:00
|
|
|
* @forceVFIO: force VFIO usage
|
2019-03-05 12:46:07 +00:00
|
|
|
*
|
|
|
|
* A PPC64 helper that calculates the memory locking limit in order for
|
|
|
|
* the guest to operate properly.
|
|
|
|
*/
|
|
|
|
static unsigned long long
|
2019-09-26 11:15:47 +00:00
|
|
|
getPPC64MemLockLimitBytes(virDomainDefPtr def,
|
|
|
|
bool forceVFIO)
|
2019-03-05 12:46:07 +00:00
|
|
|
{
|
|
|
|
unsigned long long memKB = 0;
|
|
|
|
unsigned long long baseLimit = 0;
|
|
|
|
unsigned long long memory = 0;
|
|
|
|
unsigned long long maxMemory = 0;
|
|
|
|
unsigned long long passthroughLimit = 0;
|
|
|
|
size_t i, nPCIHostBridges = 0;
|
PPC64 support for NVIDIA V100 GPU with NVLink2 passthrough
The NVIDIA V100 GPU has an onboard RAM that is mapped into the
host memory and accessible as normal RAM via an NVLink2 bridge. When
passed through in a guest, QEMU puts the NVIDIA RAM window in a
non-contiguous area, above the PCI MMIO area that starts at 32TiB.
This means that the NVIDIA RAM window starts at 64TiB and go all the
way to 128TiB.
This means that the guest might request a 64-bit window, for each PCI
Host Bridge, that goes all the way to 128TiB. However, the NVIDIA RAM
window isn't counted as regular RAM, thus this window is considered
only for the allocation of the Translation and Control Entry (TCE).
For more information about how NVLink2 support works in QEMU,
refer to the accepted implementation [1].
This memory layout differs from the existing VFIO case, requiring its
own formula. This patch changes the PPC64 code of
@qemuDomainGetMemLockLimitBytes to:
- detect if we have a NVLink2 bridge being passed through to the
guest. This is done by using the @ppc64VFIODeviceIsNV2Bridge function
added in the previous patch. The existence of the NVLink2 bridge in
the guest means that we are dealing with the NVLink2 memory layout;
- if an IBM NVLink2 bridge exists, passthroughLimit is calculated in a
different way to account for the extra memory the TCE table can alloc.
The 64TiB..128TiB window is more than enough to fit all possible
GPUs, thus the memLimit is the same regardless of passing through 1 or
multiple V100 GPUs.
Further reading explaining the background
[1] https://lists.gnu.org/archive/html/qemu-devel/2019-03/msg03700.html
[2] https://www.redhat.com/archives/libvir-list/2019-March/msg00660.html
[3] https://www.redhat.com/archives/libvir-list/2019-April/msg00527.html
Signed-off-by: Daniel Henrique Barboza <danielhb413@gmail.com>
Reviewed-by: Erik Skultety <eskultet@redhat.com>
2019-04-04 13:40:39 +00:00
|
|
|
virPCIDeviceAddressPtr pciAddr;
|
2019-03-05 12:46:07 +00:00
|
|
|
bool usesVFIO = false;
|
PPC64 support for NVIDIA V100 GPU with NVLink2 passthrough
The NVIDIA V100 GPU has an onboard RAM that is mapped into the
host memory and accessible as normal RAM via an NVLink2 bridge. When
passed through in a guest, QEMU puts the NVIDIA RAM window in a
non-contiguous area, above the PCI MMIO area that starts at 32TiB.
This means that the NVIDIA RAM window starts at 64TiB and go all the
way to 128TiB.
This means that the guest might request a 64-bit window, for each PCI
Host Bridge, that goes all the way to 128TiB. However, the NVIDIA RAM
window isn't counted as regular RAM, thus this window is considered
only for the allocation of the Translation and Control Entry (TCE).
For more information about how NVLink2 support works in QEMU,
refer to the accepted implementation [1].
This memory layout differs from the existing VFIO case, requiring its
own formula. This patch changes the PPC64 code of
@qemuDomainGetMemLockLimitBytes to:
- detect if we have a NVLink2 bridge being passed through to the
guest. This is done by using the @ppc64VFIODeviceIsNV2Bridge function
added in the previous patch. The existence of the NVLink2 bridge in
the guest means that we are dealing with the NVLink2 memory layout;
- if an IBM NVLink2 bridge exists, passthroughLimit is calculated in a
different way to account for the extra memory the TCE table can alloc.
The 64TiB..128TiB window is more than enough to fit all possible
GPUs, thus the memLimit is the same regardless of passing through 1 or
multiple V100 GPUs.
Further reading explaining the background
[1] https://lists.gnu.org/archive/html/qemu-devel/2019-03/msg03700.html
[2] https://www.redhat.com/archives/libvir-list/2019-March/msg00660.html
[3] https://www.redhat.com/archives/libvir-list/2019-April/msg00527.html
Signed-off-by: Daniel Henrique Barboza <danielhb413@gmail.com>
Reviewed-by: Erik Skultety <eskultet@redhat.com>
2019-04-04 13:40:39 +00:00
|
|
|
bool nvlink2Capable = false;
|
2019-03-05 12:46:07 +00:00
|
|
|
|
|
|
|
for (i = 0; i < def->ncontrollers; i++) {
|
|
|
|
virDomainControllerDefPtr cont = def->controllers[i];
|
|
|
|
|
|
|
|
if (!virDomainControllerIsPSeriesPHB(cont))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
nPCIHostBridges++;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < def->nhostdevs; i++) {
|
|
|
|
virDomainHostdevDefPtr dev = def->hostdevs[i];
|
|
|
|
|
2019-08-06 14:25:12 +00:00
|
|
|
if (virHostdevIsVFIODevice(dev)) {
|
2019-03-05 12:46:07 +00:00
|
|
|
usesVFIO = true;
|
PPC64 support for NVIDIA V100 GPU with NVLink2 passthrough
The NVIDIA V100 GPU has an onboard RAM that is mapped into the
host memory and accessible as normal RAM via an NVLink2 bridge. When
passed through in a guest, QEMU puts the NVIDIA RAM window in a
non-contiguous area, above the PCI MMIO area that starts at 32TiB.
This means that the NVIDIA RAM window starts at 64TiB and go all the
way to 128TiB.
This means that the guest might request a 64-bit window, for each PCI
Host Bridge, that goes all the way to 128TiB. However, the NVIDIA RAM
window isn't counted as regular RAM, thus this window is considered
only for the allocation of the Translation and Control Entry (TCE).
For more information about how NVLink2 support works in QEMU,
refer to the accepted implementation [1].
This memory layout differs from the existing VFIO case, requiring its
own formula. This patch changes the PPC64 code of
@qemuDomainGetMemLockLimitBytes to:
- detect if we have a NVLink2 bridge being passed through to the
guest. This is done by using the @ppc64VFIODeviceIsNV2Bridge function
added in the previous patch. The existence of the NVLink2 bridge in
the guest means that we are dealing with the NVLink2 memory layout;
- if an IBM NVLink2 bridge exists, passthroughLimit is calculated in a
different way to account for the extra memory the TCE table can alloc.
The 64TiB..128TiB window is more than enough to fit all possible
GPUs, thus the memLimit is the same regardless of passing through 1 or
multiple V100 GPUs.
Further reading explaining the background
[1] https://lists.gnu.org/archive/html/qemu-devel/2019-03/msg03700.html
[2] https://www.redhat.com/archives/libvir-list/2019-March/msg00660.html
[3] https://www.redhat.com/archives/libvir-list/2019-April/msg00527.html
Signed-off-by: Daniel Henrique Barboza <danielhb413@gmail.com>
Reviewed-by: Erik Skultety <eskultet@redhat.com>
2019-04-04 13:40:39 +00:00
|
|
|
|
|
|
|
pciAddr = &dev->source.subsys.u.pci.addr;
|
|
|
|
if (virPCIDeviceAddressIsValid(pciAddr, false)) {
|
2019-10-15 13:16:31 +00:00
|
|
|
g_autofree char *pciAddrStr = NULL;
|
PPC64 support for NVIDIA V100 GPU with NVLink2 passthrough
The NVIDIA V100 GPU has an onboard RAM that is mapped into the
host memory and accessible as normal RAM via an NVLink2 bridge. When
passed through in a guest, QEMU puts the NVIDIA RAM window in a
non-contiguous area, above the PCI MMIO area that starts at 32TiB.
This means that the NVIDIA RAM window starts at 64TiB and go all the
way to 128TiB.
This means that the guest might request a 64-bit window, for each PCI
Host Bridge, that goes all the way to 128TiB. However, the NVIDIA RAM
window isn't counted as regular RAM, thus this window is considered
only for the allocation of the Translation and Control Entry (TCE).
For more information about how NVLink2 support works in QEMU,
refer to the accepted implementation [1].
This memory layout differs from the existing VFIO case, requiring its
own formula. This patch changes the PPC64 code of
@qemuDomainGetMemLockLimitBytes to:
- detect if we have a NVLink2 bridge being passed through to the
guest. This is done by using the @ppc64VFIODeviceIsNV2Bridge function
added in the previous patch. The existence of the NVLink2 bridge in
the guest means that we are dealing with the NVLink2 memory layout;
- if an IBM NVLink2 bridge exists, passthroughLimit is calculated in a
different way to account for the extra memory the TCE table can alloc.
The 64TiB..128TiB window is more than enough to fit all possible
GPUs, thus the memLimit is the same regardless of passing through 1 or
multiple V100 GPUs.
Further reading explaining the background
[1] https://lists.gnu.org/archive/html/qemu-devel/2019-03/msg03700.html
[2] https://www.redhat.com/archives/libvir-list/2019-March/msg00660.html
[3] https://www.redhat.com/archives/libvir-list/2019-April/msg00527.html
Signed-off-by: Daniel Henrique Barboza <danielhb413@gmail.com>
Reviewed-by: Erik Skultety <eskultet@redhat.com>
2019-04-04 13:40:39 +00:00
|
|
|
|
|
|
|
pciAddrStr = virPCIDeviceAddressAsString(pciAddr);
|
|
|
|
if (ppc64VFIODeviceIsNV2Bridge(pciAddrStr)) {
|
|
|
|
nvlink2Capable = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2019-03-05 12:46:07 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-06-25 13:36:44 +00:00
|
|
|
if (virDomainDefHasNVMeDisk(def))
|
|
|
|
usesVFIO = true;
|
|
|
|
|
2019-03-05 12:46:07 +00:00
|
|
|
memory = virDomainDefGetMemoryTotal(def);
|
|
|
|
|
|
|
|
if (def->mem.max_memory)
|
|
|
|
maxMemory = def->mem.max_memory;
|
|
|
|
else
|
|
|
|
maxMemory = memory;
|
|
|
|
|
|
|
|
/* baseLimit := maxMemory / 128 (a)
|
|
|
|
* + 4 MiB * #PHBs + 8 MiB (b)
|
|
|
|
*
|
|
|
|
* (a) is the hash table
|
|
|
|
*
|
|
|
|
* (b) is accounting for the 32-bit DMA window - it could be either the
|
|
|
|
* KVM accelerated TCE tables for emulated devices, or the VFIO
|
|
|
|
* userspace view. The 4 MiB per-PHB (including the default one) covers
|
|
|
|
* a 2GiB DMA window: default is 1GiB, but it's possible it'll be
|
|
|
|
* increased to help performance. The 8 MiB extra should be plenty for
|
|
|
|
* the TCE table index for any reasonable number of PHBs and several
|
|
|
|
* spapr-vlan or spapr-vscsi devices (512kB + a tiny bit each) */
|
|
|
|
baseLimit = maxMemory / 128 +
|
|
|
|
4096 * nPCIHostBridges +
|
|
|
|
8192;
|
|
|
|
|
PPC64 support for NVIDIA V100 GPU with NVLink2 passthrough
The NVIDIA V100 GPU has an onboard RAM that is mapped into the
host memory and accessible as normal RAM via an NVLink2 bridge. When
passed through in a guest, QEMU puts the NVIDIA RAM window in a
non-contiguous area, above the PCI MMIO area that starts at 32TiB.
This means that the NVIDIA RAM window starts at 64TiB and go all the
way to 128TiB.
This means that the guest might request a 64-bit window, for each PCI
Host Bridge, that goes all the way to 128TiB. However, the NVIDIA RAM
window isn't counted as regular RAM, thus this window is considered
only for the allocation of the Translation and Control Entry (TCE).
For more information about how NVLink2 support works in QEMU,
refer to the accepted implementation [1].
This memory layout differs from the existing VFIO case, requiring its
own formula. This patch changes the PPC64 code of
@qemuDomainGetMemLockLimitBytes to:
- detect if we have a NVLink2 bridge being passed through to the
guest. This is done by using the @ppc64VFIODeviceIsNV2Bridge function
added in the previous patch. The existence of the NVLink2 bridge in
the guest means that we are dealing with the NVLink2 memory layout;
- if an IBM NVLink2 bridge exists, passthroughLimit is calculated in a
different way to account for the extra memory the TCE table can alloc.
The 64TiB..128TiB window is more than enough to fit all possible
GPUs, thus the memLimit is the same regardless of passing through 1 or
multiple V100 GPUs.
Further reading explaining the background
[1] https://lists.gnu.org/archive/html/qemu-devel/2019-03/msg03700.html
[2] https://www.redhat.com/archives/libvir-list/2019-March/msg00660.html
[3] https://www.redhat.com/archives/libvir-list/2019-April/msg00527.html
Signed-off-by: Daniel Henrique Barboza <danielhb413@gmail.com>
Reviewed-by: Erik Skultety <eskultet@redhat.com>
2019-04-04 13:40:39 +00:00
|
|
|
/* NVLink2 support in QEMU is a special case of the passthrough
|
|
|
|
* mechanics explained in the usesVFIO case below. The GPU RAM
|
|
|
|
* is placed with a gap after maxMemory. The current QEMU
|
|
|
|
* implementation puts the NVIDIA RAM above the PCI MMIO, which
|
|
|
|
* starts at 32TiB and is the MMIO reserved for the guest main RAM.
|
2019-03-05 12:46:07 +00:00
|
|
|
*
|
PPC64 support for NVIDIA V100 GPU with NVLink2 passthrough
The NVIDIA V100 GPU has an onboard RAM that is mapped into the
host memory and accessible as normal RAM via an NVLink2 bridge. When
passed through in a guest, QEMU puts the NVIDIA RAM window in a
non-contiguous area, above the PCI MMIO area that starts at 32TiB.
This means that the NVIDIA RAM window starts at 64TiB and go all the
way to 128TiB.
This means that the guest might request a 64-bit window, for each PCI
Host Bridge, that goes all the way to 128TiB. However, the NVIDIA RAM
window isn't counted as regular RAM, thus this window is considered
only for the allocation of the Translation and Control Entry (TCE).
For more information about how NVLink2 support works in QEMU,
refer to the accepted implementation [1].
This memory layout differs from the existing VFIO case, requiring its
own formula. This patch changes the PPC64 code of
@qemuDomainGetMemLockLimitBytes to:
- detect if we have a NVLink2 bridge being passed through to the
guest. This is done by using the @ppc64VFIODeviceIsNV2Bridge function
added in the previous patch. The existence of the NVLink2 bridge in
the guest means that we are dealing with the NVLink2 memory layout;
- if an IBM NVLink2 bridge exists, passthroughLimit is calculated in a
different way to account for the extra memory the TCE table can alloc.
The 64TiB..128TiB window is more than enough to fit all possible
GPUs, thus the memLimit is the same regardless of passing through 1 or
multiple V100 GPUs.
Further reading explaining the background
[1] https://lists.gnu.org/archive/html/qemu-devel/2019-03/msg03700.html
[2] https://www.redhat.com/archives/libvir-list/2019-March/msg00660.html
[3] https://www.redhat.com/archives/libvir-list/2019-April/msg00527.html
Signed-off-by: Daniel Henrique Barboza <danielhb413@gmail.com>
Reviewed-by: Erik Skultety <eskultet@redhat.com>
2019-04-04 13:40:39 +00:00
|
|
|
* This window ends at 64TiB, and this is where the GPUs are being
|
|
|
|
* placed. The next available window size is at 128TiB, and
|
|
|
|
* 64TiB..128TiB will fit all possible NVIDIA GPUs.
|
2019-03-05 12:46:07 +00:00
|
|
|
*
|
PPC64 support for NVIDIA V100 GPU with NVLink2 passthrough
The NVIDIA V100 GPU has an onboard RAM that is mapped into the
host memory and accessible as normal RAM via an NVLink2 bridge. When
passed through in a guest, QEMU puts the NVIDIA RAM window in a
non-contiguous area, above the PCI MMIO area that starts at 32TiB.
This means that the NVIDIA RAM window starts at 64TiB and go all the
way to 128TiB.
This means that the guest might request a 64-bit window, for each PCI
Host Bridge, that goes all the way to 128TiB. However, the NVIDIA RAM
window isn't counted as regular RAM, thus this window is considered
only for the allocation of the Translation and Control Entry (TCE).
For more information about how NVLink2 support works in QEMU,
refer to the accepted implementation [1].
This memory layout differs from the existing VFIO case, requiring its
own formula. This patch changes the PPC64 code of
@qemuDomainGetMemLockLimitBytes to:
- detect if we have a NVLink2 bridge being passed through to the
guest. This is done by using the @ppc64VFIODeviceIsNV2Bridge function
added in the previous patch. The existence of the NVLink2 bridge in
the guest means that we are dealing with the NVLink2 memory layout;
- if an IBM NVLink2 bridge exists, passthroughLimit is calculated in a
different way to account for the extra memory the TCE table can alloc.
The 64TiB..128TiB window is more than enough to fit all possible
GPUs, thus the memLimit is the same regardless of passing through 1 or
multiple V100 GPUs.
Further reading explaining the background
[1] https://lists.gnu.org/archive/html/qemu-devel/2019-03/msg03700.html
[2] https://www.redhat.com/archives/libvir-list/2019-March/msg00660.html
[3] https://www.redhat.com/archives/libvir-list/2019-April/msg00527.html
Signed-off-by: Daniel Henrique Barboza <danielhb413@gmail.com>
Reviewed-by: Erik Skultety <eskultet@redhat.com>
2019-04-04 13:40:39 +00:00
|
|
|
* The same assumption as the most common case applies here:
|
|
|
|
* the guest will request a 64-bit DMA window, per PHB, that is
|
|
|
|
* big enough to map all its RAM, which is now at 128TiB due
|
|
|
|
* to the GPUs.
|
2019-03-05 12:46:07 +00:00
|
|
|
*
|
PPC64 support for NVIDIA V100 GPU with NVLink2 passthrough
The NVIDIA V100 GPU has an onboard RAM that is mapped into the
host memory and accessible as normal RAM via an NVLink2 bridge. When
passed through in a guest, QEMU puts the NVIDIA RAM window in a
non-contiguous area, above the PCI MMIO area that starts at 32TiB.
This means that the NVIDIA RAM window starts at 64TiB and go all the
way to 128TiB.
This means that the guest might request a 64-bit window, for each PCI
Host Bridge, that goes all the way to 128TiB. However, the NVIDIA RAM
window isn't counted as regular RAM, thus this window is considered
only for the allocation of the Translation and Control Entry (TCE).
For more information about how NVLink2 support works in QEMU,
refer to the accepted implementation [1].
This memory layout differs from the existing VFIO case, requiring its
own formula. This patch changes the PPC64 code of
@qemuDomainGetMemLockLimitBytes to:
- detect if we have a NVLink2 bridge being passed through to the
guest. This is done by using the @ppc64VFIODeviceIsNV2Bridge function
added in the previous patch. The existence of the NVLink2 bridge in
the guest means that we are dealing with the NVLink2 memory layout;
- if an IBM NVLink2 bridge exists, passthroughLimit is calculated in a
different way to account for the extra memory the TCE table can alloc.
The 64TiB..128TiB window is more than enough to fit all possible
GPUs, thus the memLimit is the same regardless of passing through 1 or
multiple V100 GPUs.
Further reading explaining the background
[1] https://lists.gnu.org/archive/html/qemu-devel/2019-03/msg03700.html
[2] https://www.redhat.com/archives/libvir-list/2019-March/msg00660.html
[3] https://www.redhat.com/archives/libvir-list/2019-April/msg00527.html
Signed-off-by: Daniel Henrique Barboza <danielhb413@gmail.com>
Reviewed-by: Erik Skultety <eskultet@redhat.com>
2019-04-04 13:40:39 +00:00
|
|
|
* Note that the NVIDIA RAM window must be accounted for the TCE
|
|
|
|
* table size, but *not* for the main RAM (maxMemory). This gives
|
|
|
|
* us the following passthroughLimit for the NVLink2 case:
|
|
|
|
*
|
|
|
|
* passthroughLimit = maxMemory +
|
|
|
|
* 128TiB/512KiB * #PHBs + 8 MiB */
|
|
|
|
if (nvlink2Capable) {
|
|
|
|
passthroughLimit = maxMemory +
|
|
|
|
128 * (1ULL<<30) / 512 * nPCIHostBridges +
|
|
|
|
8192;
|
2019-09-26 11:15:47 +00:00
|
|
|
} else if (usesVFIO || forceVFIO) {
|
PPC64 support for NVIDIA V100 GPU with NVLink2 passthrough
The NVIDIA V100 GPU has an onboard RAM that is mapped into the
host memory and accessible as normal RAM via an NVLink2 bridge. When
passed through in a guest, QEMU puts the NVIDIA RAM window in a
non-contiguous area, above the PCI MMIO area that starts at 32TiB.
This means that the NVIDIA RAM window starts at 64TiB and go all the
way to 128TiB.
This means that the guest might request a 64-bit window, for each PCI
Host Bridge, that goes all the way to 128TiB. However, the NVIDIA RAM
window isn't counted as regular RAM, thus this window is considered
only for the allocation of the Translation and Control Entry (TCE).
For more information about how NVLink2 support works in QEMU,
refer to the accepted implementation [1].
This memory layout differs from the existing VFIO case, requiring its
own formula. This patch changes the PPC64 code of
@qemuDomainGetMemLockLimitBytes to:
- detect if we have a NVLink2 bridge being passed through to the
guest. This is done by using the @ppc64VFIODeviceIsNV2Bridge function
added in the previous patch. The existence of the NVLink2 bridge in
the guest means that we are dealing with the NVLink2 memory layout;
- if an IBM NVLink2 bridge exists, passthroughLimit is calculated in a
different way to account for the extra memory the TCE table can alloc.
The 64TiB..128TiB window is more than enough to fit all possible
GPUs, thus the memLimit is the same regardless of passing through 1 or
multiple V100 GPUs.
Further reading explaining the background
[1] https://lists.gnu.org/archive/html/qemu-devel/2019-03/msg03700.html
[2] https://www.redhat.com/archives/libvir-list/2019-March/msg00660.html
[3] https://www.redhat.com/archives/libvir-list/2019-April/msg00527.html
Signed-off-by: Daniel Henrique Barboza <danielhb413@gmail.com>
Reviewed-by: Erik Skultety <eskultet@redhat.com>
2019-04-04 13:40:39 +00:00
|
|
|
/* For regular (non-NVLink2 present) VFIO passthrough, the value
|
|
|
|
* of passthroughLimit is:
|
|
|
|
*
|
|
|
|
* passthroughLimit := max( 2 GiB * #PHBs, (c)
|
|
|
|
* memory (d)
|
|
|
|
* + memory * 1/512 * #PHBs + 8 MiB ) (e)
|
|
|
|
*
|
|
|
|
* (c) is the pre-DDW VFIO DMA window accounting. We're allowing 2
|
|
|
|
* GiB rather than 1 GiB
|
|
|
|
*
|
|
|
|
* (d) is the with-DDW (and memory pre-registration and related
|
|
|
|
* features) DMA window accounting - assuming that we only account
|
|
|
|
* RAM once, even if mapped to multiple PHBs
|
|
|
|
*
|
|
|
|
* (e) is the with-DDW userspace view and overhead for the 64-bit
|
|
|
|
* DMA window. This is based a bit on expected guest behaviour, but
|
|
|
|
* there really isn't a way to completely avoid that. We assume the
|
|
|
|
* guest requests a 64-bit DMA window (per PHB) just big enough to
|
|
|
|
* map all its RAM. 4 kiB page size gives the 1/512; it will be
|
|
|
|
* less with 64 kiB pages, less still if the guest is mapped with
|
|
|
|
* hugepages (unlike the default 32-bit DMA window, DDW windows
|
|
|
|
* can use large IOMMU pages). 8 MiB is for second and further level
|
|
|
|
* overheads, like (b) */
|
2019-03-05 12:46:07 +00:00
|
|
|
passthroughLimit = MAX(2 * 1024 * 1024 * nPCIHostBridges,
|
|
|
|
memory +
|
|
|
|
memory / 512 * nPCIHostBridges + 8192);
|
PPC64 support for NVIDIA V100 GPU with NVLink2 passthrough
The NVIDIA V100 GPU has an onboard RAM that is mapped into the
host memory and accessible as normal RAM via an NVLink2 bridge. When
passed through in a guest, QEMU puts the NVIDIA RAM window in a
non-contiguous area, above the PCI MMIO area that starts at 32TiB.
This means that the NVIDIA RAM window starts at 64TiB and go all the
way to 128TiB.
This means that the guest might request a 64-bit window, for each PCI
Host Bridge, that goes all the way to 128TiB. However, the NVIDIA RAM
window isn't counted as regular RAM, thus this window is considered
only for the allocation of the Translation and Control Entry (TCE).
For more information about how NVLink2 support works in QEMU,
refer to the accepted implementation [1].
This memory layout differs from the existing VFIO case, requiring its
own formula. This patch changes the PPC64 code of
@qemuDomainGetMemLockLimitBytes to:
- detect if we have a NVLink2 bridge being passed through to the
guest. This is done by using the @ppc64VFIODeviceIsNV2Bridge function
added in the previous patch. The existence of the NVLink2 bridge in
the guest means that we are dealing with the NVLink2 memory layout;
- if an IBM NVLink2 bridge exists, passthroughLimit is calculated in a
different way to account for the extra memory the TCE table can alloc.
The 64TiB..128TiB window is more than enough to fit all possible
GPUs, thus the memLimit is the same regardless of passing through 1 or
multiple V100 GPUs.
Further reading explaining the background
[1] https://lists.gnu.org/archive/html/qemu-devel/2019-03/msg03700.html
[2] https://www.redhat.com/archives/libvir-list/2019-March/msg00660.html
[3] https://www.redhat.com/archives/libvir-list/2019-April/msg00527.html
Signed-off-by: Daniel Henrique Barboza <danielhb413@gmail.com>
Reviewed-by: Erik Skultety <eskultet@redhat.com>
2019-04-04 13:40:39 +00:00
|
|
|
}
|
2019-03-05 12:46:07 +00:00
|
|
|
|
|
|
|
memKB = baseLimit + passthroughLimit;
|
|
|
|
|
|
|
|
return memKB << 10;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2015-11-06 14:51:33 +00:00
|
|
|
/**
|
2015-11-24 12:51:11 +00:00
|
|
|
* qemuDomainGetMemLockLimitBytes:
|
2015-11-06 14:51:33 +00:00
|
|
|
* @def: domain definition
|
2019-09-26 11:15:47 +00:00
|
|
|
* @forceVFIO: force VFIO calculation
|
2015-11-06 14:51:33 +00:00
|
|
|
*
|
2017-03-21 18:52:50 +00:00
|
|
|
* Calculate the memory locking limit that needs to be set in order for
|
|
|
|
* the guest to operate properly. The limit depends on a number of factors,
|
|
|
|
* including certain configuration options and less immediately apparent ones
|
|
|
|
* such as the guest architecture or the use of certain devices.
|
2019-09-26 11:15:47 +00:00
|
|
|
* The @forceVFIO argument can be used to tell this function will use VFIO even
|
|
|
|
* though @def doesn't indicates so right now.
|
2017-03-21 18:52:50 +00:00
|
|
|
*
|
|
|
|
* Returns: the memory locking limit, or 0 if setting the limit is not needed
|
2015-11-06 14:51:33 +00:00
|
|
|
*/
|
|
|
|
unsigned long long
|
2019-09-26 11:15:47 +00:00
|
|
|
qemuDomainGetMemLockLimitBytes(virDomainDefPtr def,
|
|
|
|
bool forceVFIO)
|
2015-11-06 14:51:33 +00:00
|
|
|
{
|
2017-03-21 18:52:50 +00:00
|
|
|
unsigned long long memKB = 0;
|
2019-06-25 13:36:44 +00:00
|
|
|
bool usesVFIO = false;
|
2017-03-21 18:52:50 +00:00
|
|
|
size_t i;
|
2015-11-06 14:51:33 +00:00
|
|
|
|
2015-11-11 05:49:06 +00:00
|
|
|
/* prefer the hard limit */
|
|
|
|
if (virMemoryLimitIsSet(def->mem.hard_limit)) {
|
|
|
|
memKB = def->mem.hard_limit;
|
2020-01-06 21:57:40 +00:00
|
|
|
return memKB << 10;
|
2015-11-11 05:49:06 +00:00
|
|
|
}
|
|
|
|
|
2017-03-22 12:44:13 +00:00
|
|
|
/* If the guest wants its memory to be locked, we need to raise the memory
|
|
|
|
* locking limit so that the OS will not refuse allocation requests;
|
|
|
|
* however, there is no reliable way for us to figure out how much memory
|
|
|
|
* the QEMU process will allocate for its own use, so our only way out is
|
|
|
|
* to remove the limit altogether. Use with extreme care */
|
|
|
|
if (def->mem.locked)
|
|
|
|
return VIR_DOMAIN_MEMORY_PARAM_UNLIMITED;
|
2017-03-21 18:52:50 +00:00
|
|
|
|
2019-03-05 12:46:07 +00:00
|
|
|
if (ARCH_IS_PPC64(def->os.arch) && def->virtType == VIR_DOMAIN_VIRT_KVM)
|
2019-09-26 11:15:47 +00:00
|
|
|
return getPPC64MemLockLimitBytes(def, forceVFIO);
|
2015-11-13 09:37:12 +00:00
|
|
|
|
2015-11-11 05:44:56 +00:00
|
|
|
/* For device passthrough using VFIO the guest memory and MMIO memory
|
|
|
|
* regions need to be locked persistent in order to allow DMA.
|
|
|
|
*
|
|
|
|
* Currently the below limit is based on assumptions about the x86 platform.
|
|
|
|
*
|
|
|
|
* The chosen value of 1GiB below originates from x86 systems where it was
|
|
|
|
* used as space reserved for the MMIO region for the whole system.
|
|
|
|
*
|
|
|
|
* On x86_64 systems the MMIO regions of the IOMMU mapped devices don't
|
|
|
|
* count towards the locked memory limit since the memory is owned by the
|
|
|
|
* device. Emulated devices though do count, but the regions are usually
|
|
|
|
* small. Although it's not guaranteed that the limit will be enough for all
|
|
|
|
* configurations it didn't pose a problem for now.
|
|
|
|
*
|
2020-08-25 22:49:31 +00:00
|
|
|
* https://www.redhat.com/archives/libvir-list/2015-November/msg00329.html
|
2015-11-11 05:44:56 +00:00
|
|
|
*
|
|
|
|
* Note that this may not be valid for all platforms.
|
|
|
|
*/
|
2019-09-26 11:15:47 +00:00
|
|
|
if (!forceVFIO) {
|
|
|
|
for (i = 0; i < def->nhostdevs; i++) {
|
|
|
|
if (virHostdevIsVFIODevice(def->hostdevs[i]) ||
|
|
|
|
virHostdevIsMdevDevice(def->hostdevs[i])) {
|
|
|
|
usesVFIO = true;
|
|
|
|
break;
|
|
|
|
}
|
2017-03-21 18:52:50 +00:00
|
|
|
}
|
2015-11-06 15:39:31 +00:00
|
|
|
|
2019-09-26 11:15:47 +00:00
|
|
|
if (virDomainDefHasNVMeDisk(def))
|
|
|
|
usesVFIO = true;
|
|
|
|
}
|
2019-06-25 13:36:44 +00:00
|
|
|
|
2019-09-26 11:15:47 +00:00
|
|
|
if (usesVFIO || forceVFIO)
|
2019-06-25 13:36:44 +00:00
|
|
|
memKB = virDomainDefGetMemoryTotal(def) + 1024 * 1024;
|
|
|
|
|
2017-03-21 18:52:50 +00:00
|
|
|
return memKB << 10;
|
2015-11-06 15:39:31 +00:00
|
|
|
}
|
2015-11-11 13:20:04 +00:00
|
|
|
|
2017-03-21 18:52:50 +00:00
|
|
|
|
2015-12-10 17:39:14 +00:00
|
|
|
/**
|
|
|
|
* qemuDomainAdjustMaxMemLock:
|
|
|
|
* @vm: domain
|
2019-09-26 11:15:47 +00:00
|
|
|
* @forceVFIO: apply VFIO requirements even if vm's def doesn't require it
|
2015-12-10 17:39:14 +00:00
|
|
|
*
|
|
|
|
* Adjust the memory locking limit for the QEMU process associated to @vm, in
|
2019-09-26 11:15:47 +00:00
|
|
|
* order to comply with VFIO or architecture requirements. If @forceVFIO is
|
|
|
|
* true then the limit is changed even if nothing in @vm's definition indicates
|
|
|
|
* so.
|
2015-12-10 17:39:14 +00:00
|
|
|
*
|
2015-12-10 18:13:58 +00:00
|
|
|
* The limit will not be changed unless doing so is needed; the first time
|
|
|
|
* the limit is changed, the original (default) limit is stored in @vm and
|
|
|
|
* that value will be restored if qemuDomainAdjustMaxMemLock() is called once
|
|
|
|
* memory locking is no longer required.
|
2015-12-10 17:39:14 +00:00
|
|
|
*
|
|
|
|
* Returns: 0 on success, <0 on failure
|
|
|
|
*/
|
|
|
|
int
|
2019-09-26 11:15:47 +00:00
|
|
|
qemuDomainAdjustMaxMemLock(virDomainObjPtr vm,
|
|
|
|
bool forceVFIO)
|
2015-12-10 17:39:14 +00:00
|
|
|
{
|
|
|
|
unsigned long long bytes = 0;
|
|
|
|
|
2019-09-26 11:15:47 +00:00
|
|
|
bytes = qemuDomainGetMemLockLimitBytes(vm->def, forceVFIO);
|
2017-03-21 18:52:50 +00:00
|
|
|
|
|
|
|
if (bytes) {
|
2015-12-10 18:13:58 +00:00
|
|
|
/* If this is the first time adjusting the limit, save the current
|
|
|
|
* value so that we can restore it once memory locking is no longer
|
|
|
|
* required. Failing to obtain the current limit is not a critical
|
|
|
|
* failure, it just means we'll be unable to lower it later */
|
|
|
|
if (!vm->original_memlock) {
|
|
|
|
if (virProcessGetMaxMemLock(vm->pid, &(vm->original_memlock)) < 0)
|
|
|
|
vm->original_memlock = 0;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
/* Once memory locking is no longer required, we can restore the
|
|
|
|
* original, usually very low, limit */
|
|
|
|
bytes = vm->original_memlock;
|
|
|
|
vm->original_memlock = 0;
|
|
|
|
}
|
2015-12-10 17:39:14 +00:00
|
|
|
|
|
|
|
/* Trying to set the memory locking limit to zero is a no-op */
|
|
|
|
if (virProcessSetMaxMemLock(vm->pid, bytes) < 0)
|
2020-01-06 21:57:40 +00:00
|
|
|
return -1;
|
2015-12-10 17:39:14 +00:00
|
|
|
|
2020-01-06 21:57:40 +00:00
|
|
|
return 0;
|
2015-12-10 17:39:14 +00:00
|
|
|
}
|
2015-11-11 13:20:04 +00:00
|
|
|
|
2019-09-03 20:09:46 +00:00
|
|
|
|
|
|
|
/**
|
|
|
|
* qemuDomainAdjustMaxMemLockHostdev:
|
|
|
|
* @vm: domain
|
|
|
|
* @hostdev: device
|
|
|
|
*
|
|
|
|
* Temporarily add the hostdev to the domain definition. This is needed
|
|
|
|
* because qemuDomainAdjustMaxMemLock() requires the hostdev to be already
|
|
|
|
* part of the domain definition, but other functions like
|
|
|
|
* qemuAssignDeviceHostdevAlias() expect it *not* to be there.
|
|
|
|
* A better way to handle this would be nice
|
|
|
|
*
|
|
|
|
* Returns: 0 on success, <0 on failure
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
qemuDomainAdjustMaxMemLockHostdev(virDomainObjPtr vm,
|
|
|
|
virDomainHostdevDefPtr hostdev)
|
|
|
|
{
|
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
vm->def->hostdevs[vm->def->nhostdevs++] = hostdev;
|
2019-09-26 11:15:47 +00:00
|
|
|
if (qemuDomainAdjustMaxMemLock(vm, false) < 0)
|
2019-09-03 20:09:46 +00:00
|
|
|
ret = -1;
|
|
|
|
|
|
|
|
vm->def->hostdevs[--(vm->def->nhostdevs)] = NULL;
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2015-11-11 13:20:04 +00:00
|
|
|
/**
|
|
|
|
* qemuDomainHasVcpuPids:
|
|
|
|
* @vm: Domain object
|
|
|
|
*
|
|
|
|
* Returns true if we were able to successfully detect vCPU pids for the VM.
|
|
|
|
*/
|
|
|
|
bool
|
|
|
|
qemuDomainHasVcpuPids(virDomainObjPtr vm)
|
|
|
|
{
|
2016-07-01 12:56:14 +00:00
|
|
|
size_t i;
|
|
|
|
size_t maxvcpus = virDomainDefGetVcpusMax(vm->def);
|
|
|
|
virDomainVcpuDefPtr vcpu;
|
|
|
|
|
|
|
|
for (i = 0; i < maxvcpus; i++) {
|
|
|
|
vcpu = virDomainDefGetVcpu(vm->def, i);
|
2015-11-11 13:20:04 +00:00
|
|
|
|
2016-07-01 12:56:14 +00:00
|
|
|
if (QEMU_DOMAIN_VCPU_PRIVATE(vcpu)->tid > 0)
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
2015-11-11 13:20:04 +00:00
|
|
|
}
|
2015-11-12 15:45:12 +00:00
|
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
* qemuDomainGetVcpuPid:
|
|
|
|
* @vm: domain object
|
|
|
|
* @vcpu: cpu id
|
|
|
|
*
|
|
|
|
* Returns the vCPU pid. If @vcpu is offline or out of range 0 is returned.
|
|
|
|
*/
|
|
|
|
pid_t
|
|
|
|
qemuDomainGetVcpuPid(virDomainObjPtr vm,
|
2016-07-01 12:56:14 +00:00
|
|
|
unsigned int vcpuid)
|
2015-11-12 15:45:12 +00:00
|
|
|
{
|
2016-07-01 12:56:14 +00:00
|
|
|
virDomainVcpuDefPtr vcpu = virDomainDefGetVcpu(vm->def, vcpuid);
|
|
|
|
return QEMU_DOMAIN_VCPU_PRIVATE(vcpu)->tid;
|
2015-11-12 15:45:12 +00:00
|
|
|
}
|
2015-12-15 13:45:33 +00:00
|
|
|
|
|
|
|
|
2016-07-08 13:39:32 +00:00
|
|
|
/**
|
|
|
|
* qemuDomainValidateVcpuInfo:
|
|
|
|
*
|
|
|
|
* Validates vcpu thread information. If vcpu thread IDs are reported by qemu,
|
|
|
|
* this function validates that online vcpus have thread info present and
|
|
|
|
* offline vcpus don't.
|
|
|
|
*
|
|
|
|
* Returns 0 on success -1 on error.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
qemuDomainValidateVcpuInfo(virDomainObjPtr vm)
|
|
|
|
{
|
|
|
|
size_t maxvcpus = virDomainDefGetVcpusMax(vm->def);
|
|
|
|
virDomainVcpuDefPtr vcpu;
|
|
|
|
qemuDomainVcpuPrivatePtr vcpupriv;
|
|
|
|
size_t i;
|
|
|
|
|
|
|
|
if (!qemuDomainHasVcpuPids(vm))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
for (i = 0; i < maxvcpus; i++) {
|
|
|
|
vcpu = virDomainDefGetVcpu(vm->def, i);
|
|
|
|
vcpupriv = QEMU_DOMAIN_VCPU_PRIVATE(vcpu);
|
|
|
|
|
|
|
|
if (vcpu->online && vcpupriv->tid == 0) {
|
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
_("qemu didn't report thread id for vcpu '%zu'"), i);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!vcpu->online && vcpupriv->tid != 0) {
|
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
_("qemu reported thread id for inactive vcpu '%zu'"),
|
|
|
|
i);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2016-07-31 12:05:04 +00:00
|
|
|
bool
|
|
|
|
qemuDomainSupportsNewVcpuHotplug(virDomainObjPtr vm)
|
|
|
|
{
|
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
|
|
|
|
|
|
|
return virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_QUERY_HOTPLUGGABLE_CPUS);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2015-12-15 13:45:33 +00:00
|
|
|
/**
|
2016-07-19 14:00:29 +00:00
|
|
|
* qemuDomainRefreshVcpuInfo:
|
2015-12-15 13:45:33 +00:00
|
|
|
* @driver: qemu driver data
|
|
|
|
* @vm: domain object
|
|
|
|
* @asyncJob: current asynchronous job type
|
2016-08-05 12:48:27 +00:00
|
|
|
* @state: refresh vcpu state
|
2015-12-15 13:45:33 +00:00
|
|
|
*
|
2016-08-01 05:35:50 +00:00
|
|
|
* Updates vCPU information private data of @vm. Due to historical reasons this
|
|
|
|
* function returns success even if some data were not reported by qemu.
|
2015-12-15 13:45:33 +00:00
|
|
|
*
|
2016-08-05 12:48:27 +00:00
|
|
|
* If @state is true, the vcpu state is refreshed as reported by the monitor.
|
|
|
|
*
|
2016-08-01 05:35:50 +00:00
|
|
|
* Returns 0 on success and -1 on fatal error.
|
2015-12-15 13:45:33 +00:00
|
|
|
*/
|
|
|
|
int
|
2016-07-19 14:00:29 +00:00
|
|
|
qemuDomainRefreshVcpuInfo(virQEMUDriverPtr driver,
|
|
|
|
virDomainObjPtr vm,
|
2016-08-05 12:48:27 +00:00
|
|
|
int asyncJob,
|
|
|
|
bool state)
|
2015-12-15 13:45:33 +00:00
|
|
|
{
|
2016-07-01 12:56:14 +00:00
|
|
|
virDomainVcpuDefPtr vcpu;
|
2016-08-01 05:43:32 +00:00
|
|
|
qemuDomainVcpuPrivatePtr vcpupriv;
|
|
|
|
qemuMonitorCPUInfoPtr info = NULL;
|
2016-07-01 12:56:14 +00:00
|
|
|
size_t maxvcpus = virDomainDefGetVcpusMax(vm->def);
|
2018-10-17 13:14:32 +00:00
|
|
|
size_t i, j;
|
2016-07-31 12:05:04 +00:00
|
|
|
bool hotplug;
|
2018-04-04 14:45:03 +00:00
|
|
|
bool fast;
|
2018-10-17 13:14:32 +00:00
|
|
|
bool validTIDs = true;
|
2016-08-01 05:43:32 +00:00
|
|
|
int rc;
|
2016-07-01 12:56:14 +00:00
|
|
|
int ret = -1;
|
2015-12-15 13:45:33 +00:00
|
|
|
|
2016-07-31 12:05:04 +00:00
|
|
|
hotplug = qemuDomainSupportsNewVcpuHotplug(vm);
|
2018-04-04 14:45:03 +00:00
|
|
|
fast = virQEMUCapsGet(QEMU_DOMAIN_PRIVATE(vm)->qemuCaps,
|
|
|
|
QEMU_CAPS_QUERY_CPUS_FAST);
|
2015-12-15 13:45:33 +00:00
|
|
|
|
2018-10-17 13:14:32 +00:00
|
|
|
VIR_DEBUG("Maxvcpus %zu hotplug %d fast query %d", maxvcpus, hotplug, fast);
|
|
|
|
|
2015-12-15 13:45:33 +00:00
|
|
|
if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
|
|
|
|
return -1;
|
2016-08-01 05:43:32 +00:00
|
|
|
|
2018-04-04 14:45:03 +00:00
|
|
|
rc = qemuMonitorGetCPUInfo(qemuDomainGetMonitor(vm), &info, maxvcpus,
|
|
|
|
hotplug, fast);
|
2016-08-01 05:43:32 +00:00
|
|
|
|
2016-08-01 05:35:50 +00:00
|
|
|
if (qemuDomainObjExitMonitor(driver, vm) < 0)
|
2016-07-01 12:56:14 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
2016-08-01 05:43:32 +00:00
|
|
|
if (rc < 0)
|
2016-07-01 12:56:14 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
2018-10-17 13:14:32 +00:00
|
|
|
/*
|
|
|
|
* The query-cpus[-fast] commands return information
|
|
|
|
* about the vCPUs, including the OS level PID that
|
|
|
|
* is executing the vCPU.
|
|
|
|
*
|
|
|
|
* For KVM there is always a 1-1 mapping between
|
|
|
|
* vCPUs and host OS PIDs.
|
|
|
|
*
|
|
|
|
* For TCG things are a little more complicated.
|
|
|
|
*
|
|
|
|
* - In some cases the vCPUs will all have the same
|
|
|
|
* PID as the main emulator thread.
|
|
|
|
* - In some cases the first vCPU will have a distinct
|
|
|
|
* PID, but other vCPUs will share the emulator thread
|
|
|
|
*
|
|
|
|
* For MTTCG, things work the same as KVM, with each
|
|
|
|
* vCPU getting its own PID.
|
|
|
|
*
|
|
|
|
* We use the Host OS PIDs for doing vCPU pinning
|
|
|
|
* and reporting. The TCG data reporting will result
|
|
|
|
* in bad behaviour such as pinning the wrong PID.
|
|
|
|
* We must thus detect and discard bogus PID info
|
|
|
|
* from TCG, while still honouring the modern MTTCG
|
|
|
|
* impl which we can support.
|
|
|
|
*/
|
|
|
|
for (i = 0; i < maxvcpus && validTIDs; i++) {
|
|
|
|
if (info[i].tid == vm->pid) {
|
|
|
|
VIR_DEBUG("vCPU[%zu] PID %llu duplicates process",
|
|
|
|
i, (unsigned long long)info[i].tid);
|
|
|
|
validTIDs = false;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (j = 0; j < i; j++) {
|
2019-02-26 02:01:28 +00:00
|
|
|
if (info[i].tid != 0 && info[i].tid == info[j].tid) {
|
2018-10-17 13:14:32 +00:00
|
|
|
VIR_DEBUG("vCPU[%zu] PID %llu duplicates vCPU[%zu]",
|
|
|
|
i, (unsigned long long)info[i].tid, j);
|
|
|
|
validTIDs = false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (validTIDs)
|
2020-03-13 16:43:26 +00:00
|
|
|
VIR_DEBUG("vCPU[%zu] PID %llu is valid "
|
|
|
|
"(node=%d socket=%d die=%d core=%d thread=%d)",
|
|
|
|
i, (unsigned long long)info[i].tid,
|
|
|
|
info[i].node_id,
|
|
|
|
info[i].socket_id,
|
|
|
|
info[i].die_id,
|
|
|
|
info[i].core_id,
|
|
|
|
info[i].thread_id);
|
2018-10-17 13:14:32 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
VIR_DEBUG("Extracting vCPU information validTIDs=%d", validTIDs);
|
2016-07-01 12:56:14 +00:00
|
|
|
for (i = 0; i < maxvcpus; i++) {
|
|
|
|
vcpu = virDomainDefGetVcpu(vm->def, i);
|
2016-08-01 05:43:32 +00:00
|
|
|
vcpupriv = QEMU_DOMAIN_VCPU_PRIVATE(vcpu);
|
2016-07-01 12:56:14 +00:00
|
|
|
|
2018-10-17 13:14:32 +00:00
|
|
|
if (validTIDs)
|
2016-07-31 12:05:04 +00:00
|
|
|
vcpupriv->tid = info[i].tid;
|
|
|
|
|
|
|
|
vcpupriv->socket_id = info[i].socket_id;
|
|
|
|
vcpupriv->core_id = info[i].core_id;
|
|
|
|
vcpupriv->thread_id = info[i].thread_id;
|
2017-06-27 14:04:38 +00:00
|
|
|
vcpupriv->node_id = info[i].node_id;
|
2016-07-31 12:05:04 +00:00
|
|
|
vcpupriv->vcpus = info[i].vcpus;
|
|
|
|
VIR_FREE(vcpupriv->type);
|
2019-10-16 11:43:18 +00:00
|
|
|
vcpupriv->type = g_steal_pointer(&info[i].type);
|
2016-07-31 12:05:04 +00:00
|
|
|
VIR_FREE(vcpupriv->alias);
|
2019-10-16 11:43:18 +00:00
|
|
|
vcpupriv->alias = g_steal_pointer(&info[i].alias);
|
2019-08-29 12:47:10 +00:00
|
|
|
virJSONValueFree(vcpupriv->props);
|
2019-10-16 11:43:18 +00:00
|
|
|
vcpupriv->props = g_steal_pointer(&info[i].props);
|
2016-07-31 12:05:04 +00:00
|
|
|
vcpupriv->enable_id = info[i].id;
|
2016-11-21 13:57:54 +00:00
|
|
|
vcpupriv->qemu_id = info[i].qemu_id;
|
2016-08-05 12:48:27 +00:00
|
|
|
|
2016-08-02 15:58:43 +00:00
|
|
|
if (hotplug && state) {
|
2016-09-13 15:56:08 +00:00
|
|
|
vcpu->online = info[i].online;
|
|
|
|
if (info[i].hotpluggable)
|
|
|
|
vcpu->hotpluggable = VIR_TRISTATE_BOOL_YES;
|
|
|
|
else
|
|
|
|
vcpu->hotpluggable = VIR_TRISTATE_BOOL_NO;
|
2016-08-02 15:58:43 +00:00
|
|
|
}
|
2015-12-15 13:45:33 +00:00
|
|
|
}
|
|
|
|
|
2016-08-01 05:35:50 +00:00
|
|
|
ret = 0;
|
2016-07-01 12:56:14 +00:00
|
|
|
|
|
|
|
cleanup:
|
2016-08-01 05:43:32 +00:00
|
|
|
qemuMonitorCPUInfoFree(info, maxvcpus);
|
2016-07-01 12:56:14 +00:00
|
|
|
return ret;
|
2015-12-15 13:45:33 +00:00
|
|
|
}
|
2016-02-15 16:44:21 +00:00
|
|
|
|
2016-10-13 11:42:45 +00:00
|
|
|
/**
|
|
|
|
* qemuDomainGetVcpuHalted:
|
|
|
|
* @vm: domain object
|
|
|
|
* @vcpu: cpu id
|
|
|
|
*
|
|
|
|
* Returns the vCPU halted state.
|
|
|
|
*/
|
|
|
|
bool
|
|
|
|
qemuDomainGetVcpuHalted(virDomainObjPtr vm,
|
|
|
|
unsigned int vcpuid)
|
|
|
|
{
|
|
|
|
virDomainVcpuDefPtr vcpu = virDomainDefGetVcpu(vm->def, vcpuid);
|
|
|
|
return QEMU_DOMAIN_VCPU_PRIVATE(vcpu)->halted;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* qemuDomainRefreshVcpuHalted:
|
|
|
|
* @driver: qemu driver data
|
|
|
|
* @vm: domain object
|
|
|
|
* @asyncJob: current asynchronous job type
|
|
|
|
*
|
|
|
|
* Updates vCPU halted state in the private data of @vm.
|
|
|
|
*
|
|
|
|
* Returns 0 on success and -1 on error
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
qemuDomainRefreshVcpuHalted(virQEMUDriverPtr driver,
|
|
|
|
virDomainObjPtr vm,
|
|
|
|
int asyncJob)
|
|
|
|
{
|
|
|
|
virDomainVcpuDefPtr vcpu;
|
2016-11-21 14:50:19 +00:00
|
|
|
qemuDomainVcpuPrivatePtr vcpupriv;
|
2016-10-13 11:42:45 +00:00
|
|
|
size_t maxvcpus = virDomainDefGetVcpusMax(vm->def);
|
2020-11-12 21:19:40 +00:00
|
|
|
g_autoptr(virBitmap) haltedmap = NULL;
|
2016-10-13 11:42:45 +00:00
|
|
|
size_t i;
|
2018-04-04 14:45:03 +00:00
|
|
|
bool fast;
|
2016-10-13 11:42:45 +00:00
|
|
|
|
|
|
|
/* Not supported currently for TCG, see qemuDomainRefreshVcpuInfo */
|
|
|
|
if (vm->def->virtType == VIR_DOMAIN_VIRT_QEMU)
|
|
|
|
return 0;
|
|
|
|
|
2020-07-09 04:42:21 +00:00
|
|
|
/* The halted state is interesting only on s390(x). On other platforms
|
2018-04-04 14:45:07 +00:00
|
|
|
* the data would be stale at the time when it would be used.
|
|
|
|
* Calling qemuMonitorGetCpuHalted() can adversely affect the running
|
|
|
|
* VM's performance unless QEMU supports query-cpus-fast.
|
|
|
|
*/
|
|
|
|
if (!ARCH_IS_S390(vm->def->os.arch) ||
|
|
|
|
!virQEMUCapsGet(QEMU_DOMAIN_PRIVATE(vm)->qemuCaps,
|
|
|
|
QEMU_CAPS_QUERY_CPUS_FAST))
|
2018-02-06 10:18:56 +00:00
|
|
|
return 0;
|
|
|
|
|
2016-10-13 11:42:45 +00:00
|
|
|
if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
|
|
|
|
return -1;
|
|
|
|
|
2018-04-04 14:45:03 +00:00
|
|
|
fast = virQEMUCapsGet(QEMU_DOMAIN_PRIVATE(vm)->qemuCaps,
|
|
|
|
QEMU_CAPS_QUERY_CPUS_FAST);
|
|
|
|
haltedmap = qemuMonitorGetCpuHalted(qemuDomainGetMonitor(vm), maxvcpus,
|
|
|
|
fast);
|
2016-11-21 14:50:19 +00:00
|
|
|
if (qemuDomainObjExitMonitor(driver, vm) < 0 || !haltedmap)
|
2020-11-12 21:19:40 +00:00
|
|
|
return -1;
|
2016-10-13 11:42:45 +00:00
|
|
|
|
|
|
|
for (i = 0; i < maxvcpus; i++) {
|
|
|
|
vcpu = virDomainDefGetVcpu(vm->def, i);
|
2016-11-21 14:50:19 +00:00
|
|
|
vcpupriv = QEMU_DOMAIN_VCPU_PRIVATE(vcpu);
|
2018-02-06 15:00:45 +00:00
|
|
|
vcpupriv->halted = virTristateBoolFromBool(virBitmapIsBitSet(haltedmap,
|
|
|
|
vcpupriv->qemu_id));
|
2016-10-13 11:42:45 +00:00
|
|
|
}
|
|
|
|
|
2020-11-12 21:19:40 +00:00
|
|
|
return 0;
|
2016-10-13 11:42:45 +00:00
|
|
|
}
|
2016-02-15 16:44:21 +00:00
|
|
|
|
|
|
|
bool
|
|
|
|
qemuDomainSupportsNicdev(virDomainDefPtr def,
|
|
|
|
virDomainNetDefPtr net)
|
|
|
|
{
|
|
|
|
/* non-virtio ARM nics require legacy -net nic */
|
2018-11-28 21:45:14 +00:00
|
|
|
if (((def->os.arch == VIR_ARCH_ARMV6L) ||
|
|
|
|
(def->os.arch == VIR_ARCH_ARMV7L) ||
|
2016-02-15 16:44:21 +00:00
|
|
|
(def->os.arch == VIR_ARCH_AARCH64)) &&
|
|
|
|
net->info.type != VIR_DOMAIN_DEVICE_ADDRESS_TYPE_VIRTIO_MMIO &&
|
|
|
|
net->info.type != VIR_DOMAIN_DEVICE_ADDRESS_TYPE_PCI)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2017-01-23 13:32:13 +00:00
|
|
|
bool
|
|
|
|
qemuDomainNetSupportsMTU(virDomainNetType type)
|
|
|
|
{
|
|
|
|
switch (type) {
|
2017-01-23 13:33:20 +00:00
|
|
|
case VIR_DOMAIN_NET_TYPE_NETWORK:
|
|
|
|
case VIR_DOMAIN_NET_TYPE_BRIDGE:
|
2017-01-23 13:32:13 +00:00
|
|
|
case VIR_DOMAIN_NET_TYPE_ETHERNET:
|
|
|
|
case VIR_DOMAIN_NET_TYPE_VHOSTUSER:
|
2017-01-23 13:33:20 +00:00
|
|
|
return true;
|
|
|
|
case VIR_DOMAIN_NET_TYPE_USER:
|
2017-01-23 13:32:13 +00:00
|
|
|
case VIR_DOMAIN_NET_TYPE_SERVER:
|
|
|
|
case VIR_DOMAIN_NET_TYPE_CLIENT:
|
|
|
|
case VIR_DOMAIN_NET_TYPE_MCAST:
|
|
|
|
case VIR_DOMAIN_NET_TYPE_INTERNAL:
|
|
|
|
case VIR_DOMAIN_NET_TYPE_DIRECT:
|
|
|
|
case VIR_DOMAIN_NET_TYPE_HOSTDEV:
|
|
|
|
case VIR_DOMAIN_NET_TYPE_UDP:
|
2020-10-14 17:08:25 +00:00
|
|
|
case VIR_DOMAIN_NET_TYPE_VDPA:
|
2017-01-23 13:32:13 +00:00
|
|
|
case VIR_DOMAIN_NET_TYPE_LAST:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
2016-02-16 15:11:34 +00:00
|
|
|
|
2016-02-26 15:29:58 +00:00
|
|
|
|
|
|
|
virDomainDiskDefPtr
|
|
|
|
qemuDomainDiskByName(virDomainDefPtr def,
|
|
|
|
const char *name)
|
|
|
|
{
|
|
|
|
virDomainDiskDefPtr ret;
|
|
|
|
|
|
|
|
if (!(ret = virDomainDiskByName(def, name, true))) {
|
2019-12-06 12:59:17 +00:00
|
|
|
virReportError(VIR_ERR_INVALID_ARG,
|
|
|
|
_("disk '%s' not found in domain"), name);
|
2016-02-26 15:29:58 +00:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
2016-05-02 13:26:51 +00:00
|
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
* qemuDomainDefValidateDiskLunSource:
|
|
|
|
* @src: disk source struct
|
|
|
|
*
|
|
|
|
* Validate whether the disk source is valid for disk device='lun'.
|
|
|
|
*
|
2020-07-09 04:42:21 +00:00
|
|
|
* Returns 0 if the configuration is valid -1 and a libvirt error if the source
|
2016-05-02 13:26:51 +00:00
|
|
|
* is invalid.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
qemuDomainDefValidateDiskLunSource(const virStorageSource *src)
|
|
|
|
{
|
|
|
|
if (virStorageSourceGetActualType(src) == VIR_STORAGE_TYPE_NETWORK) {
|
|
|
|
if (src->protocol != VIR_STORAGE_NET_PROTOCOL_ISCSI) {
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
|
|
|
|
_("disk device='lun' is not supported "
|
|
|
|
"for protocol='%s'"),
|
|
|
|
virStorageNetProtocolTypeToString(src->protocol));
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
} else if (!virStorageSourceIsBlockLocal(src)) {
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
|
|
|
|
_("disk device='lun' is only valid for block "
|
|
|
|
"type disk source"));
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2020-05-07 08:53:51 +00:00
|
|
|
if (src->format != VIR_STORAGE_FILE_RAW) {
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
|
|
|
|
_("disk device 'lun' must use 'raw' format"));
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2020-05-07 08:55:37 +00:00
|
|
|
if (src->sliceStorage) {
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
|
|
|
|
_("disk device 'lun' doesn't support storage slice"));
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (src->encryption &&
|
|
|
|
src->encryption->format != VIR_STORAGE_ENCRYPTION_FORMAT_DEFAULT) {
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
|
|
|
|
_("disk device 'lun' doesn't support encryption"));
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2016-05-02 13:26:51 +00:00
|
|
|
return 0;
|
|
|
|
}
|
2016-03-30 14:34:17 +00:00
|
|
|
|
|
|
|
|
|
|
|
int
|
|
|
|
qemuDomainPrepareChannel(virDomainChrDefPtr channel,
|
|
|
|
const char *domainChannelTargetDir)
|
|
|
|
{
|
2017-07-25 14:33:50 +00:00
|
|
|
if (channel->targetType != VIR_DOMAIN_CHR_CHANNEL_TARGET_TYPE_VIRTIO ||
|
|
|
|
channel->source->type != VIR_DOMAIN_CHR_TYPE_UNIX ||
|
|
|
|
channel->source->data.nix.path)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (channel->target.name) {
|
2019-10-22 13:26:14 +00:00
|
|
|
channel->source->data.nix.path = g_strdup_printf("%s/%s",
|
|
|
|
domainChannelTargetDir,
|
|
|
|
channel->target.name);
|
2017-07-25 21:10:00 +00:00
|
|
|
} else {
|
|
|
|
/* Generate a unique name */
|
2019-10-22 13:26:14 +00:00
|
|
|
channel->source->data.nix.path = g_strdup_printf("%s/vioser-%02d-%02d-%02d.sock",
|
|
|
|
domainChannelTargetDir,
|
|
|
|
channel->info.addr.vioserial.controller,
|
|
|
|
channel->info.addr.vioserial.bus,
|
|
|
|
channel->info.addr.vioserial.port);
|
2016-03-30 14:34:17 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
2016-08-04 11:57:46 +00:00
|
|
|
|
|
|
|
|
2017-08-30 19:40:58 +00:00
|
|
|
/* qemuDomainPrepareChardevSourceTLS:
|
2016-10-24 12:05:54 +00:00
|
|
|
* @source: pointer to host interface data for char devices
|
|
|
|
* @cfg: driver configuration
|
|
|
|
*
|
|
|
|
* Updates host interface TLS encryption setting based on qemu.conf
|
|
|
|
* for char devices. This will be presented as "tls='yes|no'" in
|
|
|
|
* live XML of a guest.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
qemuDomainPrepareChardevSourceTLS(virDomainChrSourceDefPtr source,
|
|
|
|
virQEMUDriverConfigPtr cfg)
|
|
|
|
{
|
|
|
|
if (source->type == VIR_DOMAIN_CHR_TYPE_TCP) {
|
|
|
|
if (source->data.tcp.haveTLS == VIR_TRISTATE_BOOL_ABSENT) {
|
|
|
|
if (cfg->chardevTLS)
|
|
|
|
source->data.tcp.haveTLS = VIR_TRISTATE_BOOL_YES;
|
|
|
|
else
|
|
|
|
source->data.tcp.haveTLS = VIR_TRISTATE_BOOL_NO;
|
2016-10-21 14:42:26 +00:00
|
|
|
source->data.tcp.tlsFromConfig = true;
|
2016-10-24 12:05:54 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2017-08-30 19:40:58 +00:00
|
|
|
/* qemuDomainPrepareChardevSource:
|
2016-10-24 12:05:54 +00:00
|
|
|
* @def: live domain definition
|
2017-08-30 19:40:58 +00:00
|
|
|
* @cfg: driver configuration
|
2016-10-24 12:05:54 +00:00
|
|
|
*
|
|
|
|
* Iterate through all devices that use virDomainChrSourceDefPtr as host
|
|
|
|
* interface part.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
qemuDomainPrepareChardevSource(virDomainDefPtr def,
|
2017-08-30 19:40:58 +00:00
|
|
|
virQEMUDriverConfigPtr cfg)
|
2016-10-24 12:05:54 +00:00
|
|
|
{
|
|
|
|
size_t i;
|
|
|
|
|
|
|
|
for (i = 0; i < def->nserials; i++)
|
|
|
|
qemuDomainPrepareChardevSourceTLS(def->serials[i]->source, cfg);
|
|
|
|
|
|
|
|
for (i = 0; i < def->nparallels; i++)
|
|
|
|
qemuDomainPrepareChardevSourceTLS(def->parallels[i]->source, cfg);
|
|
|
|
|
|
|
|
for (i = 0; i < def->nchannels; i++)
|
|
|
|
qemuDomainPrepareChardevSourceTLS(def->channels[i]->source, cfg);
|
|
|
|
|
|
|
|
for (i = 0; i < def->nconsoles; i++)
|
|
|
|
qemuDomainPrepareChardevSourceTLS(def->consoles[i]->source, cfg);
|
|
|
|
|
|
|
|
for (i = 0; i < def->nrngs; i++)
|
|
|
|
if (def->rngs[i]->backend == VIR_DOMAIN_RNG_BACKEND_EGD)
|
|
|
|
qemuDomainPrepareChardevSourceTLS(def->rngs[i]->source.chardev, cfg);
|
|
|
|
|
|
|
|
for (i = 0; i < def->nsmartcards; i++)
|
|
|
|
if (def->smartcards[i]->type == VIR_DOMAIN_SMARTCARD_TYPE_PASSTHROUGH)
|
|
|
|
qemuDomainPrepareChardevSourceTLS(def->smartcards[i]->data.passthru,
|
|
|
|
cfg);
|
|
|
|
|
|
|
|
for (i = 0; i < def->nredirdevs; i++)
|
|
|
|
qemuDomainPrepareChardevSourceTLS(def->redirdevs[i]->source, cfg);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2018-05-29 11:40:16 +00:00
|
|
|
static int
|
|
|
|
qemuProcessPrepareStorageSourceTLSVxhs(virStorageSourcePtr src,
|
2020-06-30 13:39:41 +00:00
|
|
|
virQEMUDriverConfigPtr cfg,
|
|
|
|
qemuDomainObjPrivatePtr priv,
|
|
|
|
const char *parentAlias)
|
2018-05-29 11:40:16 +00:00
|
|
|
{
|
|
|
|
/* VxHS uses only client certificates and thus has no need for
|
|
|
|
* the server-key.pem nor a secret that could be used to decrypt
|
|
|
|
* the it, so no need to add a secinfo for a secret UUID. */
|
|
|
|
if (src->haveTLS == VIR_TRISTATE_BOOL_ABSENT) {
|
|
|
|
if (cfg->vxhsTLS)
|
|
|
|
src->haveTLS = VIR_TRISTATE_BOOL_YES;
|
|
|
|
else
|
|
|
|
src->haveTLS = VIR_TRISTATE_BOOL_NO;
|
|
|
|
src->tlsFromConfig = true;
|
|
|
|
}
|
|
|
|
|
2020-06-30 13:39:41 +00:00
|
|
|
if (src->haveTLS == VIR_TRISTATE_BOOL_YES) {
|
|
|
|
src->tlsAlias = qemuAliasTLSObjFromSrcAlias(parentAlias);
|
2019-10-20 11:49:46 +00:00
|
|
|
src->tlsCertdir = g_strdup(cfg->vxhsTLSx509certdir);
|
2018-05-29 11:40:16 +00:00
|
|
|
|
2020-06-30 13:39:41 +00:00
|
|
|
if (cfg->vxhsTLSx509secretUUID) {
|
|
|
|
qemuDomainStorageSourcePrivatePtr srcpriv = qemuDomainStorageSourcePrivateFetch(src);
|
|
|
|
|
|
|
|
if (!(srcpriv->tlsKeySecret = qemuDomainSecretInfoTLSNew(priv, src->tlsAlias,
|
|
|
|
cfg->vxhsTLSx509secretUUID)))
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-05-29 11:40:16 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2018-05-29 11:57:17 +00:00
|
|
|
static int
|
|
|
|
qemuProcessPrepareStorageSourceTLSNBD(virStorageSourcePtr src,
|
|
|
|
virQEMUDriverConfigPtr cfg,
|
2020-06-30 13:39:41 +00:00
|
|
|
qemuDomainObjPrivatePtr priv,
|
|
|
|
const char *parentAlias)
|
2018-05-29 11:57:17 +00:00
|
|
|
{
|
|
|
|
if (src->haveTLS == VIR_TRISTATE_BOOL_ABSENT) {
|
|
|
|
if (cfg->nbdTLS)
|
|
|
|
src->haveTLS = VIR_TRISTATE_BOOL_YES;
|
|
|
|
else
|
|
|
|
src->haveTLS = VIR_TRISTATE_BOOL_NO;
|
|
|
|
src->tlsFromConfig = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (src->haveTLS == VIR_TRISTATE_BOOL_YES) {
|
2020-06-30 13:39:41 +00:00
|
|
|
if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_NBD_TLS)) {
|
2018-05-29 11:57:17 +00:00
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
|
|
|
|
_("this qemu does not support TLS transport for NBD"));
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2020-06-30 13:39:41 +00:00
|
|
|
src->tlsAlias = qemuAliasTLSObjFromSrcAlias(parentAlias);
|
2019-10-20 11:49:46 +00:00
|
|
|
src->tlsCertdir = g_strdup(cfg->nbdTLSx509certdir);
|
2020-06-30 13:39:41 +00:00
|
|
|
|
|
|
|
if (cfg->nbdTLSx509secretUUID) {
|
|
|
|
qemuDomainStorageSourcePrivatePtr srcpriv = qemuDomainStorageSourcePrivateFetch(src);
|
|
|
|
|
|
|
|
if (!(srcpriv->tlsKeySecret = qemuDomainSecretInfoTLSNew(priv, src->tlsAlias,
|
|
|
|
cfg->nbdTLSx509secretUUID)))
|
|
|
|
return -1;
|
|
|
|
}
|
2018-05-29 11:57:17 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2018-05-29 11:45:18 +00:00
|
|
|
/* qemuProcessPrepareStorageSourceTLS:
|
2018-05-30 10:48:34 +00:00
|
|
|
* @source: source for a disk
|
2017-08-30 19:29:59 +00:00
|
|
|
* @cfg: driver configuration
|
2018-05-29 15:42:23 +00:00
|
|
|
* @parentAlias: alias of the parent device
|
2017-08-30 19:29:59 +00:00
|
|
|
*
|
|
|
|
* Updates host interface TLS encryption setting based on qemu.conf
|
|
|
|
* for disk devices. This will be presented as "tls='yes|no'" in
|
|
|
|
* live XML of a guest.
|
|
|
|
*
|
|
|
|
* Returns 0 on success, -1 on bad config/failure
|
|
|
|
*/
|
2017-11-23 16:01:37 +00:00
|
|
|
static int
|
2018-05-30 10:48:34 +00:00
|
|
|
qemuDomainPrepareStorageSourceTLS(virStorageSourcePtr src,
|
2018-05-29 15:42:23 +00:00
|
|
|
virQEMUDriverConfigPtr cfg,
|
2018-05-29 11:57:17 +00:00
|
|
|
const char *parentAlias,
|
2020-06-30 13:39:41 +00:00
|
|
|
qemuDomainObjPrivatePtr priv)
|
2017-08-30 19:29:59 +00:00
|
|
|
{
|
2018-05-29 11:45:18 +00:00
|
|
|
if (virStorageSourceGetActualType(src) != VIR_STORAGE_TYPE_NETWORK)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
switch ((virStorageNetProtocol) src->protocol) {
|
|
|
|
case VIR_STORAGE_NET_PROTOCOL_VXHS:
|
2020-06-30 13:39:41 +00:00
|
|
|
if (qemuProcessPrepareStorageSourceTLSVxhs(src, cfg, priv, parentAlias) < 0)
|
2018-05-29 11:45:18 +00:00
|
|
|
return -1;
|
|
|
|
break;
|
2017-08-30 19:29:59 +00:00
|
|
|
|
2018-05-29 11:45:18 +00:00
|
|
|
case VIR_STORAGE_NET_PROTOCOL_NBD:
|
2020-06-30 13:39:41 +00:00
|
|
|
if (qemuProcessPrepareStorageSourceTLSNBD(src, cfg, priv, parentAlias) < 0)
|
2018-05-29 11:57:17 +00:00
|
|
|
return -1;
|
|
|
|
break;
|
|
|
|
|
2018-05-29 11:45:18 +00:00
|
|
|
case VIR_STORAGE_NET_PROTOCOL_RBD:
|
|
|
|
case VIR_STORAGE_NET_PROTOCOL_SHEEPDOG:
|
|
|
|
case VIR_STORAGE_NET_PROTOCOL_GLUSTER:
|
|
|
|
case VIR_STORAGE_NET_PROTOCOL_ISCSI:
|
|
|
|
case VIR_STORAGE_NET_PROTOCOL_HTTP:
|
|
|
|
case VIR_STORAGE_NET_PROTOCOL_HTTPS:
|
|
|
|
case VIR_STORAGE_NET_PROTOCOL_FTP:
|
|
|
|
case VIR_STORAGE_NET_PROTOCOL_FTPS:
|
|
|
|
case VIR_STORAGE_NET_PROTOCOL_TFTP:
|
|
|
|
case VIR_STORAGE_NET_PROTOCOL_SSH:
|
2018-05-30 10:50:44 +00:00
|
|
|
if (src->haveTLS == VIR_TRISTATE_BOOL_YES) {
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
|
|
|
|
_("TLS transport is not supported for disk protocol '%s'"),
|
|
|
|
virStorageNetProtocolTypeToString(src->protocol));
|
|
|
|
return -1;
|
|
|
|
}
|
2018-05-29 11:45:18 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case VIR_STORAGE_NET_PROTOCOL_NONE:
|
|
|
|
case VIR_STORAGE_NET_PROTOCOL_LAST:
|
|
|
|
default:
|
|
|
|
virReportEnumRangeError(virStorageNetProtocol, src->protocol);
|
2018-05-30 10:48:34 +00:00
|
|
|
return -1;
|
2018-05-29 11:45:18 +00:00
|
|
|
}
|
2017-08-30 19:29:59 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2019-10-22 13:26:14 +00:00
|
|
|
void
|
2016-09-13 11:40:50 +00:00
|
|
|
qemuDomainPrepareShmemChardev(virDomainShmemDefPtr shmem)
|
|
|
|
{
|
|
|
|
if (!shmem->server.enabled ||
|
|
|
|
shmem->server.chr.data.nix.path)
|
2019-10-22 13:26:14 +00:00
|
|
|
return;
|
2016-09-13 11:40:50 +00:00
|
|
|
|
2019-10-22 13:26:14 +00:00
|
|
|
shmem->server.chr.data.nix.path = g_strdup_printf("/var/lib/libvirt/shmem-%s-sock",
|
|
|
|
shmem->name);
|
2016-09-13 11:40:50 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2016-08-04 11:57:46 +00:00
|
|
|
/**
|
|
|
|
* qemuDomainVcpuHotplugIsInOrder:
|
|
|
|
* @def: domain definition
|
|
|
|
*
|
|
|
|
* Returns true if online vcpus were added in order (clustered behind vcpu0
|
|
|
|
* with increasing order).
|
|
|
|
*/
|
|
|
|
bool
|
|
|
|
qemuDomainVcpuHotplugIsInOrder(virDomainDefPtr def)
|
|
|
|
{
|
|
|
|
size_t maxvcpus = virDomainDefGetVcpusMax(def);
|
|
|
|
virDomainVcpuDefPtr vcpu;
|
|
|
|
unsigned int prevorder = 0;
|
|
|
|
size_t seenonlinevcpus = 0;
|
|
|
|
size_t i;
|
|
|
|
|
|
|
|
for (i = 0; i < maxvcpus; i++) {
|
|
|
|
vcpu = virDomainDefGetVcpu(def, i);
|
|
|
|
|
|
|
|
if (!vcpu->online)
|
|
|
|
break;
|
|
|
|
|
|
|
|
if (vcpu->order < prevorder)
|
|
|
|
break;
|
|
|
|
|
|
|
|
if (vcpu->order > prevorder)
|
|
|
|
prevorder = vcpu->order;
|
|
|
|
|
|
|
|
seenonlinevcpus++;
|
|
|
|
}
|
|
|
|
|
|
|
|
return seenonlinevcpus == virDomainDefGetVcpus(def);
|
|
|
|
}
|
2016-08-04 12:23:25 +00:00
|
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
* qemuDomainVcpuPersistOrder:
|
|
|
|
* @def: domain definition
|
|
|
|
*
|
|
|
|
* Saves the order of vcpus detected from qemu to the domain definition.
|
|
|
|
* The private data note the order only for the entry describing the
|
|
|
|
* hotpluggable entity. This function copies the order into the definition part
|
|
|
|
* of all sub entities.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
qemuDomainVcpuPersistOrder(virDomainDefPtr def)
|
|
|
|
{
|
|
|
|
size_t maxvcpus = virDomainDefGetVcpusMax(def);
|
|
|
|
virDomainVcpuDefPtr vcpu;
|
|
|
|
qemuDomainVcpuPrivatePtr vcpupriv;
|
|
|
|
unsigned int prevorder = 0;
|
|
|
|
size_t i;
|
|
|
|
|
|
|
|
for (i = 0; i < maxvcpus; i++) {
|
|
|
|
vcpu = virDomainDefGetVcpu(def, i);
|
|
|
|
vcpupriv = QEMU_DOMAIN_VCPU_PRIVATE(vcpu);
|
|
|
|
|
|
|
|
if (!vcpu->online) {
|
|
|
|
vcpu->order = 0;
|
|
|
|
} else {
|
|
|
|
if (vcpupriv->enable_id != 0)
|
|
|
|
prevorder = vcpupriv->enable_id;
|
|
|
|
|
|
|
|
vcpu->order = prevorder;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2016-09-12 08:24:21 +00:00
|
|
|
|
|
|
|
|
|
|
|
/**
 * qemuDomainCheckMonitor:
 * @driver: qemu driver
 * @vm: domain object
 * @asyncJob: async job under which the monitor is entered
 *
 * Issues a check command on the domain's monitor to verify it is
 * responsive. Returns the result of the check, or -1 if the monitor
 * could not be entered or exited.
 */
int
qemuDomainCheckMonitor(virQEMUDriverPtr driver,
                       virDomainObjPtr vm,
                       qemuDomainAsyncJob asyncJob)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    int ret;

    if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
        return -1;

    ret = qemuMonitorCheck(priv->mon);

    /* exit failure (e.g. domain died meanwhile) overrides the check result */
    if (qemuDomainObjExitMonitor(driver, vm) < 0)
        return -1;

    return ret;
}
|
2016-09-30 12:41:37 +00:00
|
|
|
|
|
|
|
|
|
|
|
bool
|
|
|
|
qemuDomainSupportsVideoVga(virDomainVideoDefPtr video,
|
|
|
|
virQEMUCapsPtr qemuCaps)
|
|
|
|
{
|
2019-09-23 10:44:30 +00:00
|
|
|
if (video->type == VIR_DOMAIN_VIDEO_TYPE_VIRTIO) {
|
|
|
|
if (video->backend == VIR_DOMAIN_VIDEO_BACKEND_TYPE_VHOSTUSER) {
|
|
|
|
if (!virQEMUCapsGet(qemuCaps, QEMU_CAPS_DEVICE_VHOST_USER_VGA))
|
|
|
|
return false;
|
|
|
|
} else if (!virQEMUCapsGet(qemuCaps, QEMU_CAPS_DEVICE_VIRTIO_VGA)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
2016-09-30 12:41:37 +00:00
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
2016-11-15 10:30:18 +00:00
|
|
|
|
|
|
|
|
2019-09-16 15:14:48 +00:00
|
|
|
bool
|
|
|
|
qemuDomainNeedsVFIO(const virDomainDef *def)
|
|
|
|
{
|
|
|
|
return virDomainDefHasVFIOHostdev(def) ||
|
2019-09-18 09:36:18 +00:00
|
|
|
virDomainDefHasMdevHostdev(def) ||
|
|
|
|
virDomainDefHasNVMeDisk(def);
|
2019-09-16 15:14:48 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2017-02-09 10:01:29 +00:00
|
|
|
/**
 * qemuDomainGetHostdevPath:
 * @dev: host device definition
 * @path: resulting path to @dev
 * @perms: Optional pointer to VIR_CGROUP_DEVICE_* perms
 *
 * For given device @dev fetch its host path and store it at
 * @path. Optionally, caller can get @perms on the path (e.g.
 * rw/ro). When called on a missing device, the function will return success
 * and store NULL at @path.
 *
 * The caller is responsible for freeing the @path when no longer
 * needed.
 *
 * Returns 0 on success, -1 otherwise.
 */
int
qemuDomainGetHostdevPath(virDomainHostdevDefPtr dev,
                         char **path,
                         int *perms)
{
    virDomainHostdevSubsysUSBPtr usbsrc = &dev->source.subsys.u.usb;
    virDomainHostdevSubsysPCIPtr pcisrc = &dev->source.subsys.u.pci;
    virDomainHostdevSubsysSCSIPtr scsisrc = &dev->source.subsys.u.scsi;
    virDomainHostdevSubsysSCSIVHostPtr hostsrc = &dev->source.subsys.u.scsi_host;
    virDomainHostdevSubsysMediatedDevPtr mdevsrc = &dev->source.subsys.u.mdev;
    g_autoptr(virUSBDevice) usb = NULL;
    g_autoptr(virSCSIDevice) scsi = NULL;
    g_autoptr(virSCSIVHostDevice) host = NULL;
    /* autofree unless ownership is stolen into *path at the end */
    g_autofree char *tmpPath = NULL;
    int perm = 0;

    switch ((virDomainHostdevMode) dev->mode) {
    case VIR_DOMAIN_HOSTDEV_MODE_SUBSYS:
        switch ((virDomainHostdevSubsysType)dev->source.subsys.type) {
        case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_PCI:
            /* only VFIO-backed PCI devices have a /dev node (IOMMU group) */
            if (pcisrc->backend == VIR_DOMAIN_HOSTDEV_PCI_BACKEND_VFIO) {
                if (!(tmpPath = virPCIDeviceAddressGetIOMMUGroupDev(&pcisrc->addr)))
                    return -1;

                perm = VIR_CGROUP_DEVICE_RW;
            }
            break;

        case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_USB:
            /* a missing USB device yields success with NULL path */
            if (dev->missing)
                break;
            usb = virUSBDeviceNew(usbsrc->bus,
                                  usbsrc->device,
                                  NULL);
            if (!usb)
                return -1;

            tmpPath = g_strdup(virUSBDeviceGetPath(usb));
            perm = VIR_CGROUP_DEVICE_RW;
            break;

        case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_SCSI:
            if (scsisrc->protocol == VIR_DOMAIN_HOSTDEV_SCSI_PROTOCOL_TYPE_ISCSI) {
                /* iSCSI devices have no local /dev node to expose */
                virDomainHostdevSubsysSCSIiSCSIPtr iscsisrc = &scsisrc->u.iscsi;
                VIR_DEBUG("Not updating /dev for hostdev iSCSI path '%s'", iscsisrc->src->path);
            } else {
                virDomainHostdevSubsysSCSIHostPtr scsihostsrc = &scsisrc->u.host;
                scsi = virSCSIDeviceNew(NULL,
                                        scsihostsrc->adapter,
                                        scsihostsrc->bus,
                                        scsihostsrc->target,
                                        scsihostsrc->unit,
                                        dev->readonly,
                                        dev->shareable);

                if (!scsi)
                    return -1;

                tmpPath = g_strdup(virSCSIDeviceGetPath(scsi));
                /* read-only SCSI devices get only READ permission */
                perm = virSCSIDeviceGetReadonly(scsi) ?
                    VIR_CGROUP_DEVICE_READ : VIR_CGROUP_DEVICE_RW;
            }
            break;

        case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_SCSI_HOST: {
            if (hostsrc->protocol ==
                VIR_DOMAIN_HOSTDEV_SUBSYS_SCSI_HOST_PROTOCOL_TYPE_VHOST) {
                if (!(host = virSCSIVHostDeviceNew(hostsrc->wwpn)))
                    return -1;

                tmpPath = g_strdup(virSCSIVHostDeviceGetPath(host));
                perm = VIR_CGROUP_DEVICE_RW;
            }
            break;
        }

        case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_MDEV:
            if (!(tmpPath = virMediatedDeviceGetIOMMUGroupDev(mdevsrc->uuidstr)))
                return -1;

            perm = VIR_CGROUP_DEVICE_RW;
            break;
        case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_LAST:
            break;
        }
        break;

    case VIR_DOMAIN_HOSTDEV_MODE_CAPABILITIES:
    case VIR_DOMAIN_HOSTDEV_MODE_LAST:
        /* nada */
        break;
    }

    /* tmpPath may still be NULL here (missing/unhandled device) — that
     * is the documented success-with-NULL contract */
    *path = g_steal_pointer(&tmpPath);
    if (perms)
        *perms = perm;
    return 0;
}
|
|
|
|
|
|
|
|
|
2017-02-22 16:51:05 +00:00
|
|
|
/**
|
|
|
|
* qemuDomainDiskLookupByNodename:
|
|
|
|
* @def: domain definition to look for the disk
|
|
|
|
* @nodename: block backend node name to find
|
|
|
|
* @src: filled with the specific backing store element if provided
|
|
|
|
*
|
|
|
|
* Looks up the disk in the domain via @nodename and returns its definition.
|
|
|
|
* Optionally fills @src and @idx if provided with the specific backing chain
|
|
|
|
* element which corresponds to the node name.
|
|
|
|
*/
|
|
|
|
virDomainDiskDefPtr
|
|
|
|
qemuDomainDiskLookupByNodename(virDomainDefPtr def,
|
|
|
|
const char *nodename,
|
2020-07-15 10:29:34 +00:00
|
|
|
virStorageSourcePtr *src)
|
2017-02-22 16:51:05 +00:00
|
|
|
{
|
|
|
|
size_t i;
|
|
|
|
virStorageSourcePtr tmp = NULL;
|
|
|
|
|
|
|
|
if (src)
|
|
|
|
*src = NULL;
|
|
|
|
|
|
|
|
for (i = 0; i < def->ndisks; i++) {
|
2020-07-15 10:33:34 +00:00
|
|
|
if ((tmp = virStorageSourceFindByNodeName(def->disks[i]->src, nodename))) {
|
2017-02-22 16:51:05 +00:00
|
|
|
if (src)
|
|
|
|
*src = tmp;
|
|
|
|
|
|
|
|
return def->disks[i];
|
|
|
|
}
|
2020-07-15 10:36:50 +00:00
|
|
|
|
|
|
|
if (def->disks[i]->mirror &&
|
|
|
|
(tmp = virStorageSourceFindByNodeName(def->disks[i]->mirror, nodename))) {
|
|
|
|
if (src)
|
|
|
|
*src = tmp;
|
|
|
|
|
|
|
|
return def->disks[i];
|
|
|
|
}
|
2017-02-22 16:51:05 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
2017-02-23 17:13:02 +00:00
|
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
* qemuDomainDiskBackingStoreGetName:
|
|
|
|
*
|
|
|
|
* Creates a name using the indexed syntax (vda[1])for the given backing store
|
|
|
|
* entry for a disk.
|
|
|
|
*/
|
|
|
|
char *
|
|
|
|
qemuDomainDiskBackingStoreGetName(virDomainDiskDefPtr disk,
|
|
|
|
unsigned int idx)
|
|
|
|
{
|
|
|
|
if (idx)
|
2020-07-15 10:01:39 +00:00
|
|
|
return g_strdup_printf("%s[%d]", disk->dst, idx);
|
2017-02-23 17:13:02 +00:00
|
|
|
|
2020-07-15 10:01:39 +00:00
|
|
|
return g_strdup(disk->dst);
|
2017-02-23 17:13:02 +00:00
|
|
|
}
|
2017-02-23 18:14:47 +00:00
|
|
|
|
|
|
|
|
|
|
|
virStorageSourcePtr
|
|
|
|
qemuDomainGetStorageSourceByDevstr(const char *devstr,
|
|
|
|
virDomainDefPtr def)
|
|
|
|
{
|
|
|
|
virDomainDiskDefPtr disk = NULL;
|
|
|
|
virStorageSourcePtr src = NULL;
|
2020-01-09 18:33:44 +00:00
|
|
|
g_autofree char *target = NULL;
|
2017-02-23 18:14:47 +00:00
|
|
|
unsigned int idx;
|
|
|
|
size_t i;
|
|
|
|
|
|
|
|
if (virStorageFileParseBackingStoreStr(devstr, &target, &idx) < 0) {
|
|
|
|
virReportError(VIR_ERR_INVALID_ARG,
|
|
|
|
_("failed to parse block device '%s'"), devstr);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < def->ndisks; i++) {
|
|
|
|
if (STREQ(target, def->disks[i]->dst)) {
|
|
|
|
disk = def->disks[i];
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!disk) {
|
|
|
|
virReportError(VIR_ERR_INVALID_ARG,
|
2017-04-22 19:06:20 +00:00
|
|
|
_("failed to find disk '%s'"), target);
|
2020-01-09 18:33:46 +00:00
|
|
|
return NULL;
|
2017-02-23 18:14:47 +00:00
|
|
|
}
|
|
|
|
|
2017-03-29 14:56:05 +00:00
|
|
|
if (idx == 0)
|
2020-07-15 10:51:40 +00:00
|
|
|
return disk->src;
|
|
|
|
|
|
|
|
if ((src = virStorageFileChainLookup(disk->src, NULL, NULL, idx, NULL)))
|
|
|
|
return src;
|
2017-02-23 18:14:47 +00:00
|
|
|
|
2020-07-15 10:51:40 +00:00
|
|
|
if (disk->mirror &&
|
|
|
|
(src = virStorageFileChainLookup(disk->mirror, NULL, NULL, idx, NULL)))
|
|
|
|
return src;
|
|
|
|
|
|
|
|
return NULL;
|
2017-02-23 18:14:47 +00:00
|
|
|
}
|
2017-06-02 20:50:18 +00:00
|
|
|
|
|
|
|
|
|
|
|
/* virObject dispose callback for qemuDomainSaveCookie: frees the
 * contained CPU definition (the object itself is freed by virObject). */
static void
qemuDomainSaveCookieDispose(void *obj)
{
    qemuDomainSaveCookiePtr cookie = obj;

    VIR_DEBUG("cookie=%p", cookie);

    virCPUDefFree(cookie->cpu);
}
|
|
|
|
|
|
|
|
|
|
|
|
/* Allocate a new save cookie describing the current state of @vm
 * (CPU definition and slirp-helper usage) for embedding into a
 * save/migration image. Returns NULL on allocation/copy failure. */
qemuDomainSaveCookiePtr
qemuDomainSaveCookieNew(virDomainObjPtr vm)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    g_autoptr(qemuDomainSaveCookie) cookie = NULL;

    if (qemuDomainInitialize() < 0)
        return NULL;

    if (!(cookie = virObjectNew(qemuDomainSaveCookieClass)))
        return NULL;

    /* NOTE(review): the live CPU (vm->def->cpu) is copied only when an
     * original CPU is tracked in priv->origCPU — presumably because the
     * cookie CPU is only meaningful when the live def was updated;
     * confirm against the consumers of cookie->cpu. */
    if (priv->origCPU && !(cookie->cpu = virCPUDefCopy(vm->def->cpu)))
        return NULL;

    cookie->slirpHelper = qemuDomainGetSlirpHelperOk(vm);

    VIR_DEBUG("Save cookie %p, cpu=%p, slirpHelper=%d",
              cookie, cookie->cpu, cookie->slirpHelper);

    return g_steal_pointer(&cookie);
}
|
|
|
|
|
|
|
|
|
|
|
|
static int
|
2019-10-14 12:45:33 +00:00
|
|
|
qemuDomainSaveCookieParse(xmlXPathContextPtr ctxt G_GNUC_UNUSED,
|
2017-06-02 20:50:18 +00:00
|
|
|
virObjectPtr *obj)
|
|
|
|
{
|
2020-01-13 10:06:39 +00:00
|
|
|
g_autoptr(qemuDomainSaveCookie) cookie = NULL;
|
2017-06-02 20:50:18 +00:00
|
|
|
|
|
|
|
if (qemuDomainInitialize() < 0)
|
2020-01-13 10:06:39 +00:00
|
|
|
return -1;
|
2017-06-02 20:50:18 +00:00
|
|
|
|
|
|
|
if (!(cookie = virObjectNew(qemuDomainSaveCookieClass)))
|
2020-01-13 10:06:39 +00:00
|
|
|
return -1;
|
2017-06-02 20:50:18 +00:00
|
|
|
|
2017-06-02 20:52:03 +00:00
|
|
|
if (virCPUDefParseXML(ctxt, "./cpu[1]", VIR_CPU_TYPE_GUEST,
|
2020-10-07 08:54:55 +00:00
|
|
|
&cookie->cpu, false) < 0)
|
2020-01-13 10:06:39 +00:00
|
|
|
return -1;
|
2017-06-02 20:52:03 +00:00
|
|
|
|
2019-08-08 14:55:07 +00:00
|
|
|
cookie->slirpHelper = virXPathBoolean("boolean(./slirpHelper)", ctxt) > 0;
|
|
|
|
|
2020-01-13 10:06:39 +00:00
|
|
|
*obj = (virObjectPtr) g_steal_pointer(&cookie);
|
2017-06-02 20:50:18 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static int
|
2017-06-02 20:52:03 +00:00
|
|
|
qemuDomainSaveCookieFormat(virBufferPtr buf,
|
|
|
|
virObjectPtr obj)
|
2017-06-02 20:50:18 +00:00
|
|
|
{
|
2017-06-02 20:52:03 +00:00
|
|
|
qemuDomainSaveCookiePtr cookie = (qemuDomainSaveCookiePtr) obj;
|
|
|
|
|
|
|
|
if (cookie->cpu &&
|
2017-06-30 13:47:23 +00:00
|
|
|
virCPUDefFormatBufFull(buf, cookie->cpu, NULL) < 0)
|
2017-06-02 20:52:03 +00:00
|
|
|
return -1;
|
|
|
|
|
2019-08-08 14:55:07 +00:00
|
|
|
if (cookie->slirpHelper)
|
|
|
|
virBufferAddLit(buf, "<slirpHelper/>\n");
|
|
|
|
|
2017-06-02 20:50:18 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/* Callbacks used to (de)serialize the qemu save cookie embedded in
 * save/migration images. */
virSaveCookieCallbacks virQEMUDriverDomainSaveCookie = {
    .parse = qemuDomainSaveCookieParse,
    .format = qemuDomainSaveCookieFormat,
};
|
2017-05-31 10:34:10 +00:00
|
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
* qemuDomainUpdateCPU:
|
|
|
|
* @vm: domain which is being started
|
|
|
|
* @cpu: CPU updated when the domain was running previously (before migration,
|
|
|
|
* snapshot, or save)
|
|
|
|
* @origCPU: where to store the original CPU from vm->def in case @cpu was
|
|
|
|
* used instead
|
|
|
|
*
|
|
|
|
* Replace the CPU definition with the updated one when QEMU is new enough to
|
|
|
|
* allow us to check extra features it is about to enable or disable when
|
|
|
|
* starting a domain. The original CPU is stored in @origCPU.
|
|
|
|
*
|
|
|
|
* Returns 0 on success, -1 on error.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
qemuDomainUpdateCPU(virDomainObjPtr vm,
|
|
|
|
virCPUDefPtr cpu,
|
|
|
|
virCPUDefPtr *origCPU)
|
|
|
|
{
|
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
|
|
|
|
|
|
|
*origCPU = NULL;
|
|
|
|
|
|
|
|
if (!cpu || !vm->def->cpu ||
|
|
|
|
!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_QUERY_CPU_MODEL_EXPANSION) ||
|
|
|
|
virCPUDefIsEqual(vm->def->cpu, cpu, false))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (!(cpu = virCPUDefCopy(cpu)))
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
VIR_DEBUG("Replacing CPU def with the updated one");
|
|
|
|
|
|
|
|
*origCPU = vm->def->cpu;
|
|
|
|
vm->def->cpu = cpu;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
2017-07-21 13:51:03 +00:00
|
|
|
|
2017-10-06 12:49:07 +00:00
|
|
|
|
|
|
|
/**
 * qemuDomainFixupCPUs:
 * @vm: domain object
 * @origCPU: original CPU used when the domain was started
 *
 * Libvirt older than 3.9.0 could have messed up the expansion of host-model
 * CPU when reconnecting to a running domain by adding features QEMU does not
 * support (such as cmt). This API fixes both the actual CPU provided by QEMU
 * (stored in the domain object) and the @origCPU used when starting the
 * domain.
 *
 * This is safe even if the original CPU definition used mode='custom' (rather
 * than host-model) since we know QEMU was able to start the domain and thus
 * the CPU definitions do not contain any features unknown to QEMU.
 *
 * This function can only be used on an active domain or when restoring a
 * domain which was running.
 *
 * Returns 0 on success, -1 on error.
 */
int
qemuDomainFixupCPUs(virDomainObjPtr vm,
                    virCPUDefPtr *origCPU)
{
    g_autoptr(virCPUDef) fixedCPU = NULL;
    g_autoptr(virCPUDef) fixedOrig = NULL;
    virArch arch = vm->def->os.arch;

    /* the mis-expansion only ever affected x86 guests */
    if (!ARCH_IS_X86(arch))
        return 0;

    if (!vm->def->cpu ||
        vm->def->cpu->mode != VIR_CPU_MODE_CUSTOM ||
        !vm->def->cpu->model)
        return 0;

    /* Missing origCPU means QEMU created exactly the same virtual CPU which
     * we asked for or libvirt was too old to mess up the translation from
     * host-model.
     */
    if (!*origCPU)
        return 0;

    /* rebuild the live CPU without the bogus features if 'cmt' leaked in */
    if (virCPUDefFindFeature(vm->def->cpu, "cmt") &&
        (!(fixedCPU = virCPUDefCopyWithoutModel(vm->def->cpu)) ||
         virCPUDefCopyModelFilter(fixedCPU, vm->def->cpu, false,
                                  virQEMUCapsCPUFilterFeatures, &arch) < 0))
        return -1;

    /* the same fixup applies to the remembered original CPU */
    if (virCPUDefFindFeature(*origCPU, "cmt") &&
        (!(fixedOrig = virCPUDefCopyWithoutModel(*origCPU)) ||
         virCPUDefCopyModelFilter(fixedOrig, *origCPU, false,
                                  virQEMUCapsCPUFilterFeatures, &arch) < 0))
        return -1;

    if (fixedCPU) {
        virCPUDefFree(vm->def->cpu);
        vm->def->cpu = g_steal_pointer(&fixedCPU);
    }

    if (fixedOrig) {
        virCPUDefFree(*origCPU);
        *origCPU = g_steal_pointer(&fixedOrig);
    }

    return 0;
}
|
|
|
|
|
|
|
|
|
2017-07-21 13:51:03 +00:00
|
|
|
char *
|
|
|
|
qemuDomainGetMachineName(virDomainObjPtr vm)
|
|
|
|
{
|
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
|
|
|
virQEMUDriverPtr driver = priv->driver;
|
|
|
|
char *ret = NULL;
|
|
|
|
|
2017-08-31 09:01:44 +00:00
|
|
|
if (vm->pid > 0) {
|
2017-07-21 13:51:03 +00:00
|
|
|
ret = virSystemdGetMachineNameByPID(vm->pid);
|
|
|
|
if (!ret)
|
|
|
|
virResetLastError();
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!ret)
|
2020-03-23 07:48:38 +00:00
|
|
|
ret = virDomainDriverGenerateMachineName("qemu",
|
|
|
|
driver->embeddedRoot,
|
2020-03-20 17:14:22 +00:00
|
|
|
vm->def->id, vm->def->name,
|
|
|
|
driver->privileged);
|
2017-07-21 13:51:03 +00:00
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
2017-10-11 13:06:07 +00:00
|
|
|
|
|
|
|
|
|
|
|
/* Check whether the device address is using either 'ccw' or default s390
|
|
|
|
* address format and whether that's "legal" for the current qemu and/or
|
|
|
|
* guest os.machine type. This is the corollary to the code which doesn't
|
|
|
|
* find the address type set using an emulator that supports either 'ccw'
|
|
|
|
* or s390 and sets the address type based on the capabilities.
|
|
|
|
*
|
|
|
|
* If the address is using 'ccw' or s390 and it's not supported, generate
|
|
|
|
* an error and return false; otherwise, return true.
|
|
|
|
*/
|
|
|
|
bool
|
|
|
|
qemuDomainCheckCCWS390AddressSupport(const virDomainDef *def,
|
2019-01-25 11:11:21 +00:00
|
|
|
const virDomainDeviceInfo *info,
|
2017-10-11 13:06:07 +00:00
|
|
|
virQEMUCapsPtr qemuCaps,
|
|
|
|
const char *devicename)
|
|
|
|
{
|
2019-01-25 11:11:21 +00:00
|
|
|
if (info->type == VIR_DOMAIN_DEVICE_ADDRESS_TYPE_CCW) {
|
2017-10-11 13:06:07 +00:00
|
|
|
if (!qemuDomainIsS390CCW(def)) {
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
|
|
|
|
_("cannot use CCW address type for device "
|
|
|
|
"'%s' using machine type '%s'"),
|
|
|
|
devicename, def->os.machine);
|
|
|
|
return false;
|
2018-05-07 14:41:11 +00:00
|
|
|
} else if (!virQEMUCapsGet(qemuCaps, QEMU_CAPS_CCW)) {
|
2017-10-11 13:06:07 +00:00
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
|
|
|
|
_("CCW address type is not supported by "
|
|
|
|
"this QEMU"));
|
|
|
|
return false;
|
|
|
|
}
|
2019-01-25 11:11:21 +00:00
|
|
|
} else if (info->type == VIR_DOMAIN_DEVICE_ADDRESS_TYPE_VIRTIO_S390) {
|
2017-10-11 13:06:07 +00:00
|
|
|
if (!virQEMUCapsGet(qemuCaps, QEMU_CAPS_VIRTIO_S390)) {
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
|
|
|
|
_("virtio S390 address type is not supported by "
|
|
|
|
"this QEMU"));
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
2017-10-17 19:39:41 +00:00
|
|
|
|
|
|
|
|
2017-12-05 15:40:27 +00:00
|
|
|
/**
|
2018-05-29 15:05:05 +00:00
|
|
|
* qemuDomainPrepareDiskSourceData:
|
2017-12-05 15:40:27 +00:00
|
|
|
*
|
|
|
|
* @disk: Disk config object
|
|
|
|
* @src: source to start from
|
|
|
|
*
|
2018-05-29 15:05:05 +00:00
|
|
|
* Prepares various aspects of a storage source belonging to a disk backing
|
2019-10-08 13:16:41 +00:00
|
|
|
* chain based on the disk configuration. This function should be also called
|
|
|
|
* for detected backing chain members.
|
2017-12-05 15:40:27 +00:00
|
|
|
*/
|
2019-10-08 13:14:22 +00:00
|
|
|
void
|
2018-05-29 15:05:05 +00:00
|
|
|
qemuDomainPrepareDiskSourceData(virDomainDiskDefPtr disk,
|
2019-10-08 12:58:48 +00:00
|
|
|
virStorageSourcePtr src)
|
2017-12-05 15:40:27 +00:00
|
|
|
{
|
2019-10-08 13:17:32 +00:00
|
|
|
if (!disk)
|
|
|
|
return;
|
|
|
|
|
2018-03-23 16:00:29 +00:00
|
|
|
/* transfer properties valid only for the top level image */
|
2018-05-29 14:52:17 +00:00
|
|
|
if (src == disk->src)
|
|
|
|
src->detect_zeroes = disk->detect_zeroes;
|
2018-03-23 16:00:29 +00:00
|
|
|
|
2018-05-29 15:05:05 +00:00
|
|
|
/* transfer properties valid for the full chain */
|
|
|
|
src->iomode = disk->iomode;
|
|
|
|
src->cachemode = disk->cachemode;
|
|
|
|
src->discard = disk->discard;
|
2018-04-19 13:31:55 +00:00
|
|
|
|
2018-05-29 15:05:05 +00:00
|
|
|
if (disk->device == VIR_DOMAIN_DISK_DEVICE_FLOPPY)
|
|
|
|
src->floppyimg = true;
|
2017-12-05 15:40:27 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2018-03-27 09:11:26 +00:00
|
|
|
static void
|
|
|
|
qemuDomainPrepareDiskCachemode(virDomainDiskDefPtr disk)
|
|
|
|
{
|
|
|
|
if (disk->cachemode == VIR_DOMAIN_DISK_CACHE_DEFAULT &&
|
|
|
|
disk->src->shared && !disk->src->readonly)
|
|
|
|
disk->cachemode = VIR_DOMAIN_DISK_CACHE_DISABLE;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2018-05-14 05:28:05 +00:00
|
|
|
static int
|
|
|
|
qemuDomainPrepareStorageSourcePR(virStorageSourcePtr src,
|
2018-05-11 14:39:21 +00:00
|
|
|
qemuDomainObjPrivatePtr priv,
|
|
|
|
const char *parentalias)
|
2018-05-14 05:28:05 +00:00
|
|
|
{
|
|
|
|
if (!src->pr)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (virStoragePRDefIsManaged(src->pr)) {
|
2018-07-03 11:24:48 +00:00
|
|
|
VIR_FREE(src->pr->path);
|
2018-05-14 05:28:05 +00:00
|
|
|
if (!(src->pr->path = qemuDomainGetManagedPRSocketPath(priv)))
|
|
|
|
return -1;
|
2019-10-20 11:49:46 +00:00
|
|
|
src->pr->mgralias = g_strdup(qemuDomainGetManagedPRAlias());
|
2018-05-11 14:39:21 +00:00
|
|
|
} else {
|
|
|
|
if (!(src->pr->mgralias = qemuDomainGetUnmanagedPRAlias(parentalias)))
|
|
|
|
return -1;
|
2018-05-14 05:28:05 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2018-05-29 14:38:50 +00:00
|
|
|
/**
 * qemuDomainPrepareDiskSourceLegacy:
 * @disk: disk to prepare
 * @priv: VM private data
 * @cfg: qemu driver config
 *
 * Prepare any disk source relevant data for use with the -drive command line.
 *
 * Returns 0 on success, -1 on error.
 */
static int
qemuDomainPrepareDiskSourceLegacy(virDomainDiskDefPtr disk,
                                  qemuDomainObjPrivatePtr priv,
                                  virQEMUDriverConfigPtr cfg)
{
    /* validate the source against qemu capabilities first */
    if (qemuDomainValidateStorageSource(disk->src, priv->qemuCaps, true) < 0)
        return -1;

    qemuDomainPrepareStorageSourceConfig(disk->src, cfg, priv->qemuCaps);
    qemuDomainPrepareDiskSourceData(disk, disk->src);

    /* secrets (auth/encryption) use the disk alias for both parts */
    if (qemuDomainSecretStorageSourcePrepare(priv, disk->src,
                                             disk->info.alias,
                                             disk->info.alias) < 0)
        return -1;

    if (qemuDomainPrepareStorageSourcePR(disk->src, priv, disk->info.alias) < 0)
        return -1;

    if (qemuDomainPrepareStorageSourceTLS(disk->src, cfg, disk->info.alias,
                                          priv) < 0)
        return -1;

    return 0;
}
|
|
|
|
|
|
|
|
|
2018-10-09 09:06:39 +00:00
|
|
|
/**
 * qemuDomainPrepareStorageSourceBlockdev:
 * @disk: disk definition @src belongs to
 * @src: storage source (any layer of the backing chain) to prepare
 * @priv: VM private data
 * @cfg: qemu driver config
 *
 * Prepare one storage source for use with -blockdev: assign unique node
 * names, validate the configuration and set up secrets, persistent
 * reservations and TLS data.
 *
 * Returns 0 on success, -1 on error (reported).
 */
int
qemuDomainPrepareStorageSourceBlockdev(virDomainDiskDefPtr disk,
                                       virStorageSourcePtr src,
                                       qemuDomainObjPrivatePtr priv,
                                       virQEMUDriverConfigPtr cfg)
{
    /* node names must be unique per qemu instance; derive them from a
     * per-VM counter */
    src->id = qemuDomainStorageIdNew(priv);

    src->nodestorage = g_strdup_printf("libvirt-%u-storage", src->id);
    src->nodeformat = g_strdup_printf("libvirt-%u-format", src->id);

    /* an extra 'raw' layer is inserted when a storage slice is configured */
    if (qemuBlockStorageSourceNeedsStorageSliceLayer(src))
        src->sliceStorage->nodename = g_strdup_printf("libvirt-%u-slice-sto", src->id);

    /* 'false' selects validation against the -blockdev rules */
    if (qemuDomainValidateStorageSource(src, priv->qemuCaps, false) < 0)
        return -1;

    qemuDomainPrepareStorageSourceConfig(src, cfg, priv->qemuCaps);
    qemuDomainPrepareDiskSourceData(disk, src);

    /* with -blockdev the secrets are tied to the node names rather than the
     * device alias */
    if (qemuDomainSecretStorageSourcePrepare(priv, src,
                                             src->nodestorage,
                                             src->nodeformat) < 0)
        return -1;

    if (qemuDomainPrepareStorageSourcePR(src, priv, src->nodestorage) < 0)
        return -1;

    if (qemuDomainPrepareStorageSourceTLS(src, cfg, src->nodestorage,
                                          priv) < 0)
        return -1;

    return 0;
}
|
|
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
qemuDomainPrepareDiskSourceBlockdev(virDomainDiskDefPtr disk,
|
|
|
|
qemuDomainObjPrivatePtr priv,
|
|
|
|
virQEMUDriverConfigPtr cfg)
|
|
|
|
{
|
|
|
|
qemuDomainDiskPrivatePtr diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk);
|
|
|
|
virStorageSourcePtr n;
|
|
|
|
|
|
|
|
if (disk->copy_on_read == VIR_TRISTATE_SWITCH_ON &&
|
2019-10-22 13:26:14 +00:00
|
|
|
!diskPriv->nodeCopyOnRead)
|
|
|
|
diskPriv->nodeCopyOnRead = g_strdup_printf("libvirt-CoR-%s", disk->dst);
|
2017-10-17 11:33:12 +00:00
|
|
|
|
|
|
|
for (n = disk->src; virStorageSourceIsBacking(n); n = n->backingStore) {
|
|
|
|
if (qemuDomainPrepareStorageSourceBlockdev(disk, n, priv, cfg) < 0)
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2018-05-29 14:38:50 +00:00
|
|
|
int
|
|
|
|
qemuDomainPrepareDiskSource(virDomainDiskDefPtr disk,
|
|
|
|
qemuDomainObjPrivatePtr priv,
|
|
|
|
virQEMUDriverConfigPtr cfg)
|
|
|
|
{
|
|
|
|
qemuDomainPrepareDiskCachemode(disk);
|
|
|
|
|
2019-06-25 13:23:56 +00:00
|
|
|
/* set default format for storage pool based disks */
|
2019-01-31 14:37:53 +00:00
|
|
|
if (disk->src->type == VIR_STORAGE_TYPE_VOLUME &&
|
2019-06-25 13:23:56 +00:00
|
|
|
disk->src->format <= VIR_STORAGE_FILE_NONE) {
|
|
|
|
int actualType = virStorageSourceGetActualType(disk->src);
|
|
|
|
|
|
|
|
if (actualType == VIR_STORAGE_TYPE_DIR)
|
|
|
|
disk->src->format = VIR_STORAGE_FILE_FAT;
|
|
|
|
else
|
|
|
|
disk->src->format = VIR_STORAGE_FILE_RAW;
|
|
|
|
}
|
2019-01-31 14:37:53 +00:00
|
|
|
|
2020-05-06 11:48:35 +00:00
|
|
|
if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV) &&
|
|
|
|
!qemuDiskBusIsSD(disk->bus)) {
|
2017-10-17 11:33:12 +00:00
|
|
|
if (qemuDomainPrepareDiskSourceBlockdev(disk, priv, cfg) < 0)
|
|
|
|
return -1;
|
|
|
|
} else {
|
|
|
|
if (qemuDomainPrepareDiskSourceLegacy(disk, priv, cfg) < 0)
|
|
|
|
return -1;
|
|
|
|
}
|
2018-05-29 14:38:50 +00:00
|
|
|
|
2017-11-23 16:01:37 +00:00
|
|
|
return 0;
|
|
|
|
}
|
2018-02-02 12:13:46 +00:00
|
|
|
|
|
|
|
|
2020-09-10 10:32:04 +00:00
|
|
|
/**
 * qemuDomainPrepareHostdev:
 * @hostdev: host device definition
 * @priv: VM private data
 *
 * Prepare the backing storage source of a SCSI host device: for local
 * host devices a fresh block-type source is created, for iSCSI the
 * existing source is reused. Node names and authentication secrets are
 * set up as needed. Non-SCSI host devices are left untouched.
 *
 * Returns 0 on success, -1 on error (reported).
 */
int
qemuDomainPrepareHostdev(virDomainHostdevDefPtr hostdev,
                         qemuDomainObjPrivatePtr priv)
{
    if (virHostdevIsSCSIDevice(hostdev)) {
        virDomainHostdevSubsysSCSIPtr scsisrc = &hostdev->source.subsys.u.scsi;
        virStorageSourcePtr src = NULL;

        switch ((virDomainHostdevSCSIProtocolType) scsisrc->protocol) {
        case VIR_DOMAIN_HOSTDEV_SCSI_PROTOCOL_TYPE_NONE:
            /* replace any previously prepared source object */
            virObjectUnref(scsisrc->u.host.src);
            scsisrc->u.host.src = virStorageSourceNew();
            src = scsisrc->u.host.src;

            src->type = VIR_STORAGE_TYPE_BLOCK;

            break;

        case VIR_DOMAIN_HOSTDEV_SCSI_PROTOCOL_TYPE_ISCSI:
            src = scsisrc->u.iscsi.src;
            break;

        case VIR_DOMAIN_HOSTDEV_SCSI_PROTOCOL_TYPE_LAST:
        default:
            virReportEnumRangeError(virDomainHostdevSCSIProtocolType, scsisrc->protocol);
            return -1;
        }

        if (src) {
            /* default to the device alias; overridden below when qemu
             * supports -blockdev backends for SCSI host devices */
            const char *backendalias = hostdev->info->alias;

            src->readonly = hostdev->readonly;

            if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV_HOSTDEV_SCSI)) {
                src->id = qemuDomainStorageIdNew(priv);
                src->nodestorage = g_strdup_printf("libvirt-%d-backend", src->id);
                backendalias = src->nodestorage;
            }

            if (src->auth) {
                bool iscsiHasPS = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_ISCSI_PASSWORD_SECRET);
                virSecretUsageType usageType = VIR_SECRET_USAGE_TYPE_ISCSI;
                qemuDomainStorageSourcePrivatePtr srcPriv = qemuDomainStorageSourcePrivateFetch(src);

                /* fall back to a plain-text secret when qemu can't consume
                 * an encrypted 'secret' object for iSCSI */
                if (!qemuDomainSupportsEncryptedSecret(priv) || !iscsiHasPS) {
                    srcPriv->secinfo = qemuDomainSecretInfoNewPlain(usageType,
                                                                    src->auth->username,
                                                                    &src->auth->seclookupdef);
                } else {
                    srcPriv->secinfo = qemuDomainSecretAESSetupFromSecret(priv,
                                                                          backendalias,
                                                                          NULL,
                                                                          usageType,
                                                                          src->auth->username,
                                                                          &src->auth->seclookupdef);
                }

                if (!srcPriv->secinfo)
                    return -1;
            }
        }
    }

    return 0;
}
|
|
|
|
|
|
|
|
|
2018-04-04 06:43:06 +00:00
|
|
|
/**
|
|
|
|
* qemuDomainDiskCachemodeFlags:
|
|
|
|
*
|
|
|
|
* Converts disk cachemode to the cache mode options for qemu. Returns -1 for
|
|
|
|
* invalid @cachemode values and fills the flags and returns 0 on success.
|
|
|
|
* Flags may be NULL.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
qemuDomainDiskCachemodeFlags(int cachemode,
|
|
|
|
bool *writeback,
|
|
|
|
bool *direct,
|
|
|
|
bool *noflush)
|
|
|
|
{
|
|
|
|
bool dummy;
|
|
|
|
|
|
|
|
if (!writeback)
|
|
|
|
writeback = &dummy;
|
|
|
|
|
|
|
|
if (!direct)
|
|
|
|
direct = &dummy;
|
|
|
|
|
|
|
|
if (!noflush)
|
|
|
|
noflush = &dummy;
|
|
|
|
|
|
|
|
/* Mapping of cache modes to the attributes according to qemu-options.hx
|
|
|
|
* │ cache.writeback cache.direct cache.no-flush
|
|
|
|
* ─────────────┼─────────────────────────────────────────────────
|
|
|
|
* writeback │ true false false
|
|
|
|
* none │ true true false
|
|
|
|
* writethrough │ false false false
|
|
|
|
* directsync │ false true false
|
|
|
|
* unsafe │ true false true
|
|
|
|
*/
|
|
|
|
switch ((virDomainDiskCache) cachemode) {
|
|
|
|
case VIR_DOMAIN_DISK_CACHE_DISABLE: /* 'none' */
|
|
|
|
*writeback = true;
|
|
|
|
*direct = true;
|
|
|
|
*noflush = false;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case VIR_DOMAIN_DISK_CACHE_WRITETHRU:
|
|
|
|
*writeback = false;
|
|
|
|
*direct = false;
|
|
|
|
*noflush = false;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case VIR_DOMAIN_DISK_CACHE_WRITEBACK:
|
|
|
|
*writeback = true;
|
|
|
|
*direct = false;
|
|
|
|
*noflush = false;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case VIR_DOMAIN_DISK_CACHE_DIRECTSYNC:
|
|
|
|
*writeback = false;
|
|
|
|
*direct = true;
|
|
|
|
*noflush = false;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case VIR_DOMAIN_DISK_CACHE_UNSAFE:
|
|
|
|
*writeback = true;
|
|
|
|
*direct = false;
|
|
|
|
*noflush = true;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case VIR_DOMAIN_DISK_CACHE_DEFAULT:
|
|
|
|
case VIR_DOMAIN_DISK_CACHE_LAST:
|
|
|
|
default:
|
|
|
|
virReportEnumRangeError(virDomainDiskCache, cachemode);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2018-02-02 12:13:46 +00:00
|
|
|
/**
 * qemuProcessEventFree:
 * @event: event to free (may be NULL)
 *
 * Free a queued qemu process event together with its payload. The payload
 * ownership model depends on the event type: some carry specially-allocated
 * structs with their own free function, some a plain VIR_FREE-able buffer,
 * some a ref-counted object, and some no payload at all.
 */
void
qemuProcessEventFree(struct qemuProcessEvent *event)
{
    if (!event)
        return;

    switch (event->eventType) {
    case QEMU_PROCESS_EVENT_GUESTPANIC:
        /* payload is a qemuMonitorEventPanicInfo with nested allocations */
        qemuMonitorEventPanicInfoFree(event->data);
        break;
    case QEMU_PROCESS_EVENT_RDMA_GID_STATUS_CHANGED:
        qemuMonitorEventRdmaGidStatusFree(event->data);
        break;
    /* these event types carry a flat, VIR_FREE-able payload (or NULL) */
    case QEMU_PROCESS_EVENT_WATCHDOG:
    case QEMU_PROCESS_EVENT_DEVICE_DELETED:
    case QEMU_PROCESS_EVENT_NIC_RX_FILTER_CHANGED:
    case QEMU_PROCESS_EVENT_SERIAL_CHANGED:
    case QEMU_PROCESS_EVENT_BLOCK_JOB:
    case QEMU_PROCESS_EVENT_MONITOR_EOF:
    case QEMU_PROCESS_EVENT_GUEST_CRASHLOADED:
        VIR_FREE(event->data);
        break;
    case QEMU_PROCESS_EVENT_JOB_STATUS_CHANGE:
        /* payload is a ref-counted virObject */
        virObjectUnref(event->data);
        break;
    /* no payload for these */
    case QEMU_PROCESS_EVENT_PR_DISCONNECT:
    case QEMU_PROCESS_EVENT_LAST:
        break;
    }
    VIR_FREE(event);
}
|
2018-04-18 14:55:14 +00:00
|
|
|
|
|
|
|
|
|
|
|
/**
 * qemuDomainGetManagedPRSocketPath:
 * @priv: VM private data
 *
 * Build the path of the socket of the libvirt-managed pr-helper process,
 * located inside the per-VM private lib directory.
 *
 * Returns the path; caller is responsible for freeing it.
 */
char *
qemuDomainGetManagedPRSocketPath(qemuDomainObjPrivatePtr priv)
{
    return g_strdup_printf("%s/%s.sock", priv->libDir,
                           qemuDomainGetManagedPRAlias());
}
|
2017-07-07 12:29:32 +00:00
|
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
* qemuDomainStorageIdNew:
|
|
|
|
* @priv: qemu VM private data object.
|
|
|
|
*
|
|
|
|
* Generate a new unique id for a storage object. Useful for node name generation.
|
|
|
|
*/
|
|
|
|
unsigned int
|
|
|
|
qemuDomainStorageIdNew(qemuDomainObjPrivatePtr priv)
|
|
|
|
{
|
|
|
|
return ++priv->nodenameindex;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/**
 * qemuDomainStorageIdReset:
 * @priv: qemu VM private data object.
 *
 * Resets the data for the node name generator. The node names need to be
 * unique for a single instance, so can be reset on VM shutdown.
 */
void
qemuDomainStorageIdReset(qemuDomainObjPrivatePtr priv)
{
    /* next qemuDomainStorageIdNew() call will hand out 1 again */
    priv->nodenameindex = 0;
}
|
2018-09-11 13:13:08 +00:00
|
|
|
|
|
|
|
|
|
|
|
/**
 * qemuDomainRunningReasonToResumeEvent:
 * @reason: reason why the domain entered the 'running' state
 *
 * Map a 'running' state reason to the detail code of the corresponding
 * 'resumed' lifecycle event. Reasons without a specific mapping fall back
 * to VIR_DOMAIN_EVENT_RESUMED_UNPAUSED.
 */
virDomainEventResumedDetailType
qemuDomainRunningReasonToResumeEvent(virDomainRunningReason reason)
{
    switch (reason) {
    case VIR_DOMAIN_RUNNING_RESTORED:
    case VIR_DOMAIN_RUNNING_FROM_SNAPSHOT:
        return VIR_DOMAIN_EVENT_RESUMED_FROM_SNAPSHOT;

    case VIR_DOMAIN_RUNNING_MIGRATED:
    case VIR_DOMAIN_RUNNING_MIGRATION_CANCELED:
        return VIR_DOMAIN_EVENT_RESUMED_MIGRATED;

    case VIR_DOMAIN_RUNNING_POSTCOPY:
        return VIR_DOMAIN_EVENT_RESUMED_POSTCOPY;

    /* everything else maps to the generic 'unpaused' detail below */
    case VIR_DOMAIN_RUNNING_UNKNOWN:
    case VIR_DOMAIN_RUNNING_SAVE_CANCELED:
    case VIR_DOMAIN_RUNNING_BOOTED:
    case VIR_DOMAIN_RUNNING_UNPAUSED:
    case VIR_DOMAIN_RUNNING_WAKEUP:
    case VIR_DOMAIN_RUNNING_CRASHED:
    case VIR_DOMAIN_RUNNING_LAST:
        break;
    }

    return VIR_DOMAIN_EVENT_RESUMED_UNPAUSED;
}
|
2018-10-16 12:38:27 +00:00
|
|
|
|
|
|
|
|
|
|
|
/* qemuDomainIsUsingNoShutdown:
 * @priv: Domain private data
 *
 * We can receive an event when QEMU stops. If we use no-shutdown, then
 * we can watch for this event and do a soft/warm reboot.
 *
 * Returns: @true when -no-shutdown either should be or was added to the
 *          command line.
 */
bool
qemuDomainIsUsingNoShutdown(qemuDomainObjPrivatePtr priv)
{
    /* allowReboot is a tristate; only an explicit 'yes' implies -no-shutdown */
    return priv->allowReboot == VIR_TRISTATE_BOOL_YES;
}
|
2018-11-09 09:21:50 +00:00
|
|
|
|
|
|
|
|
|
|
|
bool
|
|
|
|
qemuDomainDiskIsMissingLocalOptional(virDomainDiskDefPtr disk)
|
|
|
|
{
|
|
|
|
return disk->startupPolicy == VIR_DOMAIN_STARTUP_POLICY_OPTIONAL &&
|
|
|
|
virStorageSourceIsLocalStorage(disk->src) && disk->src->path &&
|
|
|
|
!virFileExists(disk->src->path);
|
|
|
|
}
|
2019-02-25 15:24:27 +00:00
|
|
|
|
|
|
|
|
2019-10-22 13:26:14 +00:00
|
|
|
/**
 * qemuDomainNVRAMPathFormat:
 * @cfg: qemu driver config
 * @def: domain definition
 * @path: filled with the formatted per-domain NVRAM file path
 *
 * Format the default path of the per-domain UEFI variable store file
 * ("<nvramDir>/<domain name>_VARS.fd").
 */
void
qemuDomainNVRAMPathFormat(virQEMUDriverConfigPtr cfg,
                          virDomainDefPtr def,
                          char **path)
{
    *path = g_strdup_printf("%s/%s_VARS.fd", cfg->nvramDir, def->name);
}
|
|
|
|
|
|
|
|
|
2019-10-22 13:26:14 +00:00
|
|
|
void
|
2019-02-25 15:24:27 +00:00
|
|
|
qemuDomainNVRAMPathGenerate(virQEMUDriverConfigPtr cfg,
|
|
|
|
virDomainDefPtr def)
|
|
|
|
{
|
2020-01-07 09:34:03 +00:00
|
|
|
if (virDomainDefHasOldStyleROUEFI(def) &&
|
2019-10-22 13:26:14 +00:00
|
|
|
!def->os.loader->nvram)
|
|
|
|
qemuDomainNVRAMPathFormat(cfg, def, &def->os.loader->nvram);
|
2019-02-25 15:24:27 +00:00
|
|
|
}
|
2018-10-09 13:45:50 +00:00
|
|
|
|
|
|
|
|
|
|
|
/**
 * qemuDomainPausedReasonToSuspendedEvent:
 * @reason: reason why the domain entered the 'paused' state
 *
 * Map a 'paused' state reason to the detail code of the corresponding
 * 'suspended' lifecycle event. Reasons without a specific mapping fall
 * back to VIR_DOMAIN_EVENT_SUSPENDED_PAUSED.
 */
virDomainEventSuspendedDetailType
qemuDomainPausedReasonToSuspendedEvent(virDomainPausedReason reason)
{
    switch (reason) {
    case VIR_DOMAIN_PAUSED_MIGRATION:
        return VIR_DOMAIN_EVENT_SUSPENDED_MIGRATED;

    case VIR_DOMAIN_PAUSED_FROM_SNAPSHOT:
        return VIR_DOMAIN_EVENT_SUSPENDED_FROM_SNAPSHOT;

    case VIR_DOMAIN_PAUSED_POSTCOPY_FAILED:
        return VIR_DOMAIN_EVENT_SUSPENDED_POSTCOPY_FAILED;

    case VIR_DOMAIN_PAUSED_POSTCOPY:
        return VIR_DOMAIN_EVENT_SUSPENDED_POSTCOPY;

    /* everything else maps to the generic 'paused' detail below */
    case VIR_DOMAIN_PAUSED_UNKNOWN:
    case VIR_DOMAIN_PAUSED_USER:
    case VIR_DOMAIN_PAUSED_SAVE:
    case VIR_DOMAIN_PAUSED_DUMP:
    case VIR_DOMAIN_PAUSED_IOERROR:
    case VIR_DOMAIN_PAUSED_WATCHDOG:
    case VIR_DOMAIN_PAUSED_SHUTTING_DOWN:
    case VIR_DOMAIN_PAUSED_SNAPSHOT:
    case VIR_DOMAIN_PAUSED_CRASHED:
    case VIR_DOMAIN_PAUSED_STARTING_UP:
    case VIR_DOMAIN_PAUSED_LAST:
        break;
    }

    return VIR_DOMAIN_EVENT_SUSPENDED_PAUSED;
}
|
2019-03-21 12:54:20 +00:00
|
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
qemuDomainDefHasManagedPRBlockjobIterator(void *payload,
|
2020-10-21 11:31:16 +00:00
|
|
|
const char *name G_GNUC_UNUSED,
|
2019-03-21 12:54:20 +00:00
|
|
|
void *opaque)
|
|
|
|
{
|
|
|
|
qemuBlockJobDataPtr job = payload;
|
|
|
|
bool *hasPR = opaque;
|
|
|
|
|
|
|
|
if (job->disk)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if ((job->chain && virStorageSourceChainHasManagedPR(job->chain)) ||
|
|
|
|
(job->mirrorChain && virStorageSourceChainHasManagedPR(job->mirrorChain)))
|
|
|
|
*hasPR = true;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/**
 * qemuDomainDefHasManagedPR:
 * @vm: domain object
 *
 * @vm must be an active VM. Returns true if @vm has any storage source with
 * managed persistent reservations.
 */
bool
qemuDomainDefHasManagedPR(virDomainObjPtr vm)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    bool jobPR = false;

    /* check the disks present in the domain definition first */
    if (virDomainDefHasManagedPR(vm->def))
        return true;

    /* disk-less block jobs may still reference chains with managed PR */
    virHashForEach(priv->blockjobs, qemuDomainDefHasManagedPRBlockjobIterator, &jobPR);

    return jobPR;
}
|
2019-09-26 11:54:15 +00:00
|
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
* qemuDomainSupportsCheckpointsBlockjobs:
|
|
|
|
* @vm: domain object
|
|
|
|
*
|
|
|
|
* Checks whether a block job is supported in possible combination with
|
|
|
|
* checkpoints (qcow2 bitmaps). Returns -1 if unsupported and reports an error
|
|
|
|
* 0 in case everything is supported.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
qemuDomainSupportsCheckpointsBlockjobs(virDomainObjPtr vm)
|
|
|
|
{
|
2019-09-26 12:08:11 +00:00
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
|
|
|
|
|
|
|
if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_INCREMENTAL_BACKUP) &&
|
|
|
|
virDomainListCheckpoints(vm->checkpoints, NULL, NULL, NULL, 0) > 0) {
|
2019-09-26 11:54:15 +00:00
|
|
|
virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
|
|
|
|
_("cannot perform block operations while checkpoint exists"));
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
2019-11-15 15:25:26 +00:00
|
|
|
|
|
|
|
/**
 * qemuDomainInitializePflashStorageSource:
 *
 * This helper converts the specification of the source of the 'loader' in case
 * PFLASH is required to virStorageSources in case QEMU_CAPS_BLOCKDEV is present.
 *
 * This helper is used in the intermediate state when we don't support full
 * backing chains for pflash drives in the XML.
 *
 * The nodenames used here have a different prefix to allow for a later
 * conversion. The prefixes are 'libvirt-pflash0-storage',
 * 'libvirt-pflash0-format' for pflash0 and 'libvirt-pflash1-storage' and
 * 'libvirt-pflash1-format' for pflash1.
 */
int
qemuDomainInitializePflashStorageSource(virDomainObjPtr vm)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    virDomainDefPtr def = vm->def;
    g_autoptr(virStorageSource) pflash0 = NULL;
    g_autoptr(virStorageSource) pflash1 = NULL;

    /* only needed when the firmware images are configured via -blockdev */
    if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV))
        return 0;

    if (!virDomainDefHasOldStyleUEFI(def))
        return 0;

    /* pflash0: the (usually read-only) firmware code image */
    pflash0 = virStorageSourceNew();
    pflash0->type = VIR_STORAGE_TYPE_FILE;
    pflash0->format = VIR_STORAGE_FILE_RAW;
    pflash0->path = g_strdup(def->os.loader->path);
    pflash0->readonly = def->os.loader->readonly;
    pflash0->nodeformat = g_strdup("libvirt-pflash0-format");
    pflash0->nodestorage = g_strdup("libvirt-pflash0-storage");

    /* pflash1: the writable NVRAM variable store, when configured */
    if (def->os.loader->nvram) {
        pflash1 = virStorageSourceNew();
        pflash1->type = VIR_STORAGE_TYPE_FILE;
        pflash1->format = VIR_STORAGE_FILE_RAW;
        pflash1->path = g_strdup(def->os.loader->nvram);
        pflash1->readonly = false;
        pflash1->nodeformat = g_strdup("libvirt-pflash1-format");
        pflash1->nodestorage = g_strdup("libvirt-pflash1-storage");
    }

    priv->pflash0 = g_steal_pointer(&pflash0);
    priv->pflash1 = g_steal_pointer(&pflash1);

    return 0;
}
|
2020-04-17 09:09:58 +00:00
|
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
* qemuDomainDiskBlockJobIsSupported:
|
|
|
|
*
|
|
|
|
* Returns true if block jobs are supported on @disk by @vm or false and reports
|
|
|
|
* an error otherwise.
|
|
|
|
*
|
|
|
|
* Note that this does not verify whether other block jobs are running etc.
|
|
|
|
*/
|
|
|
|
bool
|
|
|
|
qemuDomainDiskBlockJobIsSupported(virDomainObjPtr vm,
|
|
|
|
virDomainDiskDefPtr disk)
|
|
|
|
{
|
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
|
|
|
|
|
|
|
if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV) &&
|
|
|
|
qemuDiskBusIsSD(disk->bus)) {
|
|
|
|
virReportError(VIR_ERR_OPERATION_UNSUPPORTED,
|
|
|
|
_("block jobs are not supported on disk '%s' using bus 'sd'"),
|
|
|
|
disk->dst);
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2020-09-17 13:30:40 +00:00
|
|
|
if (disk->transient) {
|
|
|
|
virReportError(VIR_ERR_OPERATION_UNSUPPORTED,
|
|
|
|
_("block jobs are not supported on transient disk '%s'"),
|
|
|
|
disk->dst);
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2020-04-17 09:09:58 +00:00
|
|
|
return true;
|
|
|
|
}
|
2020-07-16 09:17:47 +00:00
|
|
|
|
|
|
|
|
2020-08-24 15:38:05 +00:00
|
|
|
/**
 * virQEMUFileOpenAs:
 * @fallback_uid: user to retry the operation as if root access fails
 * @fallback_gid: group to retry the operation as if root access fails
 * @dynamicOwnership: whether the driver is configured to chown files
 * @path: file to open or create
 * @oflags: open(2) flags; O_CREAT triggers the creation logic
 * @needUnlink: set to true if this call created the file (may be NULL)
 *
 * Open (or create) @path, first as the current (root) user and, when that
 * fails with a permission error that suggests a root-squashing network
 * filesystem, retry as @fallback_uid/@fallback_gid in a forked child.
 *
 * Returns the file descriptor on success; on failure a NEGATIVE errno
 * value is returned (not -1), with the error already reported.
 */
int
virQEMUFileOpenAs(uid_t fallback_uid,
                  gid_t fallback_gid,
                  bool dynamicOwnership,
                  const char *path,
                  int oflags,
                  bool *needUnlink)
{
    struct stat sb;
    bool is_reg = true;
    bool need_unlink = false;
    unsigned int vfoflags = 0;
    int fd = -1;
    int path_shared = virFileIsSharedFS(path);
    uid_t uid = geteuid();
    gid_t gid = getegid();

    /* path might be a pre-existing block dev, in which case
     * we need to skip the create step, and also avoid unlink
     * in the failure case */
    if (oflags & O_CREAT) {
        need_unlink = true;

        /* Don't force chown on network-shared FS
         * as it is likely to fail. */
        if (path_shared <= 0 || dynamicOwnership)
            vfoflags |= VIR_FILE_OPEN_FORCE_OWNER;

        if (stat(path, &sb) == 0) {
            /* It already exists, we don't want to delete it on error */
            need_unlink = false;

            is_reg = !!S_ISREG(sb.st_mode);
            /* If the path is regular file which exists
             * already and dynamic_ownership is off, we don't
             * want to change its ownership, just open it as-is */
            if (is_reg && !dynamicOwnership) {
                uid = sb.st_uid;
                gid = sb.st_gid;
            }
        }
    }

    /* First try creating the file as root */
    if (!is_reg) {
        /* block device (or other special file): just open it, never create */
        if ((fd = open(path, oflags & ~O_CREAT)) < 0) {
            fd = -errno;
            goto error;
        }
    } else {
        if ((fd = virFileOpenAs(path, oflags, S_IRUSR | S_IWUSR, uid, gid,
                                vfoflags | VIR_FILE_OPEN_NOFORK)) < 0) {
            /* If we failed as root, and the error was permission-denied
               (EACCES or EPERM), assume it's on a network-connected share
               where root access is restricted (eg, root-squashed NFS). If the
               qemu user is non-root, just set a flag to
               bypass security driver shenanigans, and retry the operation
               after doing setuid to qemu user */
            if ((fd != -EACCES && fd != -EPERM) || fallback_uid == geteuid())
                goto error;

            /* On Linux we can also verify the FS-type of the directory. */
            switch (path_shared) {
            case 1:
                /* it was on a network share, so we'll continue
                 * as outlined above
                 */
                break;

            case -1:
                virReportSystemError(-fd, oflags & O_CREAT
                                     ? _("Failed to create file "
                                         "'%s': couldn't determine fs type")
                                     : _("Failed to open file "
                                         "'%s': couldn't determine fs type"),
                                     path);
                goto cleanup;

            case 0:
            default:
                /* local file - log the error returned by virFileOpenAs */
                goto error;
            }

            /* If we created the file above, then we need to remove it;
             * otherwise, the next attempt to create will fail. If the
             * file had already existed before we got here, then we also
             * don't want to delete it and allow the following to succeed
             * or fail based on existing protections
             */
            if (need_unlink)
                unlink(path);

            /* Retry creating the file as qemu user */

            /* Since we're passing different modes... */
            vfoflags |= VIR_FILE_OPEN_FORCE_MODE;

            if ((fd = virFileOpenAs(path, oflags,
                                    S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP,
                                    fallback_uid, fallback_gid,
                                    vfoflags | VIR_FILE_OPEN_FORK)) < 0) {
                virReportSystemError(-fd, oflags & O_CREAT
                                     ? _("Error from child process creating '%s'")
                                     : _("Error from child process opening '%s'"),
                                     path);
                goto cleanup;
            }
        }
    }
 cleanup:
    if (needUnlink)
        *needUnlink = need_unlink;
    return fd;

 error:
    virReportSystemError(-fd, oflags & O_CREAT
                         ? _("Failed to create file '%s'")
                         : _("Failed to open file '%s'"),
                         path);
    goto cleanup;
}
|
|
|
|
|
|
|
|
|
2020-07-16 09:17:47 +00:00
|
|
|
/**
 * qemuDomainOpenFile:
 * @driver: driver object
 * @vm: domain object
 * @path: path to file to open
 * @oflags: flags for opening/creation of the file
 * @needUnlink: set to true if file was created by this function
 *
 * Internal function to properly create or open existing files, with
 * ownership affected by qemu driver setup and domain DAC label.
 *
 * Returns the file descriptor on success and negative errno on failure.
 *
 * This function should not be used on storage sources. Use
 * qemuDomainStorageFileInit and storage driver APIs if possible.
 **/
int
qemuDomainOpenFile(virQEMUDriverPtr driver,
                   virDomainObjPtr vm,
                   const char *path,
                   int oflags,
                   bool *needUnlink)
{
    g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
    /* start from the driver-wide qemu user/group ... */
    uid_t user = cfg->user;
    gid_t group = cfg->group;
    bool dynamicOwnership = cfg->dynamicOwnership;
    virSecurityLabelDefPtr seclabel;

    /* TODO: Take imagelabel into account? */
    /* ... and override with the per-domain DAC label ("uid:gid") if set */
    if (vm &&
        (seclabel = virDomainDefGetSecurityLabelDef(vm->def, "dac")) != NULL &&
        seclabel->label != NULL &&
        (virParseOwnershipIds(seclabel->label, &user, &group) < 0))
        return -1;

    return virQEMUFileOpenAs(user, group, dynamicOwnership,
                             path, oflags, needUnlink);
}
|
2020-07-16 09:40:34 +00:00
|
|
|
|
|
|
|
|
|
|
|
/**
 * qemuDomainFileWrapperFDClose:
 * @vm: domain object (locked on entry and on return)
 * @fd: wrapper fd to close
 *
 * Close a virFileWrapperFd while temporarily dropping the domain object
 * lock, since closing may block on fdatasync() in the iohelper. Fails if
 * the domain stopped running while the lock was released.
 *
 * Returns the result of virFileWrapperFdClose(), or -1 when the domain is
 * no longer active.
 */
int
qemuDomainFileWrapperFDClose(virDomainObjPtr vm,
                             virFileWrapperFdPtr fd)
{
    int ret;

    /* virFileWrapperFd uses iohelper to write data onto disk.
     * However, iohelper calls fdatasync() which may take ages to
     * finish. Therefore, we shouldn't be waiting with the domain
     * object locked. */

    /* XXX Currently, this function is intended for *Save() only
     * as restore needs some reworking before it's ready for
     * this. */

    virObjectUnlock(vm);
    ret = virFileWrapperFdClose(fd);
    virObjectLock(vm);
    if (!virDomainObjIsActive(vm)) {
        /* don't overwrite an error already reported by the close itself */
        if (virGetLastErrorCode() == VIR_ERR_OK)
            virReportError(VIR_ERR_OPERATION_FAILED, "%s",
                           _("domain is no longer running"));
        ret = -1;
    }
    return ret;
}
|
2020-10-08 14:22:50 +00:00
|
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
* qemuDomainInterfaceSetDefaultQDisc:
|
|
|
|
* @driver: QEMU driver
|
|
|
|
* @net: domain interface
|
|
|
|
*
|
|
|
|
* Set the noqueue qdisc on @net if running as privileged. The
|
|
|
|
* noqueue qdisc is a lockless transmit and thus faster than the
|
|
|
|
* default pfifo_fast (at least in theory). But we can modify
|
|
|
|
* root qdisc only if we have CAP_NET_ADMIN.
|
|
|
|
*
|
|
|
|
* Returns: 0 on success,
|
|
|
|
* -1 otherwise.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
qemuDomainInterfaceSetDefaultQDisc(virQEMUDriverPtr driver,
|
|
|
|
virDomainNetDefPtr net)
|
|
|
|
{
|
|
|
|
virDomainNetType actualType = virDomainNetGetActualType(net);
|
|
|
|
|
|
|
|
if (!driver->privileged || !net->ifname)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
/* We want only those types which are represented as TAP
|
|
|
|
* devices in the host. */
|
|
|
|
if (actualType == VIR_DOMAIN_NET_TYPE_ETHERNET ||
|
|
|
|
actualType == VIR_DOMAIN_NET_TYPE_NETWORK ||
|
|
|
|
actualType == VIR_DOMAIN_NET_TYPE_BRIDGE ||
|
|
|
|
actualType == VIR_DOMAIN_NET_TYPE_DIRECT) {
|
|
|
|
if (virNetDevSetRootQDisc(net->ifname, "noqueue") < 0)
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
2020-11-03 07:58:13 +00:00
|
|
|
|
|
|
|
|
|
|
|
/**
 * qemuDomainNamePathsCleanup:
 * @cfg: qemu driver config
 * @name: domain name
 * @bestEffort: when true, continue past individual removal failures
 *
 * Remove the on-disk artifacts associated with domain @name: the config
 * file, the autostart symlink, and the snapshot and checkpoint
 * directories. With @bestEffort each failure is reported but does not
 * stop the remaining cleanup.
 *
 * Returns 0 on success (or best-effort completion), -1 on the first
 * failure when @bestEffort is false.
 */
int
qemuDomainNamePathsCleanup(virQEMUDriverConfigPtr cfg,
                           const char *name,
                           bool bestEffort)
{
    g_autofree char *cfg_file = NULL;
    g_autofree char *autostart_link = NULL;
    g_autofree char *snap_dir = NULL;
    g_autofree char *chk_dir = NULL;

    cfg_file = virDomainConfigFile(cfg->configDir, name);
    autostart_link = virDomainConfigFile(cfg->autostartDir, name);
    snap_dir = g_strdup_printf("%s/%s", cfg->snapshotDir, name);
    chk_dir = g_strdup_printf("%s/%s", cfg->checkpointDir, name);

    if (virFileExists(cfg_file) &&
        unlink(cfg_file) < 0) {
        virReportSystemError(errno, _("Failed to unlink '%s'"), cfg_file);
        if (!bestEffort)
            return -1;
    }

    /* the autostart entry is a symlink to the config file */
    if (virFileIsLink(autostart_link) == 1 &&
        unlink(autostart_link) < 0) {
        virReportSystemError(errno, _("Failed to unlink '%s'"), autostart_link);
        if (!bestEffort)
            return -1;
    }

    if (virFileIsDir(snap_dir) &&
        virFileDeleteTree(snap_dir) < 0 &&
        !bestEffort)
        return -1;

    if (virFileIsDir(chk_dir) &&
        virFileDeleteTree(chk_dir) < 0 &&
        !bestEffort)
        return -1;

    return 0;
}
|