2010-12-16 16:10:54 +00:00
|
|
|
/*
|
|
|
|
* qemu_cgroup.c: QEMU cgroup management
|
|
|
|
*
|
2015-03-11 10:17:15 +00:00
|
|
|
* Copyright (C) 2006-2015 Red Hat, Inc.
|
2010-12-16 16:10:54 +00:00
|
|
|
* Copyright (C) 2006 Daniel P. Berrange
|
|
|
|
*
|
|
|
|
* This library is free software; you can redistribute it and/or
|
|
|
|
* modify it under the terms of the GNU Lesser General Public
|
|
|
|
* License as published by the Free Software Foundation; either
|
|
|
|
* version 2.1 of the License, or (at your option) any later version.
|
|
|
|
*
|
|
|
|
* This library is distributed in the hope that it will be useful,
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
|
|
* Lesser General Public License for more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU Lesser General Public
|
2012-09-20 22:30:55 +00:00
|
|
|
* License along with this library. If not, see
|
2012-07-21 10:06:23 +00:00
|
|
|
* <http://www.gnu.org/licenses/>.
|
2010-12-16 16:10:54 +00:00
|
|
|
*/
|
|
|
|
|
|
|
|
#include <config.h>
|
|
|
|
|
|
|
|
#include "qemu_cgroup.h"
|
2011-07-21 02:10:31 +00:00
|
|
|
#include "qemu_domain.h"
|
qemu: Keep the affinity when creating cgroup for emulator thread
When the cpu placement model is "auto", it sets the affinity for
domain process with the advisory nodeset from numad, however,
creating cgroup for the domain process (called emulator thread
in some contexts) later overrides that with pinning it to all
available pCPUs.
How to reproduce:
* Configure the domain with "auto" placement for <vcpu>, e.g.
<vcpu placement='auto'>4</vcpu>
* % virsh start dom
* % cat /proc/$dompid/status
Though the emulator cgroup cause conflicts, but we can't simply
prohibit creating it, as other tunables are still useful, such
as "emulator_period", which is used by API
virDomainSetSchedulerParameter. So this patch doesn't prohibit
creating the emulator cgroup, but inherit the nodeset from numad,
and reset the affinity for domain process.
* src/qemu/qemu_cgroup.h: Modify definition of qemuSetupCgroupForEmulator
to accept the passed nodenet
* src/qemu/qemu_cgroup.c: Set the affinity with the passed nodeset
2012-10-24 09:27:56 +00:00
|
|
|
#include "qemu_process.h"
|
2018-04-05 19:06:55 +00:00
|
|
|
#include "qemu_extdevice.h"
|
2019-09-17 08:06:26 +00:00
|
|
|
#include "qemu_hostdev.h"
|
2012-12-12 17:59:27 +00:00
|
|
|
#include "virlog.h"
|
2012-12-12 18:06:53 +00:00
|
|
|
#include "viralloc.h"
|
2012-12-13 18:21:53 +00:00
|
|
|
#include "virerror.h"
|
Move qemu_audit.h helpers into shared code
The LXC and UML drivers can both make use of auditing. Move
the qemu_audit.{c,h} files to src/conf/domain_audit.{c,h}
* src/conf/domain_audit.c: Rename from src/qemu/qemu_audit.c
* src/conf/domain_audit.h: Rename from src/qemu/qemu_audit.h
* src/Makefile.am: Remove qemu_audit.{c,h}, add domain_audit.{c,h}
* src/qemu/qemu_audit.h, src/qemu/qemu_cgroup.c,
src/qemu/qemu_command.c, src/qemu/qemu_driver.c,
src/qemu/qemu_hotplug.c, src/qemu/qemu_migration.c,
src/qemu/qemu_process.c: Update for changed audit API names
2011-07-04 10:56:13 +00:00
|
|
|
#include "domain_audit.h"
|
2020-02-17 21:29:11 +00:00
|
|
|
#include "domain_cgroup.h"
|
2013-05-03 18:07:27 +00:00
|
|
|
#include "virscsi.h"
|
2013-05-20 09:23:13 +00:00
|
|
|
#include "virstring.h"
|
2013-09-13 13:32:43 +00:00
|
|
|
#include "virfile.h"
|
2014-09-09 14:34:12 +00:00
|
|
|
#include "virtypedparam.h"
|
2014-12-12 14:29:48 +00:00
|
|
|
#include "virnuma.h"
|
2018-04-05 07:34:25 +00:00
|
|
|
#include "virdevmapper.h"
|
2020-02-16 21:59:28 +00:00
|
|
|
#include "virutil.h"
|
2010-12-16 16:10:54 +00:00
|
|
|
|
|
|
|
#define VIR_FROM_THIS VIR_FROM_QEMU
|
|
|
|
|
2014-02-28 12:16:17 +00:00
|
|
|
VIR_LOG_INIT("qemu.qemu_cgroup");
|
|
|
|
|
2016-11-15 10:28:51 +00:00
|
|
|
const char *const defaultDeviceACL[] = {
|
2010-12-16 16:10:54 +00:00
|
|
|
"/dev/null", "/dev/full", "/dev/zero",
|
|
|
|
"/dev/random", "/dev/urandom",
|
2019-02-18 15:13:30 +00:00
|
|
|
"/dev/ptmx", "/dev/kvm",
|
2010-12-16 16:10:54 +00:00
|
|
|
NULL,
|
|
|
|
};
|
|
|
|
#define DEVICE_PTY_MAJOR 136
|
|
|
|
#define DEVICE_SND_MAJOR 116
|
|
|
|
|
2016-02-16 14:46:40 +00:00
|
|
|
|
2014-06-20 12:05:05 +00:00
|
|
|
/* qemuSetupImagePathCgroup:
 * @vm: domain object
 * @path: host path to allow in the devices cgroup
 * @readonly: if true, grant read-only access; otherwise read-write
 *
 * Allow access to @path in @vm's devices cgroup controller and audit
 * the decision.  If @path is a device-mapper device, its target
 * devices (as reported by virDevMapperGetTargets()) are allowed too.
 *
 * Returns: 0 on success (including when the devices controller is not
 *          available, or @path is not a character/block device),
 *          -1 otherwise.
 */
static int
qemuSetupImagePathCgroup(virDomainObj *vm,
                         const char *path,
                         bool readonly)
{
    qemuDomainObjPrivate *priv = vm->privateData;
    int perms = VIR_CGROUP_DEVICE_READ;
    g_auto(GStrv) targetPaths = NULL;
    size_t i;
    int rv;

    if (!virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_DEVICES))
        return 0;

    if (!readonly)
        perms |= VIR_CGROUP_DEVICE_WRITE;

    VIR_DEBUG("Allow path %s, perms: %s",
              path, virCgroupGetDevicePermsString(perms));

    rv = virCgroupAllowDevicePath(priv->cgroup, path, perms, true);

    virDomainAuditCgroupPath(vm, priv->cgroup, "allow", path,
                             virCgroupGetDevicePermsString(perms),
                             rv);
    if (rv < 0)
        return -1;

    if (rv > 0) {
        /* @path is neither character device nor block device. */
        return 0;
    }

    /* ENOSYS is tolerated: device-mapper support may simply be absent
     * on this host. */
    if (virDevMapperGetTargets(path, &targetPaths) < 0 &&
        errno != ENOSYS) {
        virReportSystemError(errno,
                             _("Unable to get devmapper targets for %s"),
                             path);
        return -1;
    }

    /* Allow each underlying device-mapper target with the same perms. */
    for (i = 0; targetPaths && targetPaths[i]; i++) {
        rv = virCgroupAllowDevicePath(priv->cgroup, targetPaths[i], perms, false);

        virDomainAuditCgroupPath(vm, priv->cgroup, "allow", targetPaths[i],
                                 virCgroupGetDevicePermsString(perms),
                                 rv);
        if (rv < 0)
            return -1;
    }

    return 0;
}
|
|
|
|
|
|
|
|
|
2016-02-16 14:46:40 +00:00
|
|
|
/* qemuSetupImageCgroupInternal:
 * @vm: domain object
 * @src: storage source to allow access to
 * @forceReadonly: if true, allow only read access even for a R/W source
 *
 * Resolve the host path backing @src (the IOMMU group device for NVMe
 * disks, src->path for local storage) and allow it in @vm's devices
 * cgroup.  For managed persistent reservations the device-mapper
 * control path is allowed as well.
 *
 * Returns: 0 on success (including non-local storage, which needs no
 *          cgroup entry), -1 on error.
 */
static int
qemuSetupImageCgroupInternal(virDomainObj *vm,
                             virStorageSource *src,
                             bool forceReadonly)
{
    g_autofree char *path = NULL;
    bool readonly = src->readonly || forceReadonly;

    if (src->type == VIR_STORAGE_TYPE_NVME) {
        /* Even though disk is R/O we can't make it so in
         * CGroups. QEMU will try to do some ioctl()-s over the
         * device and such operations are considered R/W by the
         * kernel */
        readonly = false;

        if (!(path = virPCIDeviceAddressGetIOMMUGroupDev(&src->nvme->pciAddr)))
            return -1;

        /* NVMe disks are accessed via VFIO, so the VFIO device node
         * must be writable too. */
        if (qemuSetupImagePathCgroup(vm, QEMU_DEV_VFIO, false) < 0)
            return -1;
    } else {
        if (!src->path || !virStorageSourceIsLocalStorage(src)) {
            VIR_DEBUG("Not updating cgroups for disk path '%s', type: %s",
                      NULLSTR(src->path), virStorageTypeToString(src->type));
            return 0;
        }

        path = g_strdup(src->path);
    }

    /* Managed persistent reservations require access to the
     * device-mapper control node (if present on the host). */
    if (virStoragePRDefIsManaged(src->pr) &&
        virFileExists(QEMU_DEVICE_MAPPER_CONTROL_PATH) &&
        qemuSetupImagePathCgroup(vm, QEMU_DEVICE_MAPPER_CONTROL_PATH, false) < 0)
        return -1;

    return qemuSetupImagePathCgroup(vm, path, readonly);
}
|
|
|
|
|
|
|
|
|
2014-06-20 12:05:05 +00:00
|
|
|
/* qemuSetupImageCgroup:
 * @vm: domain object
 * @src: storage source to allow access to
 *
 * Public wrapper over qemuSetupImageCgroupInternal() that never forces
 * read-only access.  Returns 0 on success, -1 on error.
 */
int
qemuSetupImageCgroup(virDomainObj *vm,
                     virStorageSource *src)
{
    return qemuSetupImageCgroupInternal(vm, src, false);
}
|
|
|
|
|
|
|
|
|
|
|
|
/* qemuTeardownImageCgroup:
 * @vm: domain object
 * @src: storage source to deny access to
 *
 * Deny access to the host device backing @src in @vm's devices cgroup.
 * Shared resources (the VFIO node, the device-mapper control node) are
 * denied only when no other disk still in the domain definition needs
 * them.  @src itself is expected to no longer be part of the domain
 * definition (it is skipped when scanning for remaining users).
 *
 * Returns: 0 on success, -1 on error.
 */
int
qemuTeardownImageCgroup(virDomainObj *vm,
                        virStorageSource *src)
{
    qemuDomainObjPrivate *priv = vm->privateData;
    g_autofree char *path = NULL;
    int perms = VIR_CGROUP_DEVICE_RWM;
    bool hasPR = false;
    bool hasNVMe = false;
    size_t i;
    int ret;

    if (!virCgroupHasController(priv->cgroup,
                                VIR_CGROUP_CONTROLLER_DEVICES))
        return 0;

    /* Check whether any *other* disk still needs persistent
     * reservations (device-mapper control) or NVMe (VFIO). */
    for (i = 0; i < vm->def->ndisks; i++) {
        virStorageSource *diskSrc = vm->def->disks[i]->src;

        if (src == diskSrc)
            continue;

        if (virStoragePRDefIsManaged(diskSrc->pr))
            hasPR = true;

        if (virStorageSourceChainHasNVMe(diskSrc))
            hasNVMe = true;
    }

    if (src->type == VIR_STORAGE_TYPE_NVME) {
        if (!(path = virPCIDeviceAddressGetIOMMUGroupDev(&src->nvme->pciAddr)))
            return -1;

        /* Deny VFIO only if neither remaining disks nor other devices
         * (hostdevs etc., per qemuDomainNeedsVFIO) still need it. */
        if (!hasNVMe &&
            !qemuDomainNeedsVFIO(vm->def)) {
            ret = virCgroupDenyDevicePath(priv->cgroup, QEMU_DEV_VFIO, perms, true);
            virDomainAuditCgroupPath(vm, priv->cgroup, "deny",
                                     QEMU_DEV_VFIO,
                                     virCgroupGetDevicePermsString(perms), ret);
            if (ret < 0)
                return -1;
        }
    } else {
        if (!src->path || !virStorageSourceIsLocalStorage(src)) {
            VIR_DEBUG("Not updating cgroups for disk path '%s', type: %s",
                      NULLSTR(src->path), virStorageTypeToString(src->type));
            return 0;
        }

        path = g_strdup(src->path);
    }

    /* Device-mapper control can be denied only when no remaining disk
     * uses managed persistent reservations. */
    if (!hasPR &&
        virFileExists(QEMU_DEVICE_MAPPER_CONTROL_PATH)) {
        VIR_DEBUG("Disabling device mapper control");
        ret = virCgroupDenyDevicePath(priv->cgroup,
                                      QEMU_DEVICE_MAPPER_CONTROL_PATH,
                                      perms, true);
        virDomainAuditCgroupPath(vm, priv->cgroup, "deny",
                                 QEMU_DEVICE_MAPPER_CONTROL_PATH,
                                 virCgroupGetDevicePermsString(perms), ret);
        if (ret < 0)
            return ret;
    }

    VIR_DEBUG("Deny path %s", path);

    ret = virCgroupDenyDevicePath(priv->cgroup, path, perms, true);

    virDomainAuditCgroupPath(vm, priv->cgroup, "deny", path,
                             virCgroupGetDevicePermsString(perms), ret);

    /* If you're looking for a counter part to
     * qemuSetupImagePathCgroup you're at the right place.
     * However, we can't just blindly deny all the device mapper
     * targets of src->path because they might still be used by
     * another disk in domain. Just like we are not removing
     * disks from namespace. */

    return ret;
}
|
|
|
|
|
|
|
|
|
2013-07-18 09:29:27 +00:00
|
|
|
int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuSetupImageChainCgroup(virDomainObj *vm,
|
|
|
|
virStorageSource *src)
|
2010-12-16 16:10:54 +00:00
|
|
|
{
|
2021-03-11 07:16:13 +00:00
|
|
|
virStorageSource *next;
|
2014-06-20 12:05:05 +00:00
|
|
|
bool forceReadonly = false;
|
2013-07-08 10:08:46 +00:00
|
|
|
|
2019-01-16 14:49:07 +00:00
|
|
|
for (next = src; virStorageSourceIsBacking(next); next = next->backingStore) {
|
2016-02-16 14:37:01 +00:00
|
|
|
if (qemuSetupImageCgroupInternal(vm, next, forceReadonly) < 0)
|
2014-06-18 11:09:05 +00:00
|
|
|
return -1;
|
2014-06-20 12:05:05 +00:00
|
|
|
|
|
|
|
/* setup only the top level image for read-write */
|
|
|
|
forceReadonly = true;
|
2010-12-16 16:10:54 +00:00
|
|
|
}
|
2014-06-18 11:09:05 +00:00
|
|
|
|
|
|
|
return 0;
|
2010-12-16 16:10:54 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2013-07-18 09:29:27 +00:00
|
|
|
int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuTeardownImageChainCgroup(virDomainObj *vm,
|
|
|
|
virStorageSource *src)
|
2010-12-16 16:10:54 +00:00
|
|
|
{
|
2021-03-11 07:16:13 +00:00
|
|
|
virStorageSource *next;
|
2013-03-21 14:40:29 +00:00
|
|
|
|
2019-01-16 14:49:07 +00:00
|
|
|
for (next = src; virStorageSourceIsBacking(next); next = next->backingStore) {
|
2016-02-16 14:37:01 +00:00
|
|
|
if (qemuTeardownImageCgroup(vm, next) < 0)
|
2014-06-18 11:09:05 +00:00
|
|
|
return -1;
|
|
|
|
}
|
2013-03-21 14:40:29 +00:00
|
|
|
|
2014-06-18 11:09:05 +00:00
|
|
|
return 0;
|
2010-12-16 16:10:54 +00:00
|
|
|
}
|
|
|
|
|
2014-06-18 11:09:05 +00:00
|
|
|
|
2011-02-16 02:18:40 +00:00
|
|
|
/* qemuSetupChrSourceCgroup:
 * @vm: domain object
 * @source: character device source
 *
 * Allow R/W access to the host device backing @source in @vm's devices
 * cgroup.  Only VIR_DOMAIN_CHR_TYPE_DEV sources map to a host device
 * node; all other source types are a no-op.
 *
 * Returns: 0 on success or when nothing needs doing, negative on error.
 */
static int
qemuSetupChrSourceCgroup(virDomainObj *vm,
                         virDomainChrSourceDef *source)
{
    qemuDomainObjPrivate *priv = vm->privateData;
    int ret;

    if (!virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_DEVICES))
        return 0;

    if (source->type != VIR_DOMAIN_CHR_TYPE_DEV)
        return 0;

    VIR_DEBUG("Process path '%s' for device", source->data.file.path);

    ret = virCgroupAllowDevicePath(priv->cgroup, source->data.file.path,
                                   VIR_CGROUP_DEVICE_RW, false);
    virDomainAuditCgroupPath(vm, priv->cgroup, "allow",
                             source->data.file.path, "rw", ret);

    return ret;
}
|
|
|
|
|
2016-11-18 10:45:44 +00:00
|
|
|
|
|
|
|
/* qemuTeardownChrSourceCgroup:
 * @vm: domain object
 * @source: character device source
 *
 * Deny access to the host device backing @source in @vm's devices
 * cgroup.  Counterpart of qemuSetupChrSourceCgroup(); only
 * VIR_DOMAIN_CHR_TYPE_DEV sources are affected.
 *
 * Returns: 0 on success or when nothing needs doing, negative on error.
 */
static int
qemuTeardownChrSourceCgroup(virDomainObj *vm,
                            virDomainChrSourceDef *source)
{
    qemuDomainObjPrivate *priv = vm->privateData;
    int ret;

    if (!virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_DEVICES))
        return 0;

    if (source->type != VIR_DOMAIN_CHR_TYPE_DEV)
        return 0;

    VIR_DEBUG("Process path '%s' for device", source->data.file.path);

    ret = virCgroupDenyDevicePath(priv->cgroup, source->data.file.path,
                                  VIR_CGROUP_DEVICE_RW, false);
    virDomainAuditCgroupPath(vm, priv->cgroup, "deny",
                             source->data.file.path, "rw", ret);

    return ret;
}
|
|
|
|
|
|
|
|
|
2013-04-12 20:55:46 +00:00
|
|
|
/* qemuSetupChardevCgroupCB:
 * Iterator callback (virDomainChrDefForeach-style signature) that
 * allows a chardev's backing host device in the cgroup of the domain
 * passed via @opaque.
 */
static int
qemuSetupChardevCgroupCB(virDomainDef *def G_GNUC_UNUSED,
                         virDomainChrDef *dev,
                         void *opaque)
{
    virDomainObj *vm = opaque;

    return qemuSetupChrSourceCgroup(vm, dev->source);
}
|
|
|
|
|
|
|
|
|
|
|
|
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuSetupTPMCgroup(virDomainObj *vm,
|
|
|
|
virDomainTPMDef *dev)
|
2013-04-12 20:55:46 +00:00
|
|
|
{
|
2013-07-08 10:08:46 +00:00
|
|
|
int ret = 0;
|
2013-04-12 20:55:46 +00:00
|
|
|
|
|
|
|
switch (dev->type) {
|
|
|
|
case VIR_DOMAIN_TPM_TYPE_PASSTHROUGH:
|
2015-11-19 13:25:44 +00:00
|
|
|
ret = qemuSetupChrSourceCgroup(vm, &dev->data.passthrough.source);
|
2013-04-12 20:55:46 +00:00
|
|
|
break;
|
2017-04-04 16:22:31 +00:00
|
|
|
case VIR_DOMAIN_TPM_TYPE_EMULATOR:
|
2013-04-12 20:55:46 +00:00
|
|
|
case VIR_DOMAIN_TPM_TYPE_LAST:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2013-07-08 10:08:46 +00:00
|
|
|
return ret;
|
2013-04-12 20:55:46 +00:00
|
|
|
}
|
|
|
|
|
2010-12-16 16:10:54 +00:00
|
|
|
|
2017-11-21 12:33:07 +00:00
|
|
|
/* qemuSetupInputCgroup:
 * @vm: domain object
 * @dev: input device definition
 *
 * Allow R/W access to the evdev node of a passthrough/evdev input
 * device in @vm's devices cgroup.  Other input types need no host
 * device and are a no-op.
 *
 * Returns: 0 on success or no-op, negative on error.
 */
int
qemuSetupInputCgroup(virDomainObj *vm,
                     virDomainInputDef *dev)
{
    qemuDomainObjPrivate *priv = vm->privateData;
    int ret = 0;

    if (!virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_DEVICES))
        return 0;

    switch (dev->type) {
    case VIR_DOMAIN_INPUT_TYPE_PASSTHROUGH:
    case VIR_DOMAIN_INPUT_TYPE_EVDEV:
        VIR_DEBUG("Process path '%s' for input device", dev->source.evdev);
        ret = virCgroupAllowDevicePath(priv->cgroup, dev->source.evdev,
                                       VIR_CGROUP_DEVICE_RW, false);
        virDomainAuditCgroupPath(vm, priv->cgroup, "allow", dev->source.evdev, "rw", ret);
        break;
    }

    return ret;
}
|
|
|
|
|
|
|
|
|
2017-11-21 12:33:07 +00:00
|
|
|
/* qemuTeardownInputCgroup:
 * @vm: domain object
 * @dev: input device definition
 *
 * Deny access to the evdev node of a passthrough/evdev input device in
 * @vm's devices cgroup.  Counterpart of qemuSetupInputCgroup(); note
 * teardown denies "rwm" while setup only allowed "rw".
 *
 * Returns: 0 on success or no-op, negative on error.
 */
int
qemuTeardownInputCgroup(virDomainObj *vm,
                        virDomainInputDef *dev)
{
    qemuDomainObjPrivate *priv = vm->privateData;
    int ret = 0;

    if (!virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_DEVICES))
        return 0;

    switch (dev->type) {
    case VIR_DOMAIN_INPUT_TYPE_PASSTHROUGH:
    case VIR_DOMAIN_INPUT_TYPE_EVDEV:
        VIR_DEBUG("Process path '%s' for input device", dev->source.evdev);
        ret = virCgroupDenyDevicePath(priv->cgroup, dev->source.evdev,
                                      VIR_CGROUP_DEVICE_RWM, false);
        virDomainAuditCgroupPath(vm, priv->cgroup, "deny", dev->source.evdev, "rwm", ret);
        break;
    }

    return ret;
}
|
|
|
|
|
|
|
|
|
2019-09-17 08:06:26 +00:00
|
|
|
/**
 * qemuSetupHostdevCgroup:
 * @vm: domain object
 * @dev: device to allow
 *
 * For given host device @dev allow access to in Cgroups.
 * Devices accessed via VFIO additionally get R/W access to the
 * VFIO device node.
 *
 * Returns: 0 on success,
 *         -1 otherwise.
 */
int
qemuSetupHostdevCgroup(virDomainObj *vm,
                       virDomainHostdevDef *dev)
{
    qemuDomainObjPrivate *priv = vm->privateData;
    g_autofree char *path = NULL;
    int perms;
    int rv;

    if (!virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_DEVICES))
        return 0;

    /* qemuDomainGetHostdevPath may legitimately yield no path
     * (handled below); perms is only valid when path is set. */
    if (qemuDomainGetHostdevPath(dev, &path, &perms) < 0)
        return -1;

    if (path) {
        VIR_DEBUG("Cgroup allow %s perms=%d", path, perms);
        rv = virCgroupAllowDevicePath(priv->cgroup, path, perms, false);
        virDomainAuditCgroupPath(vm, priv->cgroup, "allow", path,
                                 virCgroupGetDevicePermsString(perms),
                                 rv);
        if (rv < 0)
            return -1;
    }

    if (qemuHostdevNeedsVFIO(dev)) {
        VIR_DEBUG("Cgroup allow %s perms=%d", QEMU_DEV_VFIO, VIR_CGROUP_DEVICE_RW);
        rv = virCgroupAllowDevicePath(priv->cgroup, QEMU_DEV_VFIO,
                                      VIR_CGROUP_DEVICE_RW, false);
        virDomainAuditCgroupPath(vm, priv->cgroup, "allow",
                                 QEMU_DEV_VFIO, "rw", rv);
        if (rv < 0)
            return -1;
    }

    return 0;
}
|
|
|
|
|
2019-09-17 08:06:26 +00:00
|
|
|
|
|
|
|
/**
 * qemuTeardownHostdevCgroup:
 * @vm: domain object
 * @dev: device to tear down
 *
 * For given host device @dev deny access to it in CGroups.
 * Note, @dev must not be in @vm's definition.
 *
 * Returns: 0 on success,
 *         -1 otherwise.
 */
int
qemuTeardownHostdevCgroup(virDomainObj *vm,
                          virDomainHostdevDef *dev)
{
    qemuDomainObjPrivate *priv = vm->privateData;
    g_autofree char *path = NULL;
    int rv;

    if (!virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_DEVICES))
        return 0;

    /* Skip tearing down Cgroup for hostdevs that represents absent
     * PCI devices, e.g. SR-IOV virtual functions that were removed from
     * the host while the domain was still running. */
    if (virHostdevIsPCIDevice(dev)) {
        const virDomainHostdevSubsysPCI *pcisrc = &dev->source.subsys.u.pci;

        if (!virPCIDeviceExists(&pcisrc->addr))
            return 0;
    }

    if (qemuDomainGetHostdevPath(dev, &path, NULL) < 0)
        return -1;

    if (path) {
        VIR_DEBUG("Cgroup deny %s", path);
        rv = virCgroupDenyDevicePath(priv->cgroup, path,
                                     VIR_CGROUP_DEVICE_RWM, false);
        virDomainAuditCgroupPath(vm, priv->cgroup,
                                 "deny", path, "rwm", rv);
        if (rv < 0)
            return -1;
    }

    /* Deny the shared VFIO node only if nothing left in the domain
     * definition still needs VFIO. */
    if (qemuHostdevNeedsVFIO(dev) &&
        !qemuDomainNeedsVFIO(vm->def)) {
        VIR_DEBUG("Cgroup deny " QEMU_DEV_VFIO);
        rv = virCgroupDenyDevicePath(priv->cgroup, QEMU_DEV_VFIO,
                                     VIR_CGROUP_DEVICE_RWM, false);
        virDomainAuditCgroupPath(vm, priv->cgroup, "deny",
                                 QEMU_DEV_VFIO, "rwm", rv);
        if (rv < 0)
            return -1;
    }

    return 0;
}
|
|
|
|
|
2017-02-09 16:53:53 +00:00
|
|
|
|
2017-02-22 15:33:12 +00:00
|
|
|
/* qemuSetupMemoryDevicesCgroup:
 * @vm: domain object
 * @mem: memory device definition
 *
 * Allow R/W access to the backing path of an NVDIMM or virtio-pmem
 * memory device in @vm's devices cgroup.  Other memory models have no
 * host device and are a no-op.
 *
 * Returns: 0 on success or no-op, negative on error.
 */
int
qemuSetupMemoryDevicesCgroup(virDomainObj *vm,
                             virDomainMemoryDef *mem)
{
    qemuDomainObjPrivate *priv = vm->privateData;
    int rv;

    if (mem->model != VIR_DOMAIN_MEMORY_MODEL_NVDIMM &&
        mem->model != VIR_DOMAIN_MEMORY_MODEL_VIRTIO_PMEM)
        return 0;

    if (!virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_DEVICES))
        return 0;

    VIR_DEBUG("Setting devices Cgroup for NVDIMM device: %s", mem->nvdimmPath);
    rv = virCgroupAllowDevicePath(priv->cgroup, mem->nvdimmPath,
                                  VIR_CGROUP_DEVICE_RW, false);
    virDomainAuditCgroupPath(vm, priv->cgroup, "allow",
                             mem->nvdimmPath, "rw", rv);

    return rv;
}
|
|
|
|
|
|
|
|
|
|
|
|
/* qemuTeardownMemoryDevicesCgroup:
 * @vm: domain object
 * @mem: memory device definition
 *
 * Deny access to the backing path of an NVDIMM or virtio-pmem memory
 * device in @vm's devices cgroup.  Counterpart of
 * qemuSetupMemoryDevicesCgroup().
 *
 * Returns: 0 on success or no-op, negative on error.
 */
int
qemuTeardownMemoryDevicesCgroup(virDomainObj *vm,
                                virDomainMemoryDef *mem)
{
    qemuDomainObjPrivate *priv = vm->privateData;
    int rv;

    if (mem->model != VIR_DOMAIN_MEMORY_MODEL_NVDIMM &&
        mem->model != VIR_DOMAIN_MEMORY_MODEL_VIRTIO_PMEM)
        return 0;

    if (!virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_DEVICES))
        return 0;

    rv = virCgroupDenyDevicePath(priv->cgroup, mem->nvdimmPath,
                                 VIR_CGROUP_DEVICE_RWM, false);
    virDomainAuditCgroupPath(vm, priv->cgroup,
                             "deny", mem->nvdimmPath, "rwm", rv);
    return rv;
}
|
|
|
|
|
|
|
|
|
2017-02-09 16:53:53 +00:00
|
|
|
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuSetupGraphicsCgroup(virDomainObj *vm,
|
|
|
|
virDomainGraphicsDef *gfx)
|
2017-02-09 16:53:53 +00:00
|
|
|
{
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
2018-11-22 15:12:40 +00:00
|
|
|
const char *rendernode = virDomainGraphicsGetRenderNode(gfx);
|
2017-02-09 16:53:53 +00:00
|
|
|
int ret;
|
|
|
|
|
2018-11-22 15:12:40 +00:00
|
|
|
if (!rendernode ||
|
|
|
|
!virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_DEVICES))
|
2017-02-09 16:53:53 +00:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
ret = virCgroupAllowDevicePath(priv->cgroup, rendernode,
|
|
|
|
VIR_CGROUP_DEVICE_RW, false);
|
|
|
|
virDomainAuditCgroupPath(vm, priv->cgroup, "allow", rendernode,
|
2017-12-28 17:41:28 +00:00
|
|
|
"rw", ret);
|
2017-02-09 16:53:53 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2019-09-23 10:44:28 +00:00
|
|
|
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuSetupVideoCgroup(virDomainObj *vm,
|
|
|
|
virDomainVideoDef *def)
|
2019-09-23 10:44:28 +00:00
|
|
|
{
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
|
|
|
virDomainVideoAccelDef *accel = def->accel;
|
2019-09-23 10:44:28 +00:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (!accel)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (!accel->rendernode ||
|
|
|
|
!virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_DEVICES))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
ret = virCgroupAllowDevicePath(priv->cgroup, accel->rendernode,
|
|
|
|
VIR_CGROUP_DEVICE_RW, false);
|
|
|
|
virDomainAuditCgroupPath(vm, priv->cgroup, "allow", accel->rendernode,
|
|
|
|
"rw", ret);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2013-05-17 11:59:31 +00:00
|
|
|
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuSetupBlkioCgroup(virDomainObj *vm)
|
2013-05-17 11:59:31 +00:00
|
|
|
{
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
2013-05-17 11:59:31 +00:00
|
|
|
|
|
|
|
if (!virCgroupHasController(priv->cgroup,
|
|
|
|
VIR_CGROUP_CONTROLLER_BLKIO)) {
|
|
|
|
if (vm->def->blkio.weight || vm->def->blkio.ndevices) {
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
|
|
|
|
_("Block I/O tuning is not available on this host"));
|
|
|
|
return -1;
|
|
|
|
} else {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-02-17 21:29:11 +00:00
|
|
|
return virDomainCgroupSetupBlkio(priv->cgroup, vm->def->blkio);
|
2013-05-17 11:59:31 +00:00
|
|
|
}
|
|
|
|
|
2013-04-29 17:15:26 +00:00
|
|
|
|
2013-05-20 11:39:54 +00:00
|
|
|
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuSetupMemoryCgroup(virDomainObj *vm)
|
2013-05-20 11:39:54 +00:00
|
|
|
{
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
2013-05-20 11:39:54 +00:00
|
|
|
|
2013-11-19 22:45:43 +00:00
|
|
|
if (!virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_MEMORY)) {
|
2015-03-02 19:04:12 +00:00
|
|
|
if (virMemoryLimitIsSet(vm->def->mem.hard_limit) ||
|
|
|
|
virMemoryLimitIsSet(vm->def->mem.soft_limit) ||
|
|
|
|
virMemoryLimitIsSet(vm->def->mem.swap_hard_limit)) {
|
2013-05-20 11:39:54 +00:00
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
|
|
|
|
_("Memory cgroup is not available on this host"));
|
|
|
|
return -1;
|
2013-05-21 15:02:36 +00:00
|
|
|
} else {
|
|
|
|
return 0;
|
2013-05-20 11:39:54 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-02-17 21:29:12 +00:00
|
|
|
return virDomainCgroupSetupMemtune(priv->cgroup, vm->def->mem);
|
2013-05-20 11:39:54 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2016-02-16 15:26:01 +00:00
|
|
|
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuSetupFirmwareCgroup(virDomainObj *vm)
|
2016-02-16 15:26:01 +00:00
|
|
|
{
|
|
|
|
if (!vm->def->os.loader)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (vm->def->os.loader->path &&
|
|
|
|
qemuSetupImagePathCgroup(vm, vm->def->os.loader->path,
|
|
|
|
vm->def->os.loader->readonly == VIR_TRISTATE_BOOL_YES) < 0)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
if (vm->def->os.loader->nvram &&
|
|
|
|
qemuSetupImagePathCgroup(vm, vm->def->os.loader->nvram, false) < 0)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2016-11-18 10:17:51 +00:00
|
|
|
/* qemuSetupRNGCgroup:
 * @vm: domain object
 * @rng: RNG device definition
 *
 * Allow R/W access to the host file backing a 'random' backend RNG
 * device in @vm's devices cgroup.  A missing source file (ENOENT) is
 * tolerated so that e.g. /dev/hwrng absence does not block startup.
 *
 * Returns: 0 on success or no-op, -1 on error.
 */
int
qemuSetupRNGCgroup(virDomainObj *vm,
                   virDomainRNGDef *rng)
{
    qemuDomainObjPrivate *priv = vm->privateData;
    int rv;

    if (!virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_DEVICES))
        return 0;

    if (rng->backend == VIR_DOMAIN_RNG_BACKEND_RANDOM) {
        VIR_DEBUG("Setting Cgroup ACL for RNG device");
        rv = virCgroupAllowDevicePath(priv->cgroup,
                                      rng->source.file,
                                      VIR_CGROUP_DEVICE_RW, false);
        virDomainAuditCgroupPath(vm, priv->cgroup, "allow",
                                 rng->source.file,
                                 "rw", rv);
        /* deliberately ignore a missing device node */
        if (rv < 0 &&
            !virLastErrorIsSystemErrno(ENOENT))
            return -1;
    }

    return 0;
}
|
|
|
|
|
|
|
|
|
|
|
|
/* qemuTeardownRNGCgroup:
 * @vm: domain object
 * @rng: RNG device definition
 *
 * Deny access to the host file backing a 'random' backend RNG device
 * in @vm's devices cgroup.  Counterpart of qemuSetupRNGCgroup(); a
 * missing device node (ENOENT) is tolerated.
 *
 * Returns: 0 on success or no-op, -1 on error.
 */
int
qemuTeardownRNGCgroup(virDomainObj *vm,
                      virDomainRNGDef *rng)
{
    qemuDomainObjPrivate *priv = vm->privateData;
    int rv;

    if (!virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_DEVICES))
        return 0;

    if (rng->backend == VIR_DOMAIN_RNG_BACKEND_RANDOM) {
        VIR_DEBUG("Tearing down Cgroup ACL for RNG device");
        rv = virCgroupDenyDevicePath(priv->cgroup,
                                     rng->source.file,
                                     VIR_CGROUP_DEVICE_RW, false);
        virDomainAuditCgroupPath(vm, priv->cgroup, "deny",
                                 rng->source.file,
                                 "rw", rv);
        /* deliberately ignore a missing device node */
        if (rv < 0 &&
            !virLastErrorIsSystemErrno(ENOENT))
            return -1;
    }

    return 0;
}
|
|
|
|
|
|
|
|
|
2016-11-18 10:45:44 +00:00
|
|
|
/* Thin wrapper: set up cgroup device ACLs for a character device by
 * delegating to the chardev-source helper. */
int
qemuSetupChardevCgroup(virDomainObj *vm,
                       virDomainChrDef *dev)
{
    return qemuSetupChrSourceCgroup(vm, dev->source);
}
|
|
|
|
|
|
|
|
|
|
|
|
/* Thin wrapper: revoke cgroup device ACLs for a character device by
 * delegating to the chardev-source helper. */
int
qemuTeardownChardevCgroup(virDomainObj *vm,
                          virDomainChrDef *dev)
{
    return qemuTeardownChrSourceCgroup(vm, dev->source);
}
|
|
|
|
|
|
|
|
|
2019-01-21 13:50:11 +00:00
|
|
|
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuSetupSEVCgroup(virDomainObj *vm)
|
2019-01-21 13:50:11 +00:00
|
|
|
{
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
2019-01-21 13:50:11 +00:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (!virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_DEVICES))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
ret = virCgroupAllowDevicePath(priv->cgroup, "/dev/sev",
|
|
|
|
VIR_CGROUP_DEVICE_RW, false);
|
|
|
|
virDomainAuditCgroupPath(vm, priv->cgroup, "allow", "/dev/sev",
|
|
|
|
"rw", ret);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2013-05-17 11:59:33 +00:00
|
|
|
/*
 * Populate the 'devices' cgroup controller for a freshly started domain:
 * start from deny-all, then whitelist exactly the device nodes the guest
 * configuration requires (firmware images, disk chains, PTYs, sound,
 * the configured device ACL list, chardevs, TPMs, hostdevs, memory
 * devices, graphics, video, input, RNG and SEV).
 *
 * Returns 0 on success (including when device filtering is unavailable),
 * -1 on error.
 */
static int
qemuSetupDevicesCgroup(virDomainObj *vm)
{
    qemuDomainObjPrivate *priv = vm->privateData;
    g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(priv->driver);
    const char *const *deviceACL = NULL;
    int rv = -1;
    size_t i;

    /* Nothing to do when the devices controller is not available. */
    if (!virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_DEVICES))
        return 0;

    /* Default-deny: everything not explicitly allowed below is blocked. */
    rv = virCgroupDenyAllDevices(priv->cgroup);
    virDomainAuditCgroup(vm, priv->cgroup, "deny", "all", rv == 0);
    if (rv < 0) {
        /* EPERM here means the ACL file is not writable for us
         * (e.g. unprivileged setup); degrade to no filtering. */
        if (virLastErrorIsSystemErrno(EPERM)) {
            virResetLastError();
            VIR_WARN("Group devices ACL is not accessible, disabling filtering");
            return 0;
        }

        return -1;
    }

    if (qemuSetupFirmwareCgroup(vm) < 0)
        return -1;

    /* Whole backing chain of every disk must be accessible. */
    for (i = 0; i < vm->def->ndisks; i++) {
        if (qemuSetupImageChainCgroup(vm, vm->def->disks[i]->src) < 0)
            return -1;
    }

    /* PTY character devices are always needed (serial consoles etc.). */
    rv = virCgroupAllowDevice(priv->cgroup, 'c', DEVICE_PTY_MAJOR, -1,
                              VIR_CGROUP_DEVICE_RW);
    virDomainAuditCgroupMajor(vm, priv->cgroup, "allow", DEVICE_PTY_MAJOR,
                              "pty", "rw", rv == 0);
    if (rv < 0)
        return -1;

    /* Admin-configured ACL list, falling back to the built-in default. */
    deviceACL = cfg->cgroupDeviceACL ?
                (const char *const *)cfg->cgroupDeviceACL :
                defaultDeviceACL;

    /* Host sound devices are only exposed when the configuration says
     * host audio is allowed for the active graphics setup (no graphics,
     * VNC with vncAllowHostAudio, or SDL). */
    if (vm->def->nsounds &&
        ((!vm->def->ngraphics && cfg->nogfxAllowHostAudio) ||
         (vm->def->graphics &&
          ((vm->def->graphics[0]->type == VIR_DOMAIN_GRAPHICS_TYPE_VNC &&
           cfg->vncAllowHostAudio) ||
           (vm->def->graphics[0]->type == VIR_DOMAIN_GRAPHICS_TYPE_SDL))))) {
        rv = virCgroupAllowDevice(priv->cgroup, 'c', DEVICE_SND_MAJOR, -1,
                                  VIR_CGROUP_DEVICE_RW);
        virDomainAuditCgroupMajor(vm, priv->cgroup, "allow", DEVICE_SND_MAJOR,
                                  "sound", "rw", rv == 0);
        if (rv < 0)
            return -1;
    }

    for (i = 0; deviceACL[i] != NULL; i++) {
        if (!virFileExists(deviceACL[i])) {
            VIR_DEBUG("Ignoring non-existent device %s", deviceACL[i]);
            continue;
        }

        rv = virCgroupAllowDevicePath(priv->cgroup, deviceACL[i],
                                      VIR_CGROUP_DEVICE_RW, false);
        virDomainAuditCgroupPath(vm, priv->cgroup, "allow", deviceACL[i], "rw", rv);
        /* Vanished-between-stat-and-allow races are ignored. */
        if (rv < 0 &&
            !virLastErrorIsSystemErrno(ENOENT))
            return -1;
    }

    if (virDomainChrDefForeach(vm->def,
                               true,
                               qemuSetupChardevCgroupCB,
                               vm) < 0)
        return -1;

    for (i = 0; i < vm->def->ntpms; i++) {
        if (qemuSetupTPMCgroup(vm, vm->def->tpms[i]) < 0)
            return -1;
    }

    for (i = 0; i < vm->def->nhostdevs; i++) {
        /* This may allow /dev/vfio/vfio multiple times, but that
         * is not a problem. Kernel will have only one record. */
        if (qemuSetupHostdevCgroup(vm, vm->def->hostdevs[i]) < 0)
            return -1;
    }

    for (i = 0; i < vm->def->nmems; i++) {
        if (qemuSetupMemoryDevicesCgroup(vm, vm->def->mems[i]) < 0)
            return -1;
    }

    for (i = 0; i < vm->def->ngraphics; i++) {
        if (qemuSetupGraphicsCgroup(vm, vm->def->graphics[i]) < 0)
            return -1;
    }

    for (i = 0; i < vm->def->nvideos; i++) {
        if (qemuSetupVideoCgroup(vm, vm->def->videos[i]) < 0)
            return -1;
    }

    for (i = 0; i < vm->def->ninputs; i++) {
        if (qemuSetupInputCgroup(vm, vm->def->inputs[i]) < 0)
            return -1;
    }

    for (i = 0; i < vm->def->nrngs; i++) {
        if (qemuSetupRNGCgroup(vm, vm->def->rngs[i]) < 0)
            return -1;
    }

    if (vm->def->sev && qemuSetupSEVCgroup(vm) < 0)
        return -1;

    return 0;
}
|
|
|
|
|
|
|
|
|
2014-07-08 07:53:06 +00:00
|
|
|
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuSetupCpusetCgroup(virDomainObj *vm)
|
2014-07-08 07:53:06 +00:00
|
|
|
{
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
2014-07-08 07:53:06 +00:00
|
|
|
|
|
|
|
if (!virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPUSET))
|
|
|
|
return 0;
|
|
|
|
|
2015-03-11 10:17:15 +00:00
|
|
|
if (virCgroupSetCpusetMemoryMigrate(priv->cgroup, true) < 0)
|
|
|
|
return -1;
|
|
|
|
|
2015-03-27 09:23:19 +00:00
|
|
|
return 0;
|
2013-05-17 11:59:34 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2013-05-24 09:08:27 +00:00
|
|
|
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuSetupCpuCgroup(virDomainObj *vm)
|
2013-05-24 09:08:27 +00:00
|
|
|
{
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
2013-05-24 09:08:27 +00:00
|
|
|
|
|
|
|
if (!virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPU)) {
|
2014-03-04 11:39:46 +00:00
|
|
|
if (vm->def->cputune.sharesSpecified) {
|
2013-05-24 09:08:27 +00:00
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
|
|
|
|
_("CPU tuning is not available on this host"));
|
|
|
|
return -1;
|
|
|
|
} else {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-03-04 12:56:24 +00:00
|
|
|
if (vm->def->cputune.sharesSpecified) {
|
2021-03-03 13:26:53 +00:00
|
|
|
if (virCgroupSetCpuShares(priv->cgroup, vm->def->cputune.shares) < 0)
|
2014-03-04 12:56:24 +00:00
|
|
|
return -1;
|
|
|
|
}
|
2013-05-24 09:08:27 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2013-07-22 12:59:28 +00:00
|
|
|
/*
 * Create (or re-create) the cgroup hierarchy for a domain that is being
 * started.  Registers the VM as a "machine" under the resource partition
 * from the XML (defaulting to "/machine") and stores the resulting
 * cgroup handle in priv->cgroup.
 *
 * @nnicindexes/@nicindexes: NIC interface indexes forwarded to the
 * machine registration (used by the machine/cgroup backend).
 *
 * Returns 0 on success or when cgroups are unavailable/not applicable
 * (unprivileged driver), -1 on error.
 */
static int
qemuInitCgroup(virDomainObj *vm,
               size_t nnicindexes,
               int *nicindexes)
{
    qemuDomainObjPrivate *priv = vm->privateData;
    g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(priv->driver);

    /* Only the privileged driver manages cgroups. */
    if (!priv->driver->privileged)
        return 0;

    if (!virCgroupAvailable())
        return 0;

    /* Drop any stale handle from a previous run of this VM. */
    virCgroupFree(priv->cgroup);
    priv->cgroup = NULL;

    /* Default the resource partition to "/machine" when the XML did not
     * specify one; the default is stored back into the definition. */
    if (!vm->def->resource) {
        virDomainResourceDef *res;

        res = g_new0(virDomainResourceDef, 1);

        res->partition = g_strdup("/machine");

        vm->def->resource = res;
    }

    if (!g_path_is_absolute(vm->def->resource->partition)) {
        virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
                       _("Resource partition '%s' must start with '/'"),
                       vm->def->resource->partition);
        return -1;
    }

    if (virCgroupNewMachine(priv->machineName,
                            "qemu",
                            vm->def->uuid,
                            NULL,
                            vm->pid,
                            false,
                            nnicindexes, nicindexes,
                            vm->def->resource->partition,
                            cfg->cgroupControllers,
                            cfg->maxThreadsPerProc,
                            &priv->cgroup) < 0) {
        /* Some failures (e.g. cgroups simply not usable) are treated as
         * "run without cgroups" rather than a hard error. */
        if (virCgroupNewIgnoreError())
            return 0;

        return -1;
    }

    return 0;
}
|
Change default cgroup layout for QEMU/LXC and honour XML config
Historically QEMU/LXC guests have been placed in a cgroup layout
that is
$LOCATION-OF-LIBVIRTD/libvirt/{qemu,lxc}/$VMNAME
This is bad for a number of reasons
- The cgroup hierarchy gets very deep which seriously
impacts kernel performance due to cgroups scalability
limitations.
- It is hard to setup cgroup policies which apply across
services and virtual machines, since all VMs are underneath
the libvirtd service.
To address this the default cgroup location is changed to
be
/system/$VMNAME.{lxc,qemu}.libvirt
This puts virtual machines at the same level in the hierarchy
as system services, allowing consistent policy to be setup
across all of them.
This also honours the new resource partition location from the
XML configuration, for example
<resource>
<partition>/virtualmachines/production</partitions>
</resource>
will result in the VM being placed at
/virtualmachines/production/$VMNAME.{lxc,qemu}.libvirt
NB, with the exception of the default, /system, path which
is intended to always exist, libvirt will not attempt to
auto-create the partitions in the XML. It is the responsibility
of the admin/app to configure the partitions. Later libvirt
APIs will provide a way todo this.
Signed-off-by: Daniel P. Berrange <berrange@redhat.com>
2013-04-03 10:01:49 +00:00
|
|
|
|
2020-09-22 12:05:50 +00:00
|
|
|
static int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuRestoreCgroupThread(virCgroup *cgroup,
|
2020-09-22 12:05:50 +00:00
|
|
|
virCgroupThreadName thread,
|
|
|
|
int id)
|
|
|
|
{
|
2020-10-09 14:01:07 +00:00
|
|
|
g_autoptr(virCgroup) cgroup_temp = NULL;
|
2020-09-22 12:05:50 +00:00
|
|
|
g_autofree char *nodeset = NULL;
|
|
|
|
|
|
|
|
if (virCgroupNewThread(cgroup, thread, id, false, &cgroup_temp) < 0)
|
2020-10-09 14:01:07 +00:00
|
|
|
return -1;
|
2020-09-22 12:05:50 +00:00
|
|
|
|
|
|
|
if (virCgroupSetCpusetMemoryMigrate(cgroup_temp, true) < 0)
|
2020-10-09 14:01:07 +00:00
|
|
|
return -1;
|
2020-09-22 12:05:50 +00:00
|
|
|
|
|
|
|
if (virCgroupGetCpusetMems(cgroup_temp, &nodeset) < 0)
|
2020-10-09 14:01:07 +00:00
|
|
|
return -1;
|
2020-09-22 12:05:50 +00:00
|
|
|
|
|
|
|
if (virCgroupSetCpusetMems(cgroup_temp, nodeset) < 0)
|
2020-10-09 14:01:07 +00:00
|
|
|
return -1;
|
2020-09-22 12:05:50 +00:00
|
|
|
|
2020-10-09 14:01:07 +00:00
|
|
|
return 0;
|
2020-09-22 12:05:50 +00:00
|
|
|
}
|
|
|
|
|
2014-12-12 14:29:48 +00:00
|
|
|
/*
 * Best-effort re-synchronization of cpuset cgroup state after libvirtd
 * reconnects to a running domain.  Widens the top-level cpuset.mems to
 * all host NUMA nodes (only when the top-level cgroup has no tasks of
 * its own), then restores the per-vcpu, per-iothread and emulator
 * thread cgroups.  Failures are logged and swallowed; this function
 * never reports an error to the caller.
 */
static void
qemuRestoreCgroupState(virDomainObj *vm)
{
    g_autofree char *mem_mask = NULL;
    qemuDomainObjPrivate *priv = vm->privateData;
    size_t i = 0;
    g_autoptr(virBitmap) all_nodes = NULL;

    /* Without NUMA or the cpuset controller there is nothing to restore. */
    if (!virNumaIsAvailable() ||
        !virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPUSET))
        return;

    if (!(all_nodes = virNumaGetHostMemoryNodeset()))
        goto error;

    if (!(mem_mask = virBitmapFormat(all_nodes)))
        goto error;

    /* Only touch the top-level cpuset.mems when no tasks live directly
     * in it (<= 0 also covers the error case of the query itself). */
    if (virCgroupHasEmptyTasks(priv->cgroup,
                               VIR_CGROUP_CONTROLLER_CPUSET) <= 0)
        goto error;

    if (virCgroupSetCpusetMems(priv->cgroup, mem_mask) < 0)
        goto error;

    /* NB: failures below return silently rather than jumping to the
     * error label — the debug message there is for the top-level steps. */
    for (i = 0; i < virDomainDefGetVcpusMax(vm->def); i++) {
        virDomainVcpuDef *vcpu = virDomainDefGetVcpu(vm->def, i);

        if (!vcpu->online)
            continue;

        if (qemuRestoreCgroupThread(priv->cgroup,
                                    VIR_CGROUP_THREAD_VCPU, i) < 0)
            return;
    }

    for (i = 0; i < vm->def->niothreadids; i++) {
        if (qemuRestoreCgroupThread(priv->cgroup, VIR_CGROUP_THREAD_IOTHREAD,
                                    vm->def->iothreadids[i]->iothread_id) < 0)
            return;
    }

    if (qemuRestoreCgroupThread(priv->cgroup,
                                VIR_CGROUP_THREAD_EMULATOR, 0) < 0)
        return;

    return;

 error:
    virResetLastError();
    VIR_DEBUG("Couldn't restore cgroups to meaningful state");
    return;
}
|
2013-07-22 12:59:28 +00:00
|
|
|
|
|
|
|
/*
 * Attach to the already-existing cgroup of a running domain when the
 * daemon (re)connects to it: detect the machine's cgroup, store the
 * handle in priv->cgroup and re-apply cpuset state.
 *
 * Returns 0 on success or when cgroups are unavailable/not applicable
 * (unprivileged driver), -1 on error.
 */
int
qemuConnectCgroup(virDomainObj *vm)
{
    qemuDomainObjPrivate *priv = vm->privateData;
    g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(priv->driver);

    if (!priv->driver->privileged)
        return 0;

    if (!virCgroupAvailable())
        return 0;

    /* Drop any stale handle before detection. */
    virCgroupFree(priv->cgroup);
    priv->cgroup = NULL;

    if (virCgroupNewDetectMachine(vm->def->name,
                                  "qemu",
                                  vm->pid,
                                  cfg->cgroupControllers,
                                  priv->machineName,
                                  &priv->cgroup) < 0)
        return -1;

    /* Best-effort; qemuRestoreCgroupState never fails the caller. */
    qemuRestoreCgroupState(vm);
    return 0;
}
|
|
|
|
|
2013-07-18 09:29:27 +00:00
|
|
|
/*
 * Top-level cgroup setup for a newly started domain: create the machine
 * cgroup and then configure the devices, blkio, memory, cpu and cpuset
 * controllers in that order.  Must be called after the QEMU process has
 * been started (vm->pid must be set).
 *
 * Returns 0 on success (including when cgroups are not in use),
 * -1 on error.
 */
int
qemuSetupCgroup(virDomainObj *vm,
                size_t nnicindexes,
                int *nicindexes)
{
    qemuDomainObjPrivate *priv = vm->privateData;

    if (!vm->pid) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("Cannot setup cgroups until process is started"));
        return -1;
    }

    if (qemuInitCgroup(vm, nnicindexes, nicindexes) < 0)
        return -1;

    /* qemuInitCgroup may legitimately leave priv->cgroup NULL
     * (unprivileged driver, cgroups unavailable) — then we are done. */
    if (!priv->cgroup)
        return 0;

    if (qemuSetupDevicesCgroup(vm) < 0)
        return -1;

    if (qemuSetupBlkioCgroup(vm) < 0)
        return -1;

    if (qemuSetupMemoryCgroup(vm) < 0)
        return -1;

    if (qemuSetupCpuCgroup(vm) < 0)
        return -1;

    if (qemuSetupCpusetCgroup(vm) < 0)
        return -1;

    return 0;
}
|
|
|
|
|
2013-07-18 09:29:27 +00:00
|
|
|
/* Thin wrapper applying CFS bandwidth settings (cfs_period/cfs_quota)
 * to @cgroup.  Returns the result of virCgroupSetupCpuPeriodQuota. */
int
qemuSetupCgroupVcpuBW(virCgroup *cgroup,
                      unsigned long long period,
                      long long quota)
{
    return virCgroupSetupCpuPeriodQuota(cgroup, period, quota);
}
|
|
|
|
|
2014-09-03 13:05:02 +00:00
|
|
|
|
2013-07-18 09:29:27 +00:00
|
|
|
/* Thin wrapper pinning @cgroup to the CPUs in @cpumask via cpuset.cpus.
 * Returns the result of virCgroupSetupCpusetCpus. */
int
qemuSetupCgroupCpusetCpus(virCgroup *cgroup,
                          virBitmap *cpumask)
{
    return virCgroupSetupCpusetCpus(cgroup, cpumask);
}
|
|
|
|
|
2011-07-21 02:10:31 +00:00
|
|
|
|
2018-04-05 19:06:55 +00:00
|
|
|
/*
 * Place helper processes of external devices (e.g. swtpm-style helpers
 * — whatever qemuExtDevicesHasDevice reports) into the emulator thread
 * cgroup of the domain.
 *
 * Returns 0 on success or when there is nothing to do, -1 on error.
 */
int
qemuSetupCgroupForExtDevices(virDomainObj *vm,
                             virQEMUDriver *driver)
{
    qemuDomainObjPrivate *priv = vm->privateData;
    g_autoptr(virCgroup) cgroup_temp = NULL;

    if (!qemuExtDevicesHasDevice(vm->def) ||
        priv->cgroup == NULL)
        return 0; /* Not supported, so claim success */

    /*
     * If CPU cgroup controller is not initialized here, then we need
     * neither period nor quota settings.  And if CPUSET controller is
     * not initialized either, then there's nothing to do anyway.
     */
    if (!virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPU) &&
        !virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPUSET))
        return 0;

    /* Reuse the existing emulator thread cgroup (do not create it). */
    if (virCgroupNewThread(priv->cgroup, VIR_CGROUP_THREAD_EMULATOR, 0,
                           false, &cgroup_temp) < 0)
        return -1;

    return qemuExtDevicesSetupCgroup(driver, vm, cgroup_temp);
}
|
|
|
|
|
|
|
|
|
2016-02-16 13:43:37 +00:00
|
|
|
int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuSetupGlobalCpuCgroup(virDomainObj *vm)
|
2016-02-16 13:43:37 +00:00
|
|
|
{
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
2016-02-16 13:43:37 +00:00
|
|
|
unsigned long long period = vm->def->cputune.global_period;
|
|
|
|
long long quota = vm->def->cputune.global_quota;
|
2020-03-31 15:44:07 +00:00
|
|
|
g_autofree char *mem_mask = NULL;
|
2016-02-16 13:43:37 +00:00
|
|
|
virDomainNumatuneMemMode mem_mode;
|
|
|
|
|
|
|
|
if ((period || quota) &&
|
|
|
|
!virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPU)) {
|
|
|
|
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
|
|
|
|
_("cgroup cpu is required for scheduler tuning"));
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If CPU cgroup controller is not initialized here, then we need
|
|
|
|
* neither period nor quota settings. And if CPUSET controller is
|
|
|
|
* not initialized either, then there's nothing to do anyway.
|
|
|
|
*/
|
|
|
|
if (!virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPU) &&
|
|
|
|
!virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPUSET))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
|
|
|
|
if (virDomainNumatuneGetMode(vm->def->numa, -1, &mem_mode) == 0 &&
|
|
|
|
mem_mode == VIR_DOMAIN_NUMATUNE_MEM_STRICT &&
|
|
|
|
virDomainNumatuneMaybeFormatNodeset(vm->def->numa,
|
|
|
|
priv->autoNodeset,
|
|
|
|
&mem_mask, -1) < 0)
|
2020-03-31 15:44:07 +00:00
|
|
|
return -1;
|
2016-02-16 13:43:37 +00:00
|
|
|
|
|
|
|
if (period || quota) {
|
|
|
|
if (qemuSetupCgroupVcpuBW(priv->cgroup, period, quota) < 0)
|
2020-03-31 15:44:07 +00:00
|
|
|
return -1;
|
2016-02-16 13:43:37 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2013-07-18 09:29:27 +00:00
|
|
|
int
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuRemoveCgroup(virDomainObj *vm)
|
2010-12-16 16:10:54 +00:00
|
|
|
{
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuDomainObjPrivate *priv = vm->privateData;
|
2010-12-16 16:10:54 +00:00
|
|
|
|
2013-03-21 14:40:29 +00:00
|
|
|
if (priv->cgroup == NULL)
|
2010-12-16 16:10:54 +00:00
|
|
|
return 0; /* Not supported, so claim success */
|
|
|
|
|
2016-02-01 15:50:54 +00:00
|
|
|
if (virCgroupTerminateMachine(priv->machineName) < 0) {
|
2014-09-25 11:32:58 +00:00
|
|
|
if (!virCgroupNewIgnoreError())
|
|
|
|
VIR_DEBUG("Failed to terminate cgroup for %s", vm->def->name);
|
|
|
|
}
|
|
|
|
|
2013-03-21 14:40:29 +00:00
|
|
|
return virCgroupRemove(priv->cgroup);
|
2010-12-16 16:10:54 +00:00
|
|
|
}
|
2016-09-07 11:11:59 +00:00
|
|
|
|
|
|
|
|
|
|
|
static void
|
2021-03-11 07:16:13 +00:00
|
|
|
qemuCgroupEmulatorAllNodesDataFree(qemuCgroupEmulatorAllNodesData *data)
|
2016-09-07 11:11:59 +00:00
|
|
|
{
|
|
|
|
if (!data)
|
|
|
|
return;
|
|
|
|
|
2020-09-22 12:07:27 +00:00
|
|
|
virCgroupFree(data->emulatorCgroup);
|
2021-02-03 19:36:01 +00:00
|
|
|
g_free(data->emulatorMemMask);
|
|
|
|
g_free(data);
|
2016-09-07 11:11:59 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/**
 * qemuCgroupEmulatorAllNodesAllow:
 * @cgroup: domain cgroup pointer
 * @retData: filled with structure used to roll back the operation
 *
 * Allows all NUMA nodes for the qemu emulator thread temporarily. This is
 * necessary when hotplugging cpus since it requires memory allocated in the
 * DMA region. Afterwards the operation can be reverted by
 * qemuCgroupEmulatorAllNodesRestore.
 *
 * The previous cpuset.mems value is saved in @retData so it can be
 * restored later; ownership of *@retData passes to the caller.
 *
 * Returns 0 on success -1 on error
 */
int
qemuCgroupEmulatorAllNodesAllow(virCgroup *cgroup,
                                qemuCgroupEmulatorAllNodesData **retData)
{
    qemuCgroupEmulatorAllNodesData *data = NULL;
    g_autofree char *all_nodes_str = NULL;
    g_autoptr(virBitmap) all_nodes = NULL;
    int ret = -1;

    /* No NUMA or no cpuset controller: nothing to widen, report success. */
    if (!virNumaIsAvailable() ||
        !virCgroupHasController(cgroup, VIR_CGROUP_CONTROLLER_CPUSET))
        return 0;

    if (!(all_nodes = virNumaGetHostMemoryNodeset()))
        goto cleanup;

    if (!(all_nodes_str = virBitmapFormat(all_nodes)))
        goto cleanup;

    data = g_new0(qemuCgroupEmulatorAllNodesData, 1);

    /* Reuse the existing emulator thread cgroup (do not create it). */
    if (virCgroupNewThread(cgroup, VIR_CGROUP_THREAD_EMULATOR, 0,
                           false, &data->emulatorCgroup) < 0)
        goto cleanup;

    /* Remember the old mask, then allow all host memory nodes. */
    if (virCgroupGetCpusetMems(data->emulatorCgroup, &data->emulatorMemMask) < 0 ||
        virCgroupSetCpusetMems(data->emulatorCgroup, all_nodes_str) < 0)
        goto cleanup;

    /* Success: hand the rollback data to the caller; the cleanup call
     * below then sees NULL and frees nothing. */
    *retData = g_steal_pointer(&data);
    ret = 0;

 cleanup:
    qemuCgroupEmulatorAllNodesDataFree(data);

    return ret;
}
|
|
|
|
|
|
|
|
|
|
|
|
/**
 * qemuCgroupEmulatorAllNodesRestore:
 * @data: data structure created by qemuCgroupEmulatorAllNodesAllow
 *
 * Rolls back the setting done by qemuCgroupEmulatorAllNodesAllow and frees the
 * associated data.
 */
void
qemuCgroupEmulatorAllNodesRestore(qemuCgroupEmulatorAllNodesData *data)
{
    virErrorPtr err;

    if (!data)
        return;

    /* Preserve any pending error across the rollback so this cleanup
     * path cannot clobber the error the caller is about to report. */
    virErrorPreserveLast(&err);
    virCgroupSetCpusetMems(data->emulatorCgroup, data->emulatorMemMask);
    virErrorRestore(&err);

    qemuCgroupEmulatorAllNodesDataFree(data);
}
|