/*
 * qemu_cgroup.c: QEMU cgroup management
 *
 * Copyright (C) 2006-2013 Red Hat, Inc.
 * Copyright (C) 2006 Daniel P. Berrange
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Author: Daniel P. Berrange <berrange@redhat.com>
 */

#include <config.h>

#include "qemu_cgroup.h"
#include "qemu_domain.h"
#include "qemu_process.h"
#include "vircgroup.h"
#include "virlog.h"
#include "viralloc.h"
#include "virerror.h"
#include "domain_audit.h"
#include "virscsi.h"

#define VIR_FROM_THIS VIR_FROM_QEMU

static const char *const defaultDeviceACL[] = {
    "/dev/null", "/dev/full", "/dev/zero",
    "/dev/random", "/dev/urandom",
    "/dev/ptmx", "/dev/kvm", "/dev/kqemu",
    "/dev/rtc", "/dev/hpet", "/dev/vfio/vfio",
    NULL,
};
#define DEVICE_PTY_MAJOR 136
#define DEVICE_SND_MAJOR 116

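/* Callback for virDomainDiskDefForeachPath: grant the domain's devices
 * cgroup access to one path backing a disk (read-only access when the
 * disk itself is read-only). EACCES is deliberately ignored, since it
 * is expected for paths on root-squashing NFS. */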
static int
qemuSetupDiskPathAllow(virDomainDiskDefPtr disk,
                       const char *path,
                       size_t depth ATTRIBUTE_UNUSED,
                       void *opaque)
{
    virDomainObjPtr vm = opaque;
    qemuDomainObjPrivatePtr priv = vm->privateData;
    int rc;

    VIR_DEBUG("Process path %s for disk", path);
    rc = virCgroupAllowDevicePath(priv->cgroup, path,
                                  (disk->readonly ? VIR_CGROUP_DEVICE_READ
                                   : VIR_CGROUP_DEVICE_RW));
    virDomainAuditCgroupPath(vm, priv->cgroup, "allow", path,
                             disk->readonly ? "r" : "rw", rc);
    if (rc < 0) {
        if (rc == -EACCES) { /* Get this for root squash NFS */
            VIR_DEBUG("Ignoring EACCES for %s", path);
        } else {
            virReportSystemError(-rc,
                                 _("Unable to allow access for disk path %s"),
                                 path);
            return -1;
        }
    }
    return 0;
}

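/* Allow a disk (including its whole backing chain) in the devices
 * cgroup; a no-op when the devices controller is unavailable. */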
int qemuSetupDiskCgroup(virDomainObjPtr vm,
                        virDomainDiskDefPtr disk)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;

    if (!virCgroupHasController(priv->cgroup,
                                VIR_CGROUP_CONTROLLER_DEVICES))
        return 0;

    return virDomainDiskDefForeachPath(disk,
                                       true,
                                       qemuSetupDiskPathAllow,
                                       vm);
}

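/* Callback for virDomainDiskDefForeachPath: revoke all access to one
 * path backing a disk when the disk is removed from the domain. */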
static int
qemuTeardownDiskPathDeny(virDomainDiskDefPtr disk ATTRIBUTE_UNUSED,
                         const char *path,
                         size_t depth ATTRIBUTE_UNUSED,
                         void *opaque)
{
    virDomainObjPtr vm = opaque;
    qemuDomainObjPrivatePtr priv = vm->privateData;
    int rc;

    VIR_DEBUG("Process path %s for disk", path);
    rc = virCgroupDenyDevicePath(priv->cgroup, path,
                                 VIR_CGROUP_DEVICE_RWM);
    virDomainAuditCgroupPath(vm, priv->cgroup, "deny", path, "rwm", rc);
    if (rc < 0) {
        if (rc == -EACCES) { /* Get this for root squash NFS */
            VIR_DEBUG("Ignoring EACCES for %s", path);
        } else {
            virReportSystemError(-rc,
                                 _("Unable to deny access for disk path %s"),
                                 path);
            return -1;
        }
    }
    return 0;
}

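/* Deny a disk (including its whole backing chain) in the devices
 * cgroup; a no-op when the devices controller is unavailable. */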
int qemuTeardownDiskCgroup(virDomainObjPtr vm,
                           virDomainDiskDefPtr disk)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;

    if (!virCgroupHasController(priv->cgroup,
                                VIR_CGROUP_CONTROLLER_DEVICES))
        return 0;

    return virDomainDiskDefForeachPath(disk,
                                       true,
                                       qemuTeardownDiskPathDeny,
                                       vm);
}

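/* Grant access to the host device node backing a character device;
 * only type='dev' chardevs reference a host device node, so anything
 * else is a no-op. */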
static int
qemuSetupChrSourceCgroup(virDomainDefPtr def,
                         virDomainChrSourceDefPtr dev,
                         void *opaque)
{
    virDomainObjPtr vm = opaque;
    qemuDomainObjPrivatePtr priv = vm->privateData;
    int rc;

    if (dev->type != VIR_DOMAIN_CHR_TYPE_DEV)
        return 0;

    VIR_DEBUG("Process path '%s' for device", dev->data.file.path);

    rc = virCgroupAllowDevicePath(priv->cgroup, dev->data.file.path,
                                  VIR_CGROUP_DEVICE_RW);
    virDomainAuditCgroupPath(vm, priv->cgroup, "allow",
                             dev->data.file.path, "rw", rc);
    if (rc < 0) {
        virReportSystemError(-rc,
                             _("Unable to allow device %s for %s"),
                             dev->data.file.path, def->name);
        return -1;
    }

    return 0;
}

static int
qemuSetupChardevCgroup(virDomainDefPtr def,
                       virDomainChrDefPtr dev,
                       void *opaque)
{
    return qemuSetupChrSourceCgroup(def, &dev->source, opaque);
}

static int
qemuSetupTPMCgroup(virDomainDefPtr def,
                   virDomainTPMDefPtr dev,
                   void *opaque)
{
    int rc = 0;

    switch (dev->type) {
    case VIR_DOMAIN_TPM_TYPE_PASSTHROUGH:
        rc = qemuSetupChrSourceCgroup(def, &dev->data.passthrough.source,
                                      opaque);
        break;
    case VIR_DOMAIN_TPM_TYPE_LAST:
        break;
    }

    return rc;
}

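/* Callback for virUSBDeviceFileIterate: allow one USB device node in
 * the devices cgroup. */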
static int
qemuSetupHostUsbDeviceCgroup(virUSBDevicePtr dev ATTRIBUTE_UNUSED,
                             const char *path,
                             void *opaque)
{
    virDomainObjPtr vm = opaque;
    qemuDomainObjPrivatePtr priv = vm->privateData;
    int rc;

    VIR_DEBUG("Process path '%s' for USB device", path);
    rc = virCgroupAllowDevicePath(priv->cgroup, path,
                                  VIR_CGROUP_DEVICE_RW);
    virDomainAuditCgroupPath(vm, priv->cgroup, "allow", path, "rw", rc);
    if (rc < 0) {
        virReportSystemError(-rc,
                             _("Unable to allow device %s"),
                             path);
        return -1;
    }

    return 0;
}

/* Callback for virSCSIDeviceFileIterate: allow one SCSI device node,
 * read-only when the hostdev is marked read-only. */
static int
qemuSetupHostScsiDeviceCgroup(virSCSIDevicePtr dev ATTRIBUTE_UNUSED,
                              const char *path,
                              void *opaque)
{
    virDomainObjPtr vm = opaque;
    qemuDomainObjPrivatePtr priv = vm->privateData;
    int rc;

    VIR_DEBUG("Process path '%s' for SCSI device", path);

    rc = virCgroupAllowDevicePath(priv->cgroup, path,
                                  virSCSIDeviceGetReadonly(dev) ?
                                  VIR_CGROUP_DEVICE_READ :
                                  VIR_CGROUP_DEVICE_RW);

    virDomainAuditCgroupPath(vm, priv->cgroup, "allow", path,
                             virSCSIDeviceGetReadonly(dev) ? "r" : "rw", rc);
    if (rc < 0) {
        virReportSystemError(-rc,
                             _("Unable to allow device %s"),
                             path);
        return -1;
    }

    return 0;
}

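/* Grant devices-cgroup access for a hostdev at domain startup or
 * hotplug: VFIO group nodes for PCI, device nodes for USB and SCSI. */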
int
qemuSetupHostdevCGroup(virDomainObjPtr vm,
                       virDomainHostdevDefPtr dev)
{
    int ret = -1;
    qemuDomainObjPrivatePtr priv = vm->privateData;
    virPCIDevicePtr pci = NULL;
    virUSBDevicePtr usb = NULL;
    virSCSIDevicePtr scsi = NULL;
    char *path = NULL;

    /* currently this only does something for PCI devices using vfio
     * for device assignment, but it is called for *all* hostdev
     * devices.
     */

    if (!virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_DEVICES))
        return 0;

    if (dev->mode == VIR_DOMAIN_HOSTDEV_MODE_SUBSYS) {

        switch (dev->source.subsys.type) {
        case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_PCI:
            if (dev->source.subsys.u.pci.backend
                == VIR_DOMAIN_HOSTDEV_PCI_BACKEND_VFIO) {
                int rc;

                pci = virPCIDeviceNew(dev->source.subsys.u.pci.addr.domain,
                                      dev->source.subsys.u.pci.addr.bus,
                                      dev->source.subsys.u.pci.addr.slot,
                                      dev->source.subsys.u.pci.addr.function);
                if (!pci)
                    goto cleanup;

                if (!(path = virPCIDeviceGetVFIOGroupDev(pci)))
                    goto cleanup;

                VIR_DEBUG("Cgroup allow %s for PCI device assignment", path);
                rc = virCgroupAllowDevicePath(priv->cgroup, path,
                                              VIR_CGROUP_DEVICE_RW);
                virDomainAuditCgroupPath(vm, priv->cgroup,
                                         "allow", path, "rw", rc);
                if (rc < 0) {
                    virReportSystemError(-rc,
                                         _("Unable to allow access "
                                           "for device path %s"),
                                         path);
                    goto cleanup;
                }
            }
            break;

        case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_USB:
            /* NB: hostdev->missing wasn't previously checked in the
             * case of hotplug, only when starting a domain. Now it is
             * always checked, and the cgroup setup skipped if true.
             */
            if (dev->missing)
                break;
            if ((usb = virUSBDeviceNew(dev->source.subsys.u.usb.bus,
                                       dev->source.subsys.u.usb.device,
                                       NULL)) == NULL) {
                goto cleanup;
            }

            /* oddly, qemuSetupHostUsbDeviceCgroup doesn't ever
             * reference the usb object we just created
             */
            if (virUSBDeviceFileIterate(usb, qemuSetupHostUsbDeviceCgroup,
                                        vm) < 0) {
                goto cleanup;
            }
            break;

        case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_SCSI:
            if ((scsi = virSCSIDeviceNew(dev->source.subsys.u.scsi.adapter,
                                         dev->source.subsys.u.scsi.bus,
                                         dev->source.subsys.u.scsi.target,
                                         dev->source.subsys.u.scsi.unit,
                                         dev->readonly)) == NULL)
                goto cleanup;

            if (virSCSIDeviceFileIterate(scsi,
                                         qemuSetupHostScsiDeviceCgroup,
                                         vm) < 0)
                goto cleanup;

        default:
            break;
        }
    }

    ret = 0;
cleanup:
    virPCIDeviceFree(pci);
    virUSBDeviceFree(usb);
    virSCSIDeviceFree(scsi);
    VIR_FREE(path);
    return ret;
}

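/* Revoke devices-cgroup access for a hostdev when it is detached;
 * currently only VFIO PCI devices need any teardown. */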
int
qemuTeardownHostdevCgroup(virDomainObjPtr vm,
                          virDomainHostdevDefPtr dev)
{
    int ret = -1;
    qemuDomainObjPrivatePtr priv = vm->privateData;
    virPCIDevicePtr pci = NULL;
    char *path = NULL;

    /* currently this only does something for PCI devices using vfio
     * for device assignment, but it is called for *all* hostdev
     * devices.
     */

    if (!virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_DEVICES))
        return 0;

    if (dev->mode == VIR_DOMAIN_HOSTDEV_MODE_SUBSYS) {

        switch (dev->source.subsys.type) {
        case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_PCI:
            if (dev->source.subsys.u.pci.backend
                == VIR_DOMAIN_HOSTDEV_PCI_BACKEND_VFIO) {
                int rc;

                pci = virPCIDeviceNew(dev->source.subsys.u.pci.addr.domain,
                                      dev->source.subsys.u.pci.addr.bus,
                                      dev->source.subsys.u.pci.addr.slot,
                                      dev->source.subsys.u.pci.addr.function);
                if (!pci)
                    goto cleanup;

                if (!(path = virPCIDeviceGetVFIOGroupDev(pci)))
                    goto cleanup;

                VIR_DEBUG("Cgroup deny %s for PCI device assignment", path);
                rc = virCgroupDenyDevicePath(priv->cgroup, path,
                                             VIR_CGROUP_DEVICE_RWM);
                virDomainAuditCgroupPath(vm, priv->cgroup,
                                         "deny", path, "rwm", rc);
                if (rc < 0) {
                    virReportSystemError(-rc,
                                         _("Unable to deny access "
                                           "for device path %s"),
                                         path);
                    goto cleanup;
                }
            }
            break;
        case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_USB:
            /* nothing to tear down for USB */
            break;
        default:
            break;
        }
    }

    ret = 0;
cleanup:
    virPCIDeviceFree(pci);
    VIR_FREE(path);
    return ret;
}

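/* Apply <blkiotune> settings: the overall weight plus any per-device
 * weights. Fails if tuning is requested but the blkio controller is
 * missing; silently succeeds when nothing was requested. */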
static int
qemuSetupBlkioCgroup(virDomainObjPtr vm)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    int rc = -1;
    int i;

    if (!virCgroupHasController(priv->cgroup,
                                VIR_CGROUP_CONTROLLER_BLKIO)) {
        if (vm->def->blkio.weight || vm->def->blkio.ndevices) {
            virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                           _("Block I/O tuning is not available on this host"));
            return -1;
        } else {
            return 0;
        }
    }

    if (vm->def->blkio.weight != 0) {
        rc = virCgroupSetBlkioWeight(priv->cgroup, vm->def->blkio.weight);
        if (rc != 0) {
            virReportSystemError(-rc,
                                 _("Unable to set io weight for domain %s"),
                                 vm->def->name);
            return -1;
        }
    }

    if (vm->def->blkio.ndevices) {
        for (i = 0; i < vm->def->blkio.ndevices; i++) {
            virBlkioDeviceWeightPtr dw = &vm->def->blkio.devices[i];
            if (!dw->weight)
                continue;
            rc = virCgroupSetBlkioDeviceWeight(priv->cgroup, dw->path,
                                               dw->weight);
            if (rc != 0) {
                virReportSystemError(-rc,
                                     _("Unable to set io device weight "
                                       "for domain %s"),
                                     vm->def->name);
                return -1;
            }
        }
    }

    return 0;
}

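/* Apply <memtune> limits. When no hard limit is configured, a
 * heuristic limit is computed below to contain a compromised QEMU. */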
static int
qemuSetupMemoryCgroup(virDomainObjPtr vm)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    unsigned long long hard_limit;
    int rc;
    int i;

    if (!virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_MEMORY)) {
        if (vm->def->mem.hard_limit != 0 ||
            vm->def->mem.soft_limit != 0 ||
            vm->def->mem.swap_hard_limit != 0) {
            virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                           _("Memory cgroup is not available on this host"));
            return -1;
        } else {
            return 0;
        }
    }

    hard_limit = vm->def->mem.hard_limit;
    if (!hard_limit) {
        /* If there is no hard_limit set, set a reasonable one to avoid
         * system thrashing caused by exploited qemu. A 'reasonable
         * limit' has been chosen:
         *     (1 + k) * (domain memory + total video memory) + (32MB for
         *     cache per each disk) + F
         * where k = 0.5 and F = 200MB. The cache for disks is important as
         * kernel cache on the host side counts into the RSS limit. */
        hard_limit = vm->def->mem.max_balloon;
        for (i = 0; i < vm->def->nvideos; i++)
            hard_limit += vm->def->videos[i]->vram;
        hard_limit = hard_limit * 1.5 + 204800;
        hard_limit += vm->def->ndisks * 32768;
    }

    rc = virCgroupSetMemoryHardLimit(priv->cgroup, hard_limit);
    if (rc != 0) {
        virReportSystemError(-rc,
                             _("Unable to set memory hard limit for domain %s"),
                             vm->def->name);
        return -1;
    }
    if (vm->def->mem.soft_limit != 0) {
        rc = virCgroupSetMemorySoftLimit(priv->cgroup, vm->def->mem.soft_limit);
        if (rc != 0) {
            virReportSystemError(-rc,
                                 _("Unable to set memory soft limit for domain %s"),
                                 vm->def->name);
            return -1;
        }
    }

    if (vm->def->mem.swap_hard_limit != 0) {
        rc = virCgroupSetMemSwapHardLimit(priv->cgroup, vm->def->mem.swap_hard_limit);
        if (rc != 0) {
            virReportSystemError(-rc,
                                 _("Unable to set swap hard limit for domain %s"),
                                 vm->def->name);
            return -1;
        }
    }

    return 0;
}

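/* Populate the devices whitelist: deny everything, then re-allow the
 * domain's disks, PTYs, sound devices (when host audio applies), the
 * configured device ACL, chardevs, TPM and hostdevs. */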
static int
qemuSetupDevicesCgroup(virQEMUDriverPtr driver,
                       virDomainObjPtr vm)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    virQEMUDriverConfigPtr cfg = NULL;
    const char *const *deviceACL = NULL;
    int rc = -1;
    int ret = -1;
    int i;

    if (!virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_DEVICES))
        return 0;

    rc = virCgroupDenyAllDevices(priv->cgroup);
    virDomainAuditCgroup(vm, priv->cgroup, "deny", "all", rc == 0);
    if (rc != 0) {
        if (rc == -EPERM) {
            VIR_WARN("Group devices ACL is not accessible, disabling whitelisting");
            return 0;
        }

        virReportSystemError(-rc,
                             _("Unable to deny all devices for %s"), vm->def->name);
        goto cleanup;
    }

    for (i = 0; i < vm->def->ndisks; i++) {
        if (qemuSetupDiskCgroup(vm, vm->def->disks[i]) < 0)
            goto cleanup;
    }

    rc = virCgroupAllowDeviceMajor(priv->cgroup, 'c', DEVICE_PTY_MAJOR,
                                   VIR_CGROUP_DEVICE_RW);
    virDomainAuditCgroupMajor(vm, priv->cgroup, "allow", DEVICE_PTY_MAJOR,
                              "pty", "rw", rc == 0);
    if (rc != 0) {
        virReportSystemError(-rc, "%s",
                             _("unable to allow /dev/pts/ devices"));
        goto cleanup;
    }

    cfg = virQEMUDriverGetConfig(driver);
    deviceACL = cfg->cgroupDeviceACL ?
                (const char *const *)cfg->cgroupDeviceACL :
                defaultDeviceACL;

    if (vm->def->nsounds &&
        (!vm->def->ngraphics ||
         ((vm->def->graphics[0]->type == VIR_DOMAIN_GRAPHICS_TYPE_VNC &&
           cfg->vncAllowHostAudio) ||
          (vm->def->graphics[0]->type == VIR_DOMAIN_GRAPHICS_TYPE_SDL)))) {
        rc = virCgroupAllowDeviceMajor(priv->cgroup, 'c', DEVICE_SND_MAJOR,
                                       VIR_CGROUP_DEVICE_RW);
        virDomainAuditCgroupMajor(vm, priv->cgroup, "allow", DEVICE_SND_MAJOR,
                                  "sound", "rw", rc == 0);
        if (rc != 0) {
            virReportSystemError(-rc, "%s",
                                 _("unable to allow /dev/snd/ devices"));
            goto cleanup;
        }
    }

    for (i = 0; deviceACL[i] != NULL; i++) {
        if (access(deviceACL[i], F_OK) < 0) {
            VIR_DEBUG("Ignoring non-existent device %s",
                      deviceACL[i]);
            continue;
        }

        rc = virCgroupAllowDevicePath(priv->cgroup, deviceACL[i],
                                      VIR_CGROUP_DEVICE_RW);
        virDomainAuditCgroupPath(vm, priv->cgroup, "allow", deviceACL[i], "rw", rc);
        if (rc < 0 &&
            rc != -ENOENT) {
            virReportSystemError(-rc,
                                 _("unable to allow device %s"),
                                 deviceACL[i]);
            goto cleanup;
        }
    }

    if (virDomainChrDefForeach(vm->def,
                               true,
                               qemuSetupChardevCgroup,
                               vm) < 0)
        goto cleanup;

    if (vm->def->tpm &&
        (qemuSetupTPMCgroup(vm->def,
                            vm->def->tpm,
                            vm) < 0))
        goto cleanup;

    for (i = 0; i < vm->def->nhostdevs; i++) {
        if (qemuSetupHostdevCGroup(vm, vm->def->hostdevs[i]) < 0)
            goto cleanup;
    }

    ret = 0;
cleanup:
    virObjectUnref(cfg);
    return ret;
}

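/* Apply strict NUMA memory placement via cpuset.mems, using the
 * advisory nodemask from numad when placement is 'auto'. */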
static int
qemuSetupCpusetCgroup(virDomainObjPtr vm,
                      virBitmapPtr nodemask)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    char *mask = NULL;
    int rc;
    int ret = -1;

    if (!virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPUSET))
        return 0;

    if ((vm->def->numatune.memory.nodemask ||
         (vm->def->numatune.memory.placement_mode ==
          VIR_NUMA_TUNE_MEM_PLACEMENT_MODE_AUTO)) &&
        vm->def->numatune.memory.mode == VIR_DOMAIN_NUMATUNE_MEM_STRICT) {

        if (vm->def->numatune.memory.placement_mode ==
            VIR_NUMA_TUNE_MEM_PLACEMENT_MODE_AUTO)
            mask = virBitmapFormat(nodemask);
        else
            mask = virBitmapFormat(vm->def->numatune.memory.nodemask);

        if (!mask) {
            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                           _("failed to convert memory nodemask"));
            goto cleanup;
        }

        rc = virCgroupSetCpusetMems(priv->cgroup, mask);

        if (rc != 0) {
            virReportSystemError(-rc,
                                 _("Unable to set cpuset.mems for domain %s"),
                                 vm->def->name);
            goto cleanup;
        }
    }

    ret = 0;
cleanup:
    VIR_FREE(mask);
    return ret;
}

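/* Create (or look up) the cgroup for a domain, honouring any
 * <resource><partition> from the XML; only the default "/machine"
 * partition is auto-created. Missing cgroup support is not fatal. */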
int qemuInitCgroup(virQEMUDriverPtr driver,
                   virDomainObjPtr vm,
                   bool startup)
{
    int rc = -1;
    qemuDomainObjPrivatePtr priv = vm->privateData;
    virCgroupPtr parent = NULL;
    virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);

    if (!cfg->privileged)
        goto done;

    virCgroupFree(&priv->cgroup);

    if (!vm->def->resource && startup) {
        virDomainResourceDefPtr res;

        if (VIR_ALLOC(res) < 0) {
            virReportOOMError();
            goto cleanup;
        }

        if (!(res->partition = strdup("/machine"))) {
            virReportOOMError();
            VIR_FREE(res);
            goto cleanup;
        }

        vm->def->resource = res;
    }

    if (vm->def->resource &&
        vm->def->resource->partition) {
        if (vm->def->resource->partition[0] != '/') {
            virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
                           _("Resource partition '%s' must start with '/'"),
                           vm->def->resource->partition);
            goto cleanup;
        }
        /* We only auto-create the default partition. In other
         * cases we expect the sysadmin/app to have done so */
        rc = virCgroupNewPartition(vm->def->resource->partition,
                                   STREQ(vm->def->resource->partition, "/machine"),
                                   cfg->cgroupControllers,
                                   &parent);
        if (rc != 0) {
            if (rc == -ENXIO ||
                rc == -EPERM ||
                rc == -EACCES) { /* No cgroups mounts == success */
                VIR_DEBUG("No cgroups present/configured/accessible, ignoring error");
                goto done;
            }

            virReportSystemError(-rc,
                                 _("Unable to initialize %s cgroup"),
                                 vm->def->resource->partition);
            goto cleanup;
        }

        rc = virCgroupNewDomainPartition(parent,
                                         "qemu",
                                         vm->def->name,
                                         true,
                                         &priv->cgroup);
        if (rc != 0) {
            virReportSystemError(-rc,
                                 _("Unable to create cgroup for %s"),
                                 vm->def->name);
            goto cleanup;
        }
    } else {
        rc = virCgroupNewDriver("qemu",
                                true,
                                cfg->cgroupControllers,
                                &parent);
        if (rc != 0) {
            if (rc == -ENXIO ||
                rc == -EPERM ||
                rc == -EACCES) { /* No cgroups mounts == success */
                VIR_DEBUG("No cgroups present/configured/accessible, ignoring error");
                goto done;
            }

            virReportSystemError(-rc,
                                 _("Unable to create cgroup for %s"),
                                 vm->def->name);
            goto cleanup;
        }

        rc = virCgroupNewDomainDriver(parent,
                                      vm->def->name,
                                      true,
                                      &priv->cgroup);
        if (rc != 0) {
            virReportSystemError(-rc,
                                 _("Unable to create cgroup for %s"),
                                 vm->def->name);
            goto cleanup;
        }
    }

done:
    rc = 0;
cleanup:
    virCgroupFree(&parent);
    virObjectUnref(cfg);
    return rc;
}

int qemuSetupCgroup(virQEMUDriverPtr driver,
                    virDomainObjPtr vm,
                    virBitmapPtr nodemask)
{
    int rc = -1;
    qemuDomainObjPrivatePtr priv = vm->privateData;

    if (qemuInitCgroup(driver, vm, true) < 0)
        return -1;

    if (!priv->cgroup)
        goto done;

    if (qemuSetupDevicesCgroup(driver, vm) < 0)
        goto cleanup;

    if (qemuSetupBlkioCgroup(vm) < 0)
        goto cleanup;

    if (qemuSetupMemoryCgroup(vm) < 0)
        goto cleanup;

    if (vm->def->cputune.shares != 0) {
        if (virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPU)) {
            rc = virCgroupSetCpuShares(priv->cgroup, vm->def->cputune.shares);
            if (rc != 0) {
                virReportSystemError(-rc,
                                     _("Unable to set io cpu shares for domain %s"),
                                     vm->def->name);
                goto cleanup;
            }
        } else {
            virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                           _("CPU tuning is not available on this host"));
        }
    }

    if (qemuSetupCpusetCgroup(vm, nodemask) < 0)
        goto cleanup;

done:
    rc = 0;
cleanup:
    return rc == 0 ? 0 : -1;
}

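/* Set the CFS bandwidth period and quota on a cgroup, rolling the
 * period back to its previous value if setting the quota fails. */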
int qemuSetupCgroupVcpuBW(virCgroupPtr cgroup, unsigned long long period,
                          long long quota)
{
    int rc;
    unsigned long long old_period;

    if (period == 0 && quota == 0)
        return 0;

    if (period) {
        /* get old period, and we can rollback if set quota failed */
        rc = virCgroupGetCpuCfsPeriod(cgroup, &old_period);
        if (rc < 0) {
            virReportSystemError(-rc,
                                 "%s", _("Unable to get cpu bandwidth period"));
            return -1;
        }

        rc = virCgroupSetCpuCfsPeriod(cgroup, period);
        if (rc < 0) {
            virReportSystemError(-rc,
                                 "%s", _("Unable to set cpu bandwidth period"));
            return -1;
        }
    }

    if (quota) {
        rc = virCgroupSetCpuCfsQuota(cgroup, quota);
        if (rc < 0) {
            virReportSystemError(-rc,
                                 "%s", _("Unable to set cpu bandwidth quota"));
            goto cleanup;
        }
    }

    return 0;

cleanup:
    if (period) {
        rc = virCgroupSetCpuCfsPeriod(cgroup, old_period);
        if (rc < 0)
            virReportSystemError(-rc, "%s",
                                 _("Unable to rollback cpu bandwidth period"));
    }

    return -1;
}

int qemuSetupCgroupVcpuPin(virCgroupPtr cgroup,
                           virDomainVcpuPinDefPtr *vcpupin,
                           int nvcpupin,
                           int vcpuid)
{
    int i;

    for (i = 0; i < nvcpupin; i++) {
        if (vcpuid == vcpupin[i]->vcpuid) {
            return qemuSetupCgroupEmulatorPin(cgroup, vcpupin[i]->cpumask);
        }
    }

    return -1;
}

int qemuSetupCgroupEmulatorPin(virCgroupPtr cgroup,
                               virBitmapPtr cpumask)
{
    int rc = 0;
    char *new_cpus = NULL;

    new_cpus = virBitmapFormat(cpumask);
    if (!new_cpus) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("failed to convert cpu mask"));
        rc = -1;
        goto cleanup;
    }

    rc = virCgroupSetCpusetCpus(cgroup, new_cpus);
    if (rc < 0) {
        virReportSystemError(-rc,
                             "%s",
                             _("Unable to set cpuset.cpus"));
        goto cleanup;
    }

cleanup:
    VIR_FREE(new_cpus);
    return rc;
}

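/* Create one sub-cgroup per vCPU thread, move each thread into it, and
 * apply per-vCPU bandwidth and pinning from <cputune>. */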
int qemuSetupCgroupForVcpu(virDomainObjPtr vm)
{
    virCgroupPtr cgroup_vcpu = NULL;
    qemuDomainObjPrivatePtr priv = vm->privateData;
    virDomainDefPtr def = vm->def;
    int rc;
    unsigned int i, j;
    unsigned long long period = vm->def->cputune.period;
    long long quota = vm->def->cputune.quota;

    if ((period || quota) &&
        !virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPU)) {
        virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                       _("cgroup cpu is required for scheduler tuning"));
        return -1;
    }

    /* We are trying to setup cgroups for CPU pinning, which can also be done
     * with virProcessInfoSetAffinity, thus the lack of cgroups is not fatal
     * here.
     */
    if (priv->cgroup == NULL)
        return 0;

    if (priv->nvcpupids == 0 || priv->vcpupids[0] == vm->pid) {
        /* If we don't know the VCPU<->PID mapping, or all vcpus run in
         * the same thread, we cannot control each vcpu.
         */
        VIR_WARN("Unable to get vcpus' pids.");
        return 0;
    }

    for (i = 0; i < priv->nvcpupids; i++) {
        rc = virCgroupNewVcpu(priv->cgroup, i, true, &cgroup_vcpu);
        if (rc < 0) {
            virReportSystemError(-rc,
                                 _("Unable to create vcpu cgroup for %s(vcpu:"
                                   " %d)"),
                                 vm->def->name, i);
            goto cleanup;
        }

        /* move the thread for vcpu to sub dir */
        rc = virCgroupAddTask(cgroup_vcpu, priv->vcpupids[i]);
        if (rc < 0) {
            virReportSystemError(-rc,
                                 _("unable to add vcpu %d task %d to cgroup"),
                                 i, priv->vcpupids[i]);
            goto cleanup;
        }

        if (period || quota) {
            if (qemuSetupCgroupVcpuBW(cgroup_vcpu, period, quota) < 0)
                goto cleanup;
        }

        /* Set vcpupin in cgroup if vcpupin xml is provided */
        if (virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPUSET)) {
            /* find the right CPU to pin, otherwise
             * qemuSetupCgroupVcpuPin will fail. */
            for (j = 0; j < def->cputune.nvcpupin; j++) {
                if (def->cputune.vcpupin[j]->vcpuid != i)
                    continue;

                if (qemuSetupCgroupVcpuPin(cgroup_vcpu,
                                           def->cputune.vcpupin,
                                           def->cputune.nvcpupin,
                                           i) < 0)
                    goto cleanup;

                break;
            }
        }

        virCgroupFree(&cgroup_vcpu);
    }

    return 0;

cleanup:
    if (cgroup_vcpu) {
        virCgroupRemove(cgroup_vcpu);
        virCgroupFree(&cgroup_vcpu);
    }

    return -1;
}

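/* Create the emulator sub-cgroup, move the emulator threads into it,
 * and apply emulator pinning and bandwidth. With 'auto' placement the
 * pinning is derived from the numad nodemask so that creating the
 * cgroup does not override the affinity numad advised. */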
int qemuSetupCgroupForEmulator(virQEMUDriverPtr driver,
                               virDomainObjPtr vm,
                               virBitmapPtr nodemask)
{
    virBitmapPtr cpumask = NULL;
    virBitmapPtr cpumap = NULL;
    virCgroupPtr cgroup_emulator = NULL;
    virDomainDefPtr def = vm->def;
    qemuDomainObjPrivatePtr priv = vm->privateData;
    unsigned long long period = vm->def->cputune.emulator_period;
    long long quota = vm->def->cputune.emulator_quota;
    int rc;

    if ((period || quota) &&
        !virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPU)) {
        virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                       _("cgroup cpu is required for scheduler tuning"));
        return -1;
    }

    if (priv->cgroup == NULL)
        return 0; /* Not supported, so claim success */

    rc = virCgroupNewEmulator(priv->cgroup, true, &cgroup_emulator);
    if (rc < 0) {
        virReportSystemError(-rc,
                             _("Unable to create emulator cgroup for %s"),
                             vm->def->name);
        goto cleanup;
    }

    rc = virCgroupMoveTask(priv->cgroup, cgroup_emulator);
    if (rc < 0) {
        virReportSystemError(-rc,
                             _("Unable to move tasks from domain cgroup to "
                               "emulator cgroup for %s"),
                             vm->def->name);
        goto cleanup;
    }

    if (def->placement_mode == VIR_DOMAIN_CPU_PLACEMENT_MODE_AUTO) {
        if (!(cpumap = qemuPrepareCpumap(driver, nodemask)))
            goto cleanup;
        cpumask = cpumap;
    } else if (def->cputune.emulatorpin) {
        cpumask = def->cputune.emulatorpin->cpumask;
    } else if (def->cpumask) {
        cpumask = def->cpumask;
    }

    if (cpumask) {
        if (virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPUSET)) {
            rc = qemuSetupCgroupEmulatorPin(cgroup_emulator, cpumask);
            if (rc < 0)
                goto cleanup;
        }
        cpumask = NULL; /* sanity */
    }

    if (period || quota) {
        if (virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPU)) {
            if ((rc = qemuSetupCgroupVcpuBW(cgroup_emulator, period,
                                            quota)) < 0)
                goto cleanup;
        }
    }

    virCgroupFree(&cgroup_emulator);
    virBitmapFree(cpumap);
    return 0;

cleanup:
    virBitmapFree(cpumap);

    if (cgroup_emulator) {
        virCgroupRemove(cgroup_emulator);
        virCgroupFree(&cgroup_emulator);
    }

    /* only error paths reach this label, but rc may still be zero when
     * the failure came from a helper that did not set it, so return -1
     * explicitly rather than rc */
    return -1;
}

int qemuRemoveCgroup(virDomainObjPtr vm)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;

    if (priv->cgroup == NULL)
        return 0; /* Not supported, so claim success */

    return virCgroupRemove(priv->cgroup);
}

int qemuAddToCgroup(virDomainObjPtr vm)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    int rc;

    if (priv->cgroup == NULL)
        return 0; /* Not supported, so claim success */

    rc = virCgroupAddTask(priv->cgroup, getpid());
    if (rc != 0) {
        virReportSystemError(-rc,
                             _("unable to add domain %s task %d to cgroup"),
                             vm->def->name, getpid());
        return -1;
    }

    return 0;
}