/*
 * qemu_driver.c: core driver methods for managing qemu guests
 *
 * Copyright (C) 2006-2011 Red Hat, Inc.
 * Copyright (C) 2006 Daniel P. Berrange
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Author: Daniel P. Berrange <berrange@redhat.com>
 */
#include <config.h>

#include <sys/types.h>
#include <sys/poll.h>
#include <sys/time.h>
#include <dirent.h>
#include <limits.h>
#include <string.h>
#include <stdio.h>
#include <stdarg.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <sys/utsname.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <signal.h>
#include <paths.h>
#include <sys/wait.h>
#include <sys/ioctl.h>
#include <sys/un.h>
#include <byteswap.h>

#include "qemu_driver.h"
#include "qemu_conf.h"
#include "qemu_capabilities.h"
#include "qemu_command.h"
#include "qemu_cgroup.h"
#include "qemu_hostdev.h"
#include "qemu_hotplug.h"
#include "qemu_monitor.h"
#include "qemu_bridge_filter.h"
#include "qemu_process.h"
#include "qemu_migration.h"

#include "virterror_internal.h"
#include "logging.h"
#include "datatypes.h"
#include "buf.h"
#include "util.h"
#include "nodeinfo.h"
#include "stats_linux.h"
#include "capabilities.h"
#include "memory.h"
#include "uuid.h"
#include "domain_conf.h"
#include "domain_audit.h"
#include "node_device_conf.h"
#include "pci.h"
#include "hostusb.h"
#include "processinfo.h"
#include "libvirt_internal.h"
#include "xml.h"
#include "cpu/cpu.h"
#include "macvtap.h"
#include "sysinfo.h"
#include "domain_nwfilter.h"
#include "hooks.h"
#include "storage_file.h"
#include "virfile.h"
#include "fdstream.h"
#include "configmake.h"
#include "threadpool.h"
#include "locking/lock_manager.h"
#include "locking/domain_lock.h"
#include "virkeycode.h"

#define VIR_FROM_THIS VIR_FROM_QEMU

#define QEMU_NB_MEM_PARAM 3

#if HAVE_LINUX_KVM_H
# include <linux/kvm.h>
#endif

/* device for kvm ioctls */
#define KVM_DEVICE "/dev/kvm"

/* add definitions missing in older linux/kvm.h */
#ifndef KVMIO
# define KVMIO 0xAE
#endif
#ifndef KVM_CHECK_EXTENSION
# define KVM_CHECK_EXTENSION _IO(KVMIO, 0x03)
#endif
#ifndef KVM_CAP_NR_VCPUS
# define KVM_CAP_NR_VCPUS 9 /* returns max vcpus per vm */
#endif

#define QEMU_NB_BLKIO_PARAM 1

static void processWatchdogEvent(void *data, void *opaque);

static int qemudShutdown(void);

static int qemuDomainObjStart(virConnectPtr conn,
                              struct qemud_driver *driver,
                              virDomainObjPtr vm,
                              unsigned int flags);

static int qemudDomainGetMaxVcpus(virDomainPtr dom);

struct qemud_driver *qemu_driver = NULL;
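
/* Bundle of the driver state and a connection handle, passed as the
 * single opaque argument to the autostart hash iterator callback. */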
struct qemuAutostartData {
    struct qemud_driver *driver;
    virConnectPtr conn;
};
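
/* virHashForEach callback: start one domain if it is marked for
 * autostart and not already running. */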
static void
qemuAutostartDomain(void *payload, const void *name ATTRIBUTE_UNUSED,
                    void *opaque)
{
    virDomainObjPtr vm = payload;
    struct qemuAutostartData *data = opaque;
    virErrorPtr err;
    int flags = 0;

    if (data->driver->autoStartBypassCache)
        flags |= VIR_DOMAIN_START_BYPASS_CACHE;

    virDomainObjLock(vm);
    virResetLastError();
    if (vm->autostart &&
        !virDomainObjIsActive(vm)) {
        if (qemuDomainObjBeginJobWithDriver(data->driver, vm,
                                            QEMU_JOB_MODIFY) < 0) {
            err = virGetLastError();
            VIR_ERROR(_("Failed to start job on VM '%s': %s"),
                      vm->def->name,
                      err ? err->message : _("unknown error"));
            goto cleanup;
        }

        if (qemuDomainObjStart(data->conn, data->driver, vm, flags) < 0) {
            err = virGetLastError();
            VIR_ERROR(_("Failed to autostart VM '%s': %s"),
                      vm->def->name,
                      err ? err->message : _("unknown error"));
        }

        if (qemuDomainObjEndJob(data->driver, vm) == 0)
            vm = NULL;
    }

cleanup:
    if (vm)
        virDomainObjUnlock(vm);
}
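
/* Autostart all domains flagged for it: opens a qemu:///system or
 * qemu:///session connection (needed by the startup code to look up
 * bridges) and walks the domain hash table. */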
static void
qemuAutostartDomains(struct qemud_driver *driver)
{
    /* XXX: Figure out a better way to do this. The domain
     * startup code needs a connection handle in order
     * to lookup the bridge associated with a virtual
     * network
     */
    virConnectPtr conn = virConnectOpen(driver->privileged ?
                                        "qemu:///system" :
                                        "qemu:///session");
    /* Ignoring NULL conn which is mostly harmless here */
    struct qemuAutostartData data = { driver, conn };

    qemuDriverLock(driver);
    virHashForEach(driver->domains.objs, qemuAutostartDomain, &data);
    qemuDriverUnlock(driver);

    if (conn)
        virConnectClose(conn);
}
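
/* Build the security manager setup for the driver: the configured
 * security model, combined with a DAC manager when running privileged. */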
static int
qemuSecurityInit(struct qemud_driver *driver)
{
    virSecurityManagerPtr mgr = virSecurityManagerNew(driver->securityDriverName,
                                                      driver->allowDiskFormatProbing);
    if (!mgr)
        goto error;

    if (driver->privileged) {
        virSecurityManagerPtr dac = virSecurityManagerNewDAC(driver->user,
                                                             driver->group,
                                                             driver->allowDiskFormatProbing,
                                                             driver->dynamicOwnership);
        if (!dac)
            goto error;

        if (!(driver->securityManager = virSecurityManagerNewStack(mgr,
                                                                    dac))) {
            virSecurityManagerFree(dac);
            goto error;
        }
    } else {
        driver->securityManager = mgr;
    }

    return 0;

error:
    VIR_ERROR(_("Failed to initialize security drivers"));
    virSecurityManagerFree(mgr);
    return -1;
}
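
/* Build the host capabilities object: basic arch/machine capabilities
 * from qemuCapsInit(), default disk driver settings, domain private-data
 * and namespace hooks, the host UUID, and the security model/DOI. */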
static virCapsPtr
qemuCreateCapabilities(virCapsPtr oldcaps,
                       struct qemud_driver *driver)
{
    virCapsPtr caps;

    /* Basic host arch / guest machine capabilities */
    if (!(caps = qemuCapsInit(oldcaps))) {
        virReportOOMError();
        return NULL;
    }

    if (driver->allowDiskFormatProbing) {
        caps->defaultDiskDriverName = NULL;
        caps->defaultDiskDriverType = NULL;
    } else {
        caps->defaultDiskDriverName = "qemu";
        caps->defaultDiskDriverType = "raw";
    }

    qemuDomainSetPrivateDataHooks(caps);
    qemuDomainSetNamespaceHooks(caps);

    if (virGetHostUUID(caps->host.host_uuid)) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR,
                        "%s", _("cannot get the host uuid"));
        goto err_exit;
    }

    /* Security driver data */
    const char *doi, *model;

    doi = virSecurityManagerGetDOI(driver->securityManager);
    model = virSecurityManagerGetModel(driver->securityManager);
    if (STRNEQ(model, "none")) {
        if (!(caps->host.secModel.model = strdup(model)))
            goto no_memory;
        if (!(caps->host.secModel.doi = strdup(doi)))
            goto no_memory;
    }

    VIR_DEBUG("Initialized caps for security driver \"%s\" with "
              "DOI \"%s\"", model, doi);

    return caps;

no_memory:
    virReportOOMError();
err_exit:
    virCapabilitiesFree(caps);
    return NULL;
}
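
/* virHashForEach callback: scan the per-domain snapshot metadata
 * directory, parse each snapshot XML file, register it with the domain,
 * and record which snapshot (if any) claims to be current. */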
static void qemuDomainSnapshotLoad(void *payload,
                                   const void *name ATTRIBUTE_UNUSED,
                                   void *data)
{
    virDomainObjPtr vm = (virDomainObjPtr)payload;
    char *baseDir = (char *)data;
    char *snapDir = NULL;
    DIR *dir = NULL;
    struct dirent *entry;
    char *xmlStr;
    int ret;
    char *fullpath;
    virDomainSnapshotDefPtr def = NULL;
    virDomainSnapshotObjPtr snap = NULL;
    virDomainSnapshotObjPtr current = NULL;
    char ebuf[1024];
    unsigned int flags = (VIR_DOMAIN_SNAPSHOT_PARSE_REDEFINE |
                          VIR_DOMAIN_SNAPSHOT_PARSE_DISKS |
                          VIR_DOMAIN_SNAPSHOT_PARSE_INTERNAL);

    virDomainObjLock(vm);
    if (virAsprintf(&snapDir, "%s/%s", baseDir, vm->def->name) < 0) {
        VIR_ERROR(_("Failed to allocate memory for snapshot directory for domain %s"),
                  vm->def->name);
        goto cleanup;
    }

    VIR_INFO("Scanning for snapshots for domain %s in %s", vm->def->name,
             snapDir);

    if (!(dir = opendir(snapDir))) {
        if (errno != ENOENT)
            VIR_ERROR(_("Failed to open snapshot directory %s for domain %s: %s"),
                      snapDir, vm->def->name,
                      virStrerror(errno, ebuf, sizeof(ebuf)));
        goto cleanup;
    }

    while ((entry = readdir(dir))) {
        if (entry->d_name[0] == '.')
            continue;

        /* NB: ignoring errors, so one malformed config doesn't
           kill the whole process */
        VIR_INFO("Loading snapshot file '%s'", entry->d_name);

        if (virAsprintf(&fullpath, "%s/%s", snapDir, entry->d_name) < 0) {
            VIR_ERROR(_("Failed to allocate memory for path"));
            continue;
        }

        ret = virFileReadAll(fullpath, 1024*1024*1, &xmlStr);
        if (ret < 0) {
            /* Nothing we can do here, skip this one */
            VIR_ERROR(_("Failed to read snapshot file %s: %s"), fullpath,
                      virStrerror(errno, ebuf, sizeof(ebuf)));
            VIR_FREE(fullpath);
            continue;
        }

        def = virDomainSnapshotDefParseString(xmlStr, qemu_driver->caps,
                                              QEMU_EXPECTED_VIRT_TYPES,
                                              flags);
        if (def == NULL) {
            /* Nothing we can do here, skip this one */
            VIR_ERROR(_("Failed to parse snapshot XML from file '%s'"),
                      fullpath);
            VIR_FREE(fullpath);
            VIR_FREE(xmlStr);
            continue;
        }

        snap = virDomainSnapshotAssignDef(&vm->snapshots, def);
        if (snap == NULL) {
            virDomainSnapshotDefFree(def);
        } else if (snap->def->current) {
            current = snap;
            if (!vm->current_snapshot)
                vm->current_snapshot = snap;
        }

        VIR_FREE(fullpath);
        VIR_FREE(xmlStr);
    }

    if (vm->current_snapshot != current) {
        VIR_ERROR(_("Too many snapshots claiming to be current for domain %s"),
                  vm->def->name);
        vm->current_snapshot = NULL;
    }

    /* FIXME: qemu keeps internal track of snapshots. We can get access
     * to this info via the "info snapshots" monitor command for running
     * domains, or via "qemu-img snapshot -l" for shutoff domains. It would
     * be nice to update our internal state based on that, but there is a
     * problem. qemu doesn't track all of the same metadata that we do.
     * In particular we wouldn't be able to fill in the <parent>, which is
     * pretty important in our metadata.
     */

    virResetLastError();

cleanup:
    if (dir)
        closedir(dir);
    VIR_FREE(snapDir);
    virDomainObjUnlock(vm);
}

/**
 * qemudStartup:
 *
 * Initialization function for the QEmu daemon
 */
static int
qemudStartup(int privileged) {
    char *base = NULL;
    char *driverConf = NULL;
    int rc;
    virConnectPtr conn = NULL;

    if (VIR_ALLOC(qemu_driver) < 0)
        return -1;

    if (virMutexInit(&qemu_driver->lock) < 0) {
        VIR_ERROR(_("cannot initialize mutex"));
        VIR_FREE(qemu_driver);
        return -1;
    }
    qemuDriverLock(qemu_driver);
    qemu_driver->privileged = privileged;

    /* Don't have a dom0 so start from 1 */
    qemu_driver->nextvmid = 1;

    if (virDomainObjListInit(&qemu_driver->domains) < 0)
        goto out_of_memory;

    /* Init domain events */
    qemu_driver->domainEventState = virDomainEventStateNew(qemuDomainEventFlush,
                                                           qemu_driver,
                                                           NULL,
                                                           true);
    if (!qemu_driver->domainEventState)
        goto error;

    /* Allocate bitmap for vnc port reservation */
    if ((qemu_driver->reservedVNCPorts =
         virBitmapAlloc(QEMU_VNC_PORT_MAX - QEMU_VNC_PORT_MIN)) == NULL)
        goto out_of_memory;

    /* read the host sysinfo */
    if (privileged)
        qemu_driver->hostsysinfo = virSysinfoRead();

    if (privileged) {
        if (virAsprintf(&qemu_driver->logDir,
                        "%s/log/libvirt/qemu", LOCALSTATEDIR) == -1)
            goto out_of_memory;

        if ((base = strdup (SYSCONFDIR "/libvirt")) == NULL)
            goto out_of_memory;

        if (virAsprintf(&qemu_driver->stateDir,
                        "%s/run/libvirt/qemu", LOCALSTATEDIR) == -1)
            goto out_of_memory;

        if (virAsprintf(&qemu_driver->libDir,
                        "%s/lib/libvirt/qemu", LOCALSTATEDIR) == -1)
            goto out_of_memory;

        if (virAsprintf(&qemu_driver->cacheDir,
                        "%s/cache/libvirt/qemu", LOCALSTATEDIR) == -1)
            goto out_of_memory;
        if (virAsprintf(&qemu_driver->saveDir,
                        "%s/lib/libvirt/qemu/save", LOCALSTATEDIR) == -1)
            goto out_of_memory;
        if (virAsprintf(&qemu_driver->snapshotDir,
                        "%s/lib/libvirt/qemu/snapshot", LOCALSTATEDIR) == -1)
            goto out_of_memory;
        if (virAsprintf(&qemu_driver->autoDumpPath,
                        "%s/lib/libvirt/qemu/dump", LOCALSTATEDIR) == -1)
            goto out_of_memory;
    } else {
        uid_t uid = geteuid();
        char *userdir = virGetUserDirectory(uid);
        if (!userdir)
            goto error;

        if (virAsprintf(&qemu_driver->logDir,
                        "%s/.libvirt/qemu/log", userdir) == -1) {
            VIR_FREE(userdir);
            goto out_of_memory;
        }

        if (virAsprintf(&base, "%s/.libvirt", userdir) == -1) {
            VIR_FREE(userdir);
            goto out_of_memory;
        }
        VIR_FREE(userdir);

        if (virAsprintf(&qemu_driver->stateDir, "%s/qemu/run", base) == -1)
            goto out_of_memory;
        if (virAsprintf(&qemu_driver->libDir, "%s/qemu/lib", base) == -1)
            goto out_of_memory;
        if (virAsprintf(&qemu_driver->cacheDir, "%s/qemu/cache", base) == -1)
            goto out_of_memory;
        if (virAsprintf(&qemu_driver->saveDir, "%s/qemu/save", base) == -1)
            goto out_of_memory;
        if (virAsprintf(&qemu_driver->snapshotDir, "%s/qemu/snapshot", base) == -1)
            goto out_of_memory;
        if (virAsprintf(&qemu_driver->autoDumpPath, "%s/qemu/dump", base) == -1)
            goto out_of_memory;
    }

    if (virFileMakePath(qemu_driver->stateDir) < 0) {
        char ebuf[1024];
        VIR_ERROR(_("Failed to create state dir '%s': %s"),
                  qemu_driver->stateDir, virStrerror(errno, ebuf, sizeof ebuf));
        goto error;
    }
    if (virFileMakePath(qemu_driver->libDir) < 0) {
        char ebuf[1024];
        VIR_ERROR(_("Failed to create lib dir '%s': %s"),
                  qemu_driver->libDir, virStrerror(errno, ebuf, sizeof ebuf));
        goto error;
    }
    if (virFileMakePath(qemu_driver->cacheDir) < 0) {
        char ebuf[1024];
        VIR_ERROR(_("Failed to create cache dir '%s': %s"),
                  qemu_driver->cacheDir, virStrerror(errno, ebuf, sizeof ebuf));
        goto error;
    }
    if (virFileMakePath(qemu_driver->saveDir) < 0) {
        char ebuf[1024];
        VIR_ERROR(_("Failed to create save dir '%s': %s"),
                  qemu_driver->saveDir, virStrerror(errno, ebuf, sizeof ebuf));
        goto error;
    }
    if (virFileMakePath(qemu_driver->snapshotDir) < 0) {
        char ebuf[1024];
        VIR_ERROR(_("Failed to create snapshot dir '%s': %s"),
                  qemu_driver->snapshotDir, virStrerror(errno, ebuf, sizeof ebuf));
        goto error;
    }
    if (virFileMakePath(qemu_driver->autoDumpPath) < 0) {
        char ebuf[1024];
        VIR_ERROR(_("Failed to create dump dir '%s': %s"),
                  qemu_driver->autoDumpPath, virStrerror(errno, ebuf, sizeof ebuf));
        goto error;
    }
|
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
/* Configuration paths are either ~/.libvirt/qemu/... (session) or
|
|
|
|
* /etc/libvirt/qemu/... (system).
|
|
|
|
*/
|
|
|
|
if (virAsprintf(&driverConf, "%s/qemu.conf", base) < 0 ||
|
|
|
|
virAsprintf(&qemu_driver->configDir, "%s/qemu", base) < 0 ||
|
|
|
|
virAsprintf(&qemu_driver->autostartDir, "%s/qemu/autostart", base) < 0)
|
|
|
|
goto out_of_memory;
|
Add support for an explicit IO error event
This introduces a new event type
VIR_DOMAIN_EVENT_ID_IO_ERROR
This event includes the action that is about to be taken
as a result of the watchdog triggering
typedef enum {
VIR_DOMAIN_EVENT_IO_ERROR_NONE = 0,
VIR_DOMAIN_EVENT_IO_ERROR_PAUSE,
VIR_DOMAIN_EVENT_IO_ERROR_REPORT,
} virDomainEventIOErrorAction;
In addition it has the source path of the disk that had the
error and its unique device alias. It does not include the
target device name (/dev/sda), since this would preclude
triggering IO errors from other file backed devices (eg
serial ports connected to a file)
Thus there is a new callback definition for this event type
typedef void (*virConnectDomainEventIOErrorCallback)(virConnectPtr conn,
virDomainPtr dom,
const char *srcPath,
const char *devAlias,
int action,
void *opaque);
This is currently wired up to the QEMU block IO error events
* daemon/remote.c: Dispatch IO error events to client
* examples/domain-events/events-c/event-test.c: Watch for
IO error events
* include/libvirt/libvirt.h.in: Define new IO error event ID
and callback signature
* src/conf/domain_event.c, src/conf/domain_event.h,
src/libvirt_private.syms: Extend API to handle IO error events
* src/qemu/qemu_driver.c: Connect to the QEMU monitor event
for block IO errors and emit a libvirt IO error event
* src/remote/remote_driver.c: Receive and dispatch IO error
events to application
* src/remote/remote_protocol.x: Wire protocol definition for
IO error events
* src/qemu/qemu_monitor.c, src/qemu/qemu_monitor.h,
src/qemu/qemu_monitor_json.c: Watch for BLOCK_IO_ERROR event
from QEMU monitor
2010-03-18 19:37:44 +00:00
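For comparison, a hypothetical handler matching the callback signature above; the logging policy and message wording are illustrative only:
static void
exampleIOErrorCb(virConnectPtr conn, virDomainPtr dom,
                 const char *srcPath, const char *devAlias,
                 int action, void *opaque)
{
    /* srcPath is the backing file/device, devAlias the device alias,
     * action one of virDomainEventIOErrorAction */
    if (action == VIR_DOMAIN_EVENT_IO_ERROR_PAUSE)
        fprintf(stderr, "I/O error on %s (%s): guest was paused\n",
                srcPath, devAlias);
    else
        fprintf(stderr, "I/O error on %s (%s) reported\n",
                srcPath, devAlias);
}
/* registered via virConnectDomainEventRegisterAny() with
 * VIR_DOMAIN_EVENT_ID_IO_ERROR, just like the watchdog sketch earlier */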
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
VIR_FREE(base);
|
2010-03-18 19:37:44 +00:00
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
rc = virCgroupForDriver("qemu", &qemu_driver->cgroup, privileged, 1);
|
|
|
|
if (rc < 0) {
|
|
|
|
char buf[1024];
|
|
|
|
VIR_INFO("Unable to create cgroup for driver: %s",
|
|
|
|
virStrerror(-rc, buf, sizeof(buf)));
|
2010-03-18 19:37:44 +00:00
|
|
|
}
|
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
if (qemudLoadDriverConfig(qemu_driver, driverConf) < 0) {
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
VIR_FREE(driverConf);
|
2010-02-16 12:07:49 +00:00
|
|
|
|
2010-10-26 14:04:46 +00:00
|
|
|
/* We should always at least have the 'nop' manager, so
|
|
|
|
* NULLs here are a fatal error
|
|
|
|
*/
|
|
|
|
if (!qemu_driver->lockManager) {
|
|
|
|
VIR_ERROR(_("Missing lock manager implementation"));
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
if (qemuSecurityInit(qemu_driver) < 0)
|
|
|
|
goto error;
|
2010-02-16 12:07:49 +00:00
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
if ((qemu_driver->caps = qemuCreateCapabilities(NULL,
|
|
|
|
qemu_driver)) == NULL)
|
|
|
|
goto error;
|
2010-02-16 12:07:49 +00:00
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
if ((qemu_driver->activePciHostdevs = pciDeviceListNew()) == NULL)
|
Refactor the security drivers to simplify usage
The current security driver usage requires horrible code like
if (driver->securityDriver &&
driver->securityDriver->domainSetSecurityHostdevLabel &&
driver->securityDriver->domainSetSecurityHostdevLabel(driver->securityDriver,
vm, hostdev) < 0)
This pair of checks for NULL clutters up the code, making the driver
calls 2 lines longer than they really need to be. The goal of the
patchset is to change the calling convention to simply
if (virSecurityManagerSetHostdevLabel(driver->securityDriver,
vm, hostdev) < 0)
The first check for 'driver->securityDriver' being NULL is removed
by introducing a 'no op' security driver that will always be present
if no real driver is enabled. This guarantees driver->securityDriver
!= NULL.
The second check for 'driver->securityDriver->domainSetSecurityHostdevLabel'
being non-NULL is hidden in a new abstraction called virSecurityManager.
This separates the driver callbacks from the main internal API. The addition
of a virSecurityManager object, that is separate from the virSecurityDriver
struct also allows for security drivers to carry state / configuration
information directly. Thus the DAC/Stack drivers from src/qemu which
used to pull config from 'struct qemud_driver' can now be moved into
the 'src/security' directory and store their config directly.
* src/qemu/qemu_conf.h, src/qemu/qemu_driver.c: Update to
use new virSecurityManager APIs
* src/qemu/qemu_security_dac.c, src/qemu/qemu_security_dac.h
src/qemu/qemu_security_stacked.c, src/qemu/qemu_security_stacked.h:
Move into src/security directory
* src/security/security_stack.c, src/security/security_stack.h,
src/security/security_dac.c, src/security/security_dac.h: Generic
versions of previous QEMU specific drivers
* src/security/security_apparmor.c, src/security/security_apparmor.h,
src/security/security_driver.c, src/security/security_driver.h,
src/security/security_selinux.c, src/security/security_selinux.h:
Update to take virSecurityManagerPtr object as the first param
in all callbacks
* src/security/security_nop.c, src/security/security_nop.h: Stub
implementation of all security driver APIs.
* src/security/security_manager.h, src/security/security_manager.c:
New internal API for invoking security drivers
* src/libvirt.c: Add missing debug for security APIs
2010-11-17 20:26:30 +00:00
|
|
|
goto error;
|
2010-02-02 16:19:20 +00:00
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
if (privileged) {
|
|
|
|
if (chown(qemu_driver->libDir, qemu_driver->user, qemu_driver->group) < 0) {
|
|
|
|
virReportSystemError(errno,
|
|
|
|
_("unable to set ownership of '%s' to user %d:%d"),
|
|
|
|
qemu_driver->libDir, qemu_driver->user, qemu_driver->group);
|
|
|
|
goto error;
|
2007-06-26 20:41:25 +00:00
|
|
|
}
|
2011-02-14 16:09:39 +00:00
|
|
|
if (chown(qemu_driver->cacheDir, qemu_driver->user, qemu_driver->group) < 0) {
|
2011-01-23 21:09:40 +00:00
|
|
|
virReportSystemError(errno,
|
2011-02-14 16:09:39 +00:00
|
|
|
_("unable to set ownership of '%s' to %d:%d"),
|
|
|
|
qemu_driver->cacheDir, qemu_driver->user, qemu_driver->group);
|
|
|
|
goto error;
|
2011-01-23 21:09:40 +00:00
|
|
|
}
|
2011-02-14 16:09:39 +00:00
|
|
|
if (chown(qemu_driver->saveDir, qemu_driver->user, qemu_driver->group) < 0) {
|
|
|
|
virReportSystemError(errno,
|
|
|
|
_("unable to set ownership of '%s' to %d:%d"),
|
|
|
|
qemu_driver->saveDir, qemu_driver->user, qemu_driver->group);
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
if (chown(qemu_driver->snapshotDir, qemu_driver->user, qemu_driver->group) < 0) {
|
|
|
|
virReportSystemError(errno,
|
|
|
|
_("unable to set ownership of '%s' to %d:%d"),
|
|
|
|
qemu_driver->snapshotDir, qemu_driver->user, qemu_driver->group);
|
|
|
|
goto error;
|
2007-06-26 20:41:25 +00:00
|
|
|
}
|
2008-07-11 19:34:11 +00:00
|
|
|
}
|
2007-06-26 20:41:25 +00:00
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
/* If hugetlbfs is present, then we need to create a sub-directory within
|
|
|
|
* it, since we can't assume the root mount point has permissions that
|
|
|
|
* will let our spawned QEMU instances use it.
|
|
|
|
*
|
|
|
|
* NB the check for '/', since the user may configure "" to disable hugepages
|
|
|
|
* even when mounted
|
2010-02-11 16:19:34 +00:00
|
|
|
*/
|
2011-02-14 16:09:39 +00:00
|
|
|
if (qemu_driver->hugetlbfs_mount &&
|
|
|
|
qemu_driver->hugetlbfs_mount[0] == '/') {
|
|
|
|
char *mempath = NULL;
|
|
|
|
if (virAsprintf(&mempath, "%s/libvirt/qemu", qemu_driver->hugetlbfs_mount) < 0)
|
|
|
|
goto out_of_memory;
|
2010-03-26 14:57:58 +00:00
|
|
|
|
2011-07-05 21:02:53 +00:00
|
|
|
if (virFileMakePath(mempath) < 0) {
|
|
|
|
virReportSystemError(errno,
|
2011-02-14 16:09:39 +00:00
|
|
|
_("unable to create hugepage path %s"), mempath);
|
|
|
|
VIR_FREE(mempath);
|
|
|
|
goto error;
|
2010-11-16 15:39:26 +00:00
|
|
|
}
|
2011-02-14 16:09:39 +00:00
|
|
|
if (qemu_driver->privileged &&
|
|
|
|
chown(mempath, qemu_driver->user, qemu_driver->group) < 0) {
|
|
|
|
virReportSystemError(errno,
|
|
|
|
_("unable to set ownership on %s to %d:%d"),
|
|
|
|
mempath, qemu_driver->user, qemu_driver->group);
|
|
|
|
VIR_FREE(mempath);
|
|
|
|
goto error;
|
2009-01-19 21:55:54 +00:00
|
|
|
}
|
2010-11-22 23:09:13 +00:00
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
qemu_driver->hugepage_path = mempath;
|
2009-05-11 13:37:19 +00:00
|
|
|
}
|
2009-05-11 13:34:37 +00:00
|
|
|
|
2011-06-23 09:37:57 +00:00
|
|
|
if (qemuProcessAutoDestroyInit(qemu_driver) < 0)
|
|
|
|
goto error;
|
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
/* Get all the running persistent or transient configs first */
|
|
|
|
if (virDomainLoadAllConfigs(qemu_driver->caps,
|
|
|
|
&qemu_driver->domains,
|
|
|
|
qemu_driver->stateDir,
|
|
|
|
NULL,
|
2011-07-11 17:29:09 +00:00
|
|
|
1, QEMU_EXPECTED_VIRT_TYPES,
|
|
|
|
NULL, NULL) < 0)
|
2011-02-14 16:09:39 +00:00
|
|
|
goto error;
|
2009-04-03 14:10:17 +00:00
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
conn = virConnectOpen(qemu_driver->privileged ?
|
|
|
|
"qemu:///system" :
|
|
|
|
"qemu:///session");
|
2009-10-13 14:27:58 +00:00
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
qemuProcessReconnectAll(conn, qemu_driver);
|
2009-10-13 14:27:58 +00:00
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
/* Then inactive persistent configs */
|
|
|
|
if (virDomainLoadAllConfigs(qemu_driver->caps,
|
|
|
|
&qemu_driver->domains,
|
|
|
|
qemu_driver->configDir,
|
|
|
|
qemu_driver->autostartDir,
|
2011-07-11 17:29:09 +00:00
|
|
|
0, QEMU_EXPECTED_VIRT_TYPES,
|
|
|
|
NULL, NULL) < 0)
|
2011-02-14 16:09:39 +00:00
|
|
|
goto error;
|
2009-10-13 14:27:58 +00:00
|
|
|
|
Detect PCI addresses at QEMU startup
Hotunplug of devices requires that we know their PCI address. Even
hotplug of SCSI drives requires that we know the PCI address of
the SCSI controller to attach the drive to. We can find this out
by running 'info pci' and then correlating the vendor/product IDs
with the devices we booted with.
Although this approach is somewhat fragile, it is the only viable
option with QEMU < 0.12, since there is no way for libvirt to set
explicit PCI addresses when creating devices in the first place.
For QEMU > 0.12, this code will not be used.
* src/qemu/qemu_driver.c: Assign all dynamic PCI addresses on
startup of QEMU VM, matching vendor/product IDs
* src/qemu/qemu_monitor.c, src/qemu/qemu_monitor.h,
src/qemu/qemu_monitor_json.c, src/qemu/qemu_monitor_json.h,
src/qemu/qemu_monitor_text.c, src/qemu/qemu_monitor_text.h: Add
API for fetching PCI device address mapping
2009-12-09 21:59:04 +00:00
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
virHashForEach(qemu_driver->domains.objs, qemuDomainSnapshotLoad,
|
|
|
|
qemu_driver->snapshotDir);
|
2009-11-26 13:51:42 +00:00
|
|
|
|
2011-08-12 12:04:31 +00:00
|
|
|
qemu_driver->workerPool = virThreadPoolNew(0, 1, 0, processWatchdogEvent, qemu_driver);
|
2011-02-14 16:09:39 +00:00
|
|
|
if (!qemu_driver->workerPool)
|
|
|
|
goto error;
|
2010-11-19 18:36:29 +00:00
|
|
|
|
2011-04-06 07:53:11 +00:00
|
|
|
qemuDriverUnlock(qemu_driver);
|
|
|
|
|
|
|
|
qemuAutostartDomains(qemu_driver);
|
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
if (conn)
|
|
|
|
virConnectClose(conn);
|
2010-03-17 21:21:03 +00:00
|
|
|
|
2009-10-13 14:27:58 +00:00
|
|
|
return 0;
|
2009-04-03 14:10:17 +00:00
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
out_of_memory:
|
|
|
|
virReportOOMError();
|
|
|
|
error:
|
|
|
|
if (qemu_driver)
|
|
|
|
qemuDriverUnlock(qemu_driver);
|
|
|
|
if (conn)
|
|
|
|
virConnectClose(conn);
|
|
|
|
VIR_FREE(base);
|
|
|
|
VIR_FREE(driverConf);
|
|
|
|
qemudShutdown();
|
2009-10-13 14:27:58 +00:00
|
|
|
return -1;
|
2007-06-26 20:41:25 +00:00
|
|
|
}
|
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
static void qemudNotifyLoadDomain(virDomainObjPtr vm, int newVM, void *opaque)
|
|
|
|
{
|
|
|
|
struct qemud_driver *driver = opaque;
|
2007-06-26 20:41:25 +00:00
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
if (newVM) {
|
|
|
|
virDomainEventPtr event =
|
|
|
|
virDomainEventNewFromObj(vm,
|
|
|
|
VIR_DOMAIN_EVENT_DEFINED,
|
|
|
|
VIR_DOMAIN_EVENT_DEFINED_ADDED);
|
|
|
|
if (event)
|
|
|
|
qemuDomainEventQueue(driver, event);
|
2011-02-02 18:16:41 +00:00
|
|
|
}
|
2011-02-14 16:09:39 +00:00
|
|
|
}
|
2011-02-02 18:16:41 +00:00
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
/**
|
|
|
|
* qemudReload:
|
|
|
|
*
|
|
|
|
* Function to reload the QEMU driver; it will recheck the configuration
|
|
|
|
* files and update its state and the networking
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
qemudReload(void) {
|
|
|
|
if (!qemu_driver)
|
|
|
|
return 0;
|
2010-11-16 15:39:26 +00:00
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
qemuDriverLock(qemu_driver);
|
|
|
|
virDomainLoadAllConfigs(qemu_driver->caps,
|
|
|
|
&qemu_driver->domains,
|
|
|
|
qemu_driver->configDir,
|
|
|
|
qemu_driver->autostartDir,
|
2011-07-11 17:29:09 +00:00
|
|
|
0, QEMU_EXPECTED_VIRT_TYPES,
|
|
|
|
qemudNotifyLoadDomain, qemu_driver);
|
2011-02-14 16:09:39 +00:00
|
|
|
qemuDriverUnlock(qemu_driver);
|
2010-11-16 15:39:26 +00:00
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
qemuAutostartDomains(qemu_driver);
|
2010-01-09 02:07:37 +00:00
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
return 0;
|
|
|
|
}
|
2010-03-25 17:46:08 +00:00
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
/**
|
|
|
|
* qemudActive:
|
|
|
|
*
|
|
|
|
* Checks if the QEMU driver is active, i.e. has at least one
|
|
|
|
* active domain
|
|
|
|
*
|
|
|
|
* Returns the number of active domains (non-zero) if active, 0 otherwise
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
qemudActive(void) {
|
|
|
|
int active = 0;
|
2009-11-03 22:41:23 +00:00
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
if (!qemu_driver)
|
|
|
|
return 0;
|
2007-06-26 20:41:25 +00:00
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
/* XXX having to iterate here is not great because it requires many locks */
|
|
|
|
qemuDriverLock(qemu_driver);
|
|
|
|
active = virDomainObjListNumOfDomains(&qemu_driver->domains, 1);
|
|
|
|
qemuDriverUnlock(qemu_driver);
|
|
|
|
return active;
|
|
|
|
}
|
2007-06-26 20:41:25 +00:00
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
/**
|
|
|
|
* qemudShutdown:
|
|
|
|
*
|
|
|
|
* Shut down the QEMU driver, freeing all remaining driver state
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
qemudShutdown(void) {
|
|
|
|
int i;
|
2009-08-26 14:38:32 +00:00
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
if (!qemu_driver)
|
|
|
|
return -1;
|
2007-06-26 20:41:25 +00:00
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
qemuDriverLock(qemu_driver);
|
|
|
|
pciDeviceListFree(qemu_driver->activePciHostdevs);
|
|
|
|
virCapabilitiesFree(qemu_driver->caps);
|
2010-03-26 14:57:58 +00:00
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
virDomainObjListDeinit(&qemu_driver->domains);
|
|
|
|
virBitmapFree(qemu_driver->reservedVNCPorts);
|
2010-03-26 14:57:58 +00:00
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
virSysinfoDefFree(qemu_driver->hostsysinfo);
|
2009-03-03 12:03:44 +00:00
|
|
|
|
2011-06-23 09:37:57 +00:00
|
|
|
qemuProcessAutoDestroyShutdown(qemu_driver);
|
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
VIR_FREE(qemu_driver->configDir);
|
|
|
|
VIR_FREE(qemu_driver->autostartDir);
|
|
|
|
VIR_FREE(qemu_driver->logDir);
|
|
|
|
VIR_FREE(qemu_driver->stateDir);
|
|
|
|
VIR_FREE(qemu_driver->libDir);
|
|
|
|
VIR_FREE(qemu_driver->cacheDir);
|
|
|
|
VIR_FREE(qemu_driver->saveDir);
|
|
|
|
VIR_FREE(qemu_driver->snapshotDir);
|
2011-08-12 16:12:47 +00:00
|
|
|
VIR_FREE(qemu_driver->qemuImgBinary);
|
2011-02-14 16:09:39 +00:00
|
|
|
VIR_FREE(qemu_driver->autoDumpPath);
|
|
|
|
VIR_FREE(qemu_driver->vncTLSx509certdir);
|
|
|
|
VIR_FREE(qemu_driver->vncListen);
|
|
|
|
VIR_FREE(qemu_driver->vncPassword);
|
|
|
|
VIR_FREE(qemu_driver->vncSASLdir);
|
|
|
|
VIR_FREE(qemu_driver->spiceTLSx509certdir);
|
|
|
|
VIR_FREE(qemu_driver->spiceListen);
|
|
|
|
VIR_FREE(qemu_driver->spicePassword);
|
|
|
|
VIR_FREE(qemu_driver->hugetlbfs_mount);
|
|
|
|
VIR_FREE(qemu_driver->hugepage_path);
|
|
|
|
VIR_FREE(qemu_driver->saveImageFormat);
|
|
|
|
VIR_FREE(qemu_driver->dumpImageFormat);
|
2009-03-03 16:53:13 +00:00
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
virSecurityManagerFree(qemu_driver->securityManager);
|
2010-01-06 10:35:30 +00:00
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
ebtablesContextFree(qemu_driver->ebtables);
|
2009-08-14 07:31:11 +00:00
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
if (qemu_driver->cgroupDeviceACL) {
|
|
|
|
for (i = 0 ; qemu_driver->cgroupDeviceACL[i] != NULL ; i++)
|
|
|
|
VIR_FREE(qemu_driver->cgroupDeviceACL[i]);
|
|
|
|
VIR_FREE(qemu_driver->cgroupDeviceACL);
|
2010-02-18 14:13:48 +00:00
|
|
|
}
|
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
/* Free domain callback list */
|
2011-05-12 12:54:07 +00:00
|
|
|
virDomainEventStateFree(qemu_driver->domainEventState);
|
2009-04-21 19:14:50 +00:00
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
if (qemu_driver->brctl)
|
|
|
|
brShutdown(qemu_driver->brctl);
|
2010-05-21 13:52:09 +00:00
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
virCgroupFree(&qemu_driver->cgroup);
|
2007-06-26 20:41:25 +00:00
|
|
|
|
2010-10-26 14:04:46 +00:00
|
|
|
virLockManagerPluginUnref(qemu_driver->lockManager);
|
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
qemuDriverUnlock(qemu_driver);
|
|
|
|
virMutexDestroy(&qemu_driver->lock);
|
|
|
|
virThreadPoolFree(qemu_driver->workerPool);
|
|
|
|
VIR_FREE(qemu_driver);
|
2010-01-09 02:07:37 +00:00
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
return 0;
|
2007-06-26 20:41:25 +00:00
|
|
|
}
|
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
|
2007-06-29 13:23:13 +00:00
|
|
|
static virDrvOpenStatus qemudOpen(virConnectPtr conn,
|
2007-12-05 18:28:05 +00:00
|
|
|
virConnectAuthPtr auth ATTRIBUTE_UNUSED,
|
2011-07-06 22:42:06 +00:00
|
|
|
unsigned int flags)
|
2011-07-06 20:40:19 +00:00
|
|
|
{
|
2011-07-06 22:42:06 +00:00
|
|
|
virCheckFlags(VIR_CONNECT_RO, VIR_DRV_OPEN_ERROR);
|
|
|
|
|
2008-11-17 11:44:51 +00:00
|
|
|
if (conn->uri == NULL) {
|
2009-06-12 12:06:15 +00:00
|
|
|
if (qemu_driver == NULL)
|
|
|
|
return VIR_DRV_OPEN_DECLINED;
|
|
|
|
|
2009-06-12 13:20:13 +00:00
|
|
|
conn->uri = xmlParseURI(qemu_driver->privileged ?
|
2009-06-12 12:06:15 +00:00
|
|
|
"qemu:///system" :
|
|
|
|
"qemu:///session");
|
2008-11-17 11:44:51 +00:00
|
|
|
if (!conn->uri) {
|
2010-02-04 18:19:08 +00:00
|
|
|
virReportOOMError();
|
2008-11-17 11:44:51 +00:00
|
|
|
return VIR_DRV_OPEN_ERROR;
|
|
|
|
}
|
2009-06-12 12:06:15 +00:00
|
|
|
} else {
|
|
|
|
/* If URI isn't 'qemu' it's definitely not for us */
|
|
|
|
if (conn->uri->scheme == NULL ||
|
|
|
|
STRNEQ(conn->uri->scheme, "qemu"))
|
|
|
|
return VIR_DRV_OPEN_DECLINED;
|
|
|
|
|
|
|
|
/* Allow remote driver to deal with URIs with hostname server */
|
|
|
|
if (conn->uri->server != NULL)
|
|
|
|
return VIR_DRV_OPEN_DECLINED;
|
|
|
|
|
2009-07-08 21:59:22 +00:00
|
|
|
if (qemu_driver == NULL) {
|
2010-02-09 18:15:41 +00:00
|
|
|
qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
|
|
|
|
_("qemu state driver is not active"));
|
2009-07-08 21:59:22 +00:00
|
|
|
return VIR_DRV_OPEN_ERROR;
|
|
|
|
}
|
|
|
|
|
2009-12-10 16:39:07 +00:00
|
|
|
if (conn->uri->path == NULL) {
|
2010-02-09 18:15:41 +00:00
|
|
|
qemuReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
_("no QEMU URI path given, try %s"),
|
|
|
|
qemu_driver->privileged
|
|
|
|
? "qemu:///system"
|
|
|
|
: "qemu:///session");
|
2009-12-10 16:39:07 +00:00
|
|
|
return VIR_DRV_OPEN_ERROR;
|
|
|
|
}
|
|
|
|
|
2009-06-12 13:20:13 +00:00
|
|
|
if (qemu_driver->privileged) {
|
2009-06-12 12:06:15 +00:00
|
|
|
if (STRNEQ (conn->uri->path, "/system") &&
|
|
|
|
STRNEQ (conn->uri->path, "/session")) {
|
2010-02-09 18:15:41 +00:00
|
|
|
qemuReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
_("unexpected QEMU URI path '%s', try qemu:///system"),
|
|
|
|
conn->uri->path);
|
2009-06-12 12:06:15 +00:00
|
|
|
return VIR_DRV_OPEN_ERROR;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if (STRNEQ (conn->uri->path, "/session")) {
|
2010-02-09 18:15:41 +00:00
|
|
|
qemuReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
_("unexpected QEMU URI path '%s', try qemu:///session"),
|
|
|
|
conn->uri->path);
|
2009-06-12 12:06:15 +00:00
|
|
|
return VIR_DRV_OPEN_ERROR;
|
|
|
|
}
|
|
|
|
}
|
2007-06-26 22:39:53 +00:00
|
|
|
}
|
|
|
|
conn->privateData = qemu_driver;
|
|
|
|
|
|
|
|
return VIR_DRV_OPEN_SUCCESS;
|
|
|
|
}
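From an application's point of view the URI checks above simply mean picking the right path; a minimal caller-side sketch (the euid-based choice of URI is an assumption about the caller's intent, and the snippet belongs inside some function such as main()):
#include <stdio.h>
#include <unistd.h>
#include <libvirt/libvirt.h>

virConnectPtr conn = virConnectOpen(geteuid() == 0 ? "qemu:///system"
                                                   : "qemu:///session");
if (!conn)
    fprintf(stderr, "failed to open a QEMU connection\n");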
|
|
|
|
|
|
|
|
static int qemudClose(virConnectPtr conn) {
|
2008-12-04 21:04:30 +00:00
|
|
|
struct qemud_driver *driver = conn->privateData;
|
2008-11-21 10:17:22 +00:00
|
|
|
|
|
|
|
/* Get rid of callbacks registered for this conn */
|
2009-05-19 11:06:25 +00:00
|
|
|
qemuDriverLock(driver);
|
2011-05-12 12:54:07 +00:00
|
|
|
virDomainEventCallbackListRemoveConn(conn,
|
|
|
|
driver->domainEventState->callbacks);
|
2011-06-23 09:37:57 +00:00
|
|
|
qemuProcessAutoDestroyRun(driver, conn);
|
2009-05-19 11:06:25 +00:00
|
|
|
qemuDriverUnlock(driver);
|
2007-06-26 22:39:53 +00:00
|
|
|
|
|
|
|
conn->privateData = NULL;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2008-11-14 08:42:47 +00:00
|
|
|
/* Which features are supported by this driver? */
|
|
|
|
static int
|
|
|
|
qemudSupportsFeature (virConnectPtr conn ATTRIBUTE_UNUSED, int feature)
|
|
|
|
{
|
|
|
|
switch (feature) {
|
2009-09-17 17:10:04 +00:00
|
|
|
case VIR_DRV_FEATURE_MIGRATION_V2:
|
2011-02-03 11:09:28 +00:00
|
|
|
case VIR_DRV_FEATURE_MIGRATION_V3:
|
2009-09-17 17:10:04 +00:00
|
|
|
case VIR_DRV_FEATURE_MIGRATION_P2P:
|
2011-07-19 00:27:32 +00:00
|
|
|
case VIR_DRV_FEATURE_MIGRATE_CHANGE_PROTECTION:
|
2009-09-17 17:10:04 +00:00
|
|
|
return 1;
|
|
|
|
default:
|
|
|
|
return 0;
|
2008-11-14 08:42:47 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2007-06-26 22:39:53 +00:00
|
|
|
static const char *qemudGetType(virConnectPtr conn ATTRIBUTE_UNUSED) {
|
2007-07-04 03:59:13 +00:00
|
|
|
return "QEMU";
|
2007-06-26 22:39:53 +00:00
|
|
|
}
|
|
|
|
|
2008-09-17 14:07:49 +00:00
|
|
|
|
Implementation of new APIs for checking state/persistence of objects
This implements the virConnectIsSecure, virConnectIsEncrypted,
virDomainIsPersistent, virDomainIsActive, virNetworkIsActive,
virNetworkIsPersistent, virStoragePoolIsActive,
virStoragePoolIsPersistent, virInterfaceIsActive APIs in
(nearly) all drivers. Exceptions are:
phyp: missing domainIsActive/Persistent
esx: missing domainIsPersistent
opennebula: missing domainIsActive/Persistent
* src/remote/remote_protocol.x: Define remote wire ABI for newly
added APIs.
* daemon/remote_dispatch*.h: Re-generated from remote_protocol.x
* src/esx/esx_driver.c, src/lxc/lxc_driver.c, src/network/bridge_driver.c,
src/opennebula/one_driver.c, src/openvz/openvz_conf.c,
src/openvz/openvz_driver.c, src/phyp/phyp_driver.c,
src/remote/remote_driver.c, src/storage/storage_driver.c,
src/test/test_driver.c, src/uml/uml_driver.c, src/vbox/vbox_tmpl.c,
src/xen/xen_driver.c, src/xen/xen_driver.h, src/xen/xen_inotify.c,
src/xen/xen_inotify.h: Implement all the new APIs where possible
2009-10-20 14:12:03 +00:00
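A short, hypothetical caller-side illustration of the new checks (the domain name "example-guest" is a placeholder):
virDomainPtr dom = virDomainLookupByName(conn, "example-guest");
if (dom) {
    if (virConnectIsSecure(conn) == 1)
        printf("connection is secure\n");
    if (virDomainIsActive(dom) == 1)
        printf("domain is running\n");
    if (virDomainIsPersistent(dom) == 1)
        printf("domain has a persistent config\n");
    virDomainFree(dom);
}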
|
|
|
static int qemuIsSecure(virConnectPtr conn ATTRIBUTE_UNUSED)
|
|
|
|
{
|
|
|
|
/* Trivially secure, since always inside the daemon */
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int qemuIsEncrypted(virConnectPtr conn ATTRIBUTE_UNUSED)
|
|
|
|
{
|
|
|
|
/* Not encrypted, but remote driver takes care of that */
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2008-09-17 14:07:49 +00:00
|
|
|
static int kvmGetMaxVCPUs(void) {
|
|
|
|
int maxvcpus = 1;
|
|
|
|
|
|
|
|
int r, fd;
|
2008-09-18 08:54:23 +00:00
|
|
|
|
2008-09-17 14:07:49 +00:00
|
|
|
fd = open(KVM_DEVICE, O_RDONLY);
|
|
|
|
if (fd < 0) {
|
2010-02-04 20:02:58 +00:00
|
|
|
virReportSystemError(errno, _("Unable to open %s"), KVM_DEVICE);
|
2009-02-05 16:11:25 +00:00
|
|
|
return -1;
|
2008-09-17 14:07:49 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
r = ioctl(fd, KVM_CHECK_EXTENSION, KVM_CAP_NR_VCPUS);
|
|
|
|
if (r > 0)
|
|
|
|
maxvcpus = r;
|
|
|
|
|
2010-11-09 20:48:48 +00:00
|
|
|
VIR_FORCE_CLOSE(fd);
|
2008-09-17 14:07:49 +00:00
|
|
|
return maxvcpus;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-02-07 23:16:04 +00:00
|
|
|
static char *
|
|
|
|
qemuGetSysinfo(virConnectPtr conn, unsigned int flags)
|
|
|
|
{
|
|
|
|
struct qemud_driver *driver = conn->privateData;
|
|
|
|
|
|
|
|
virCheckFlags(0, NULL);
|
|
|
|
|
|
|
|
if (!driver->hostsysinfo) {
|
|
|
|
qemuReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
|
|
|
|
_("Host SMBIOS information is not available"));
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
return virSysinfoFormat(driver->hostsysinfo, "");
|
|
|
|
}
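This is the driver side of the public virConnectGetSysinfo() call; a brief, hypothetical usage sketch (flags must be 0 here, and the returned XML string is freed by the caller):
char *info = virConnectGetSysinfo(conn, 0);
if (info) {
    printf("%s\n", info);   /* <sysinfo type='smbios'> XML document */
    free(info);
}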
|
|
|
|
|
2010-02-09 18:15:41 +00:00
|
|
|
static int qemudGetMaxVCPUs(virConnectPtr conn ATTRIBUTE_UNUSED, const char *type) {
|
2007-06-26 22:39:53 +00:00
|
|
|
if (!type)
|
|
|
|
return 16;
|
|
|
|
|
2008-05-07 16:16:44 +00:00
|
|
|
if (STRCASEEQ(type, "qemu"))
|
2007-06-26 22:39:53 +00:00
|
|
|
return 16;
|
|
|
|
|
2008-05-07 16:16:44 +00:00
|
|
|
if (STRCASEEQ(type, "kvm"))
|
2008-09-17 14:07:49 +00:00
|
|
|
return kvmGetMaxVCPUs();
|
2007-06-26 22:39:53 +00:00
|
|
|
|
2008-05-07 16:16:44 +00:00
|
|
|
if (STRCASEEQ(type, "kqemu"))
|
2007-06-26 22:39:53 +00:00
|
|
|
return 1;
|
2008-05-07 16:16:44 +00:00
|
|
|
|
2010-02-09 18:15:41 +00:00
|
|
|
qemuReportError(VIR_ERR_INVALID_ARG,
|
|
|
|
_("unknown type '%s'"), type);
|
2007-06-26 22:39:53 +00:00
|
|
|
return -1;
|
|
|
|
}
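The limits above surface to applications through virConnectGetMaxVcpus(); a small, hypothetical sketch:
int max = virConnectGetMaxVcpus(conn, "kvm");
if (max < 0)
    fprintf(stderr, "unable to query the vcpu limit\n");
else
    printf("up to %d vcpus supported\n", max);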
|
|
|
|
|
2007-06-26 19:49:50 +00:00
|
|
|
|
2008-02-27 04:35:08 +00:00
|
|
|
static char *qemudGetCapabilities(virConnectPtr conn) {
|
2008-12-04 21:04:30 +00:00
|
|
|
struct qemud_driver *driver = conn->privateData;
|
2009-12-18 15:24:14 +00:00
|
|
|
virCapsPtr caps = NULL;
|
2009-05-06 14:20:34 +00:00
|
|
|
char *xml = NULL;
|
2007-06-26 19:49:50 +00:00
|
|
|
|
2008-12-04 21:06:41 +00:00
|
|
|
qemuDriverLock(driver);
|
2009-10-09 19:13:29 +00:00
|
|
|
|
2010-01-18 16:24:25 +00:00
|
|
|
if ((caps = qemuCreateCapabilities(qemu_driver->caps,
|
2010-02-09 13:17:39 +00:00
|
|
|
qemu_driver)) == NULL) {
|
2010-01-18 16:24:25 +00:00
|
|
|
virCapabilitiesFree(caps);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
2009-06-29 10:41:56 +00:00
|
|
|
|
2009-05-06 14:20:34 +00:00
|
|
|
virCapabilitiesFree(qemu_driver->caps);
|
2009-06-29 10:41:56 +00:00
|
|
|
qemu_driver->caps = caps;
|
|
|
|
|
|
|
|
if ((xml = virCapabilitiesFormatXML(driver->caps)) == NULL)
|
2010-02-04 18:19:08 +00:00
|
|
|
virReportOOMError();
|
2009-06-29 10:41:56 +00:00
|
|
|
|
|
|
|
cleanup:
|
2008-12-04 21:06:41 +00:00
|
|
|
qemuDriverUnlock(driver);
|
2007-06-26 19:49:50 +00:00
|
|
|
|
2008-02-27 04:35:08 +00:00
|
|
|
return xml;
|
2007-06-26 19:49:50 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2010-12-08 18:36:08 +00:00
|
|
|
static int
|
|
|
|
qemudGetProcessInfo(unsigned long long *cpuTime, int *lastCpu, int pid,
|
|
|
|
int tid)
|
|
|
|
{
|
|
|
|
char *proc;
|
2007-02-14 01:40:09 +00:00
|
|
|
FILE *pidinfo;
|
2007-04-15 19:58:44 +00:00
|
|
|
unsigned long long usertime, systime;
|
2009-07-27 15:30:25 +00:00
|
|
|
int cpu;
|
|
|
|
int ret;
|
2007-02-14 01:40:09 +00:00
|
|
|
|
2009-07-27 15:30:25 +00:00
|
|
|
if (tid)
|
2010-12-08 18:36:08 +00:00
|
|
|
ret = virAsprintf(&proc, "/proc/%d/task/%d/stat", pid, tid);
|
2009-07-27 15:30:25 +00:00
|
|
|
else
|
2010-12-08 18:36:08 +00:00
|
|
|
ret = virAsprintf(&proc, "/proc/%d/stat", pid);
|
|
|
|
if (ret < 0)
|
2007-02-14 01:40:09 +00:00
|
|
|
return -1;
|
|
|
|
|
|
|
|
if (!(pidinfo = fopen(proc, "r"))) {
|
|
|
|
/* VM probably shut down, so fake 0 */
|
2009-07-27 15:30:25 +00:00
|
|
|
if (cpuTime)
|
|
|
|
*cpuTime = 0;
|
|
|
|
if (lastCpu)
|
|
|
|
*lastCpu = 0;
|
2010-12-08 18:36:08 +00:00
|
|
|
VIR_FREE(proc);
|
2007-02-14 01:40:09 +00:00
|
|
|
return 0;
|
|
|
|
}
|
2010-12-08 18:36:08 +00:00
|
|
|
VIR_FREE(proc);
|
2007-02-14 01:40:09 +00:00
|
|
|
|
2009-07-27 15:30:25 +00:00
|
|
|
/* See 'man proc' for information about what all these fields are. We're
|
|
|
|
* only interested in a very few of them */
|
|
|
|
if (fscanf(pidinfo,
|
|
|
|
/* pid -> stime */
|
|
|
|
"%*d %*s %*c %*d %*d %*d %*d %*d %*u %*u %*u %*u %*u %llu %llu"
|
|
|
|
/* cutime -> endcode */
|
|
|
|
"%*d %*d %*d %*d %*d %*u %*u %*d %*u %*u %*u %*u"
|
|
|
|
/* startstack -> processor */
|
|
|
|
"%*u %*u %*u %*u %*u %*u %*u %*u %*u %*u %*d %d",
|
|
|
|
&usertime, &systime, &cpu) != 3) {
|
2010-11-17 02:13:29 +00:00
|
|
|
VIR_FORCE_FCLOSE(pidinfo);
|
2011-05-09 09:24:09 +00:00
|
|
|
VIR_WARN("cannot parse process status data");
|
2009-07-27 15:30:25 +00:00
|
|
|
errno = EINVAL;
|
2007-02-14 01:40:09 +00:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* We got jiffies
|
|
|
|
* We want nanoseconds
|
|
|
|
* _SC_CLK_TCK is jiffies per second
|
|
|
|
* So calculate thus....
|
|
|
|
*/
|
2009-07-27 15:30:25 +00:00
|
|
|
if (cpuTime)
|
|
|
|
*cpuTime = 1000ull * 1000ull * 1000ull * (usertime + systime) / (unsigned long long)sysconf(_SC_CLK_TCK);
|
|
|
|
if (lastCpu)
|
|
|
|
*lastCpu = cpu;
|
|
|
|
|
2007-02-14 01:40:09 +00:00
|
|
|
|
2009-07-27 15:30:25 +00:00
|
|
|
VIR_DEBUG("Got status for %d/%d user=%llu sys=%llu cpu=%d",
|
|
|
|
pid, tid, usertime, systime, cpu);
|
2007-02-14 01:40:09 +00:00
|
|
|
|
2010-11-17 02:13:29 +00:00
|
|
|
VIR_FORCE_FCLOSE(pidinfo);
|
2007-02-14 01:40:09 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
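A hedged sketch of how a caller inside this driver might use the helper; the surrounding 'vm' variable and the error policy are assumptions, not taken from a specific call site:
unsigned long long cpuTime = 0;
int lastCpu = 0;

/* tid == 0 reads /proc/<pid>/stat for the whole QEMU process;
 * a non-zero tid reads the per-thread (vCPU) stats instead */
if (qemudGetProcessInfo(&cpuTime, &lastCpu, vm->pid, 0) < 0) {
    /* treat as transient: the VM may have just shut down */
    cpuTime = 0;
}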
|
|
|
|
|
|
|
|
|
2007-06-29 13:23:13 +00:00
|
|
|
static virDomainPtr qemudDomainLookupByID(virConnectPtr conn,
|
2008-07-11 19:34:11 +00:00
|
|
|
int id) {
|
2008-12-04 21:04:30 +00:00
|
|
|
struct qemud_driver *driver = conn->privateData;
|
|
|
|
virDomainObjPtr vm;
|
|
|
|
virDomainPtr dom = NULL;
|
|
|
|
|
2008-12-04 21:06:41 +00:00
|
|
|
qemuDriverLock(driver);
|
2008-12-04 21:04:30 +00:00
|
|
|
vm = virDomainFindByID(&driver->domains, id);
|
2008-12-04 21:06:41 +00:00
|
|
|
qemuDriverUnlock(driver);
|
2007-06-26 22:39:53 +00:00
|
|
|
|
|
|
|
if (!vm) {
|
2010-02-09 18:15:41 +00:00
|
|
|
qemuReportError(VIR_ERR_NO_DOMAIN,
|
|
|
|
_("no domain with matching id %d"), id);
|
2008-12-04 21:04:30 +00:00
|
|
|
goto cleanup;
|
2007-06-26 22:39:53 +00:00
|
|
|
}
|
|
|
|
|
2007-06-26 23:48:46 +00:00
|
|
|
dom = virGetDomain(conn, vm->def->name, vm->def->uuid);
|
2008-07-11 19:34:11 +00:00
|
|
|
if (dom) dom->id = vm->def->id;
|
2008-12-04 21:04:30 +00:00
|
|
|
|
|
|
|
cleanup:
|
2008-12-04 21:06:41 +00:00
|
|
|
if (vm)
|
|
|
|
virDomainObjUnlock(vm);
|
2007-06-26 22:39:53 +00:00
|
|
|
return dom;
|
|
|
|
}
|
2008-12-04 21:06:41 +00:00
|
|
|
|
2007-06-29 13:23:13 +00:00
|
|
|
static virDomainPtr qemudDomainLookupByUUID(virConnectPtr conn,
|
2008-07-11 19:34:11 +00:00
|
|
|
const unsigned char *uuid) {
|
2008-12-04 21:04:30 +00:00
|
|
|
struct qemud_driver *driver = conn->privateData;
|
|
|
|
virDomainObjPtr vm;
|
|
|
|
virDomainPtr dom = NULL;
|
2007-06-26 22:39:53 +00:00
|
|
|
|
2008-12-04 21:06:41 +00:00
|
|
|
qemuDriverLock(driver);
|
2008-12-04 21:04:30 +00:00
|
|
|
vm = virDomainFindByUUID(&driver->domains, uuid);
|
2008-12-04 21:06:41 +00:00
|
|
|
qemuDriverUnlock(driver);
|
|
|
|
|
2007-06-26 22:39:53 +00:00
|
|
|
if (!vm) {
|
2009-04-19 15:30:50 +00:00
|
|
|
char uuidstr[VIR_UUID_STRING_BUFLEN];
|
|
|
|
virUUIDFormat(uuid, uuidstr);
|
2010-02-09 18:15:41 +00:00
|
|
|
qemuReportError(VIR_ERR_NO_DOMAIN,
|
|
|
|
_("no domain with matching uuid '%s'"), uuidstr);
|
2008-12-04 21:04:30 +00:00
|
|
|
goto cleanup;
|
2007-06-26 22:39:53 +00:00
|
|
|
}
|
|
|
|
|
2007-06-26 23:48:46 +00:00
|
|
|
dom = virGetDomain(conn, vm->def->name, vm->def->uuid);
|
2008-07-11 19:34:11 +00:00
|
|
|
if (dom) dom->id = vm->def->id;
|
2008-12-04 21:04:30 +00:00
|
|
|
|
|
|
|
cleanup:
|
2008-12-04 21:06:41 +00:00
|
|
|
if (vm)
|
|
|
|
virDomainObjUnlock(vm);
|
2007-06-26 22:39:53 +00:00
|
|
|
return dom;
|
|
|
|
}
|
2008-12-04 21:06:41 +00:00
|
|
|
|
2007-06-29 13:23:13 +00:00
|
|
|
static virDomainPtr qemudDomainLookupByName(virConnectPtr conn,
|
2008-07-11 19:34:11 +00:00
|
|
|
const char *name) {
|
2008-12-04 21:04:30 +00:00
|
|
|
struct qemud_driver *driver = conn->privateData;
|
|
|
|
virDomainObjPtr vm;
|
|
|
|
virDomainPtr dom = NULL;
|
2007-06-26 22:39:53 +00:00
|
|
|
|
2008-12-04 21:06:41 +00:00
|
|
|
qemuDriverLock(driver);
|
2008-12-04 21:04:30 +00:00
|
|
|
vm = virDomainFindByName(&driver->domains, name);
|
2008-12-04 21:06:41 +00:00
|
|
|
qemuDriverUnlock(driver);
|
|
|
|
|
2007-06-26 22:39:53 +00:00
|
|
|
if (!vm) {
|
2010-02-09 18:15:41 +00:00
|
|
|
qemuReportError(VIR_ERR_NO_DOMAIN,
|
|
|
|
_("no domain with matching name '%s'"), name);
|
2008-12-04 21:04:30 +00:00
|
|
|
goto cleanup;
|
2007-06-26 22:39:53 +00:00
|
|
|
}
|
|
|
|
|
2007-06-26 23:48:46 +00:00
|
|
|
dom = virGetDomain(conn, vm->def->name, vm->def->uuid);
|
2008-07-11 19:34:11 +00:00
|
|
|
if (dom) dom->id = vm->def->id;
|
2008-12-04 21:04:30 +00:00
|
|
|
|
|
|
|
cleanup:
|
2008-12-04 21:06:41 +00:00
|
|
|
if (vm)
|
|
|
|
virDomainObjUnlock(vm);
|
2007-06-26 22:39:53 +00:00
|
|
|
return dom;
|
|
|
|
}
|
|
|
|
|
2009-10-20 14:12:03 +00:00
|
|
|
|
|
|
|
static int qemuDomainIsActive(virDomainPtr dom)
|
|
|
|
{
|
|
|
|
struct qemud_driver *driver = dom->conn->privateData;
|
|
|
|
virDomainObjPtr obj;
|
|
|
|
int ret = -1;
|
|
|
|
|
|
|
|
qemuDriverLock(driver);
|
|
|
|
obj = virDomainFindByUUID(&driver->domains, dom->uuid);
|
|
|
|
qemuDriverUnlock(driver);
|
|
|
|
if (!obj) {
|
2011-02-14 23:51:59 +00:00
|
|
|
char uuidstr[VIR_UUID_STRING_BUFLEN];
|
|
|
|
virUUIDFormat(dom->uuid, uuidstr);
|
|
|
|
qemuReportError(VIR_ERR_NO_DOMAIN,
|
|
|
|
_("no domain with matching uuid '%s'"), uuidstr);
|
2009-10-20 14:12:03 +00:00
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
ret = virDomainObjIsActive(obj);
|
|
|
|
|
|
|
|
cleanup:
|
|
|
|
if (obj)
|
|
|
|
virDomainObjUnlock(obj);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int qemuDomainIsPersistent(virDomainPtr dom)
|
|
|
|
{
|
|
|
|
struct qemud_driver *driver = dom->conn->privateData;
|
|
|
|
virDomainObjPtr obj;
|
|
|
|
int ret = -1;
|
|
|
|
|
|
|
|
qemuDriverLock(driver);
|
|
|
|
obj = virDomainFindByUUID(&driver->domains, dom->uuid);
|
|
|
|
qemuDriverUnlock(driver);
|
|
|
|
if (!obj) {
|
2011-02-14 23:51:59 +00:00
|
|
|
char uuidstr[VIR_UUID_STRING_BUFLEN];
|
|
|
|
virUUIDFormat(dom->uuid, uuidstr);
|
|
|
|
qemuReportError(VIR_ERR_NO_DOMAIN,
|
|
|
|
_("no domain with matching uuid '%s'"), uuidstr);
|
2009-10-20 14:12:03 +00:00
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
ret = obj->persistent;
|
|
|
|
|
|
|
|
cleanup:
|
|
|
|
if (obj)
|
|
|
|
virDomainObjUnlock(obj);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2010-11-15 03:23:35 +00:00
|
|
|
static int qemuDomainIsUpdated(virDomainPtr dom)
|
|
|
|
{
|
|
|
|
struct qemud_driver *driver = dom->conn->privateData;
|
|
|
|
virDomainObjPtr obj;
|
|
|
|
int ret = -1;
|
|
|
|
|
|
|
|
qemuDriverLock(driver);
|
|
|
|
obj = virDomainFindByUUID(&driver->domains, dom->uuid);
|
|
|
|
qemuDriverUnlock(driver);
|
|
|
|
if (!obj) {
|
2011-02-14 23:51:59 +00:00
|
|
|
char uuidstr[VIR_UUID_STRING_BUFLEN];
|
|
|
|
virUUIDFormat(dom->uuid, uuidstr);
|
|
|
|
qemuReportError(VIR_ERR_NO_DOMAIN,
|
|
|
|
_("no domain with matching uuid '%s'"), uuidstr);
|
2010-11-15 03:23:35 +00:00
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
ret = obj->updated;
|
|
|
|
|
|
|
|
cleanup:
|
|
|
|
if (obj)
|
|
|
|
virDomainObjUnlock(obj);
|
|
|
|
return ret;
|
|
|
|
}
|
2009-10-20 14:12:03 +00:00
|
|
|
|
2007-06-29 13:23:13 +00:00
|
|
|
static int qemudGetVersion(virConnectPtr conn, unsigned long *version) {
|
2008-12-04 21:04:30 +00:00
|
|
|
struct qemud_driver *driver = conn->privateData;
|
|
|
|
int ret = -1;
|
|
|
|
|
2008-12-04 21:06:41 +00:00
|
|
|
qemuDriverLock(driver);
|
2010-12-16 15:07:07 +00:00
|
|
|
if (qemuCapsExtractVersion(driver->caps, &driver->qemuVersion) < 0)
|
2008-12-04 21:04:30 +00:00
|
|
|
goto cleanup;
|
2007-02-23 17:15:18 +00:00
|
|
|
|
2010-12-16 15:07:07 +00:00
|
|
|
*version = driver->qemuVersion;
|
2008-12-04 21:04:30 +00:00
|
|
|
ret = 0;
|
|
|
|
|
|
|
|
cleanup:
|
2008-12-04 21:06:41 +00:00
|
|
|
qemuDriverUnlock(driver);
|
2008-12-04 21:04:30 +00:00
|
|
|
return ret;
|
2007-02-14 01:40:09 +00:00
|
|
|
}
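Client-side this is reached via virConnectGetVersion(), which encodes the version as major*1,000,000 + minor*1,000 + release; a small sketch:
unsigned long hvVer = 0;
if (virConnectGetVersion(conn, &hvVer) == 0)
    printf("QEMU version %lu.%lu.%lu\n",
           hvVer / 1000000, (hvVer / 1000) % 1000, hvVer % 1000);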
|
|
|
|
|
2007-06-29 13:23:13 +00:00
|
|
|
static int qemudListDomains(virConnectPtr conn, int *ids, int nids) {
|
2008-12-04 21:04:30 +00:00
|
|
|
struct qemud_driver *driver = conn->privateData;
|
Convert virDomainObjListPtr to use a hash of domain objects
The current virDomainObjListPtr object stores domain objects in
an array. This means that finding a particular object requires
O(n) time, and more critically acquiring O(n) mutex locks.
The new impl replaces the array with a virHashTable, keyed off
UUID. Finding an object based on UUID is now O(1) time, and only
requires a single mutex lock. Finding by name/id is unchanged
in complexity.
In changing this, all code which iterates over the array had
to be updated to use a hash table iterator function callback.
Several of the functions which were identically duplicated
across all drivers were pulled into domain_conf.c
* src/conf/domain_conf.h, src/conf/domain_conf.c: Change
virDomainObjListPtr to use virHashTable. Add an initializer
method virDomainObjListInit, and rename virDomainObjListFree
to virDomainObjListDeinit, since it's not actually freeing
the container, only its contents. Also add some convenient
methods virDomainObjListGetInactiveNames,
virDomainObjListGetActiveIDs and virDomainObjListNumOfDomains
which can be used to implement the correspondingly named
public API entry points in drivers
* src/libvirt_private.syms: Export new methods from domain_conf.h
* src/lxc/lxc_driver.c, src/opennebula/one_driver.c,
src/openvz/openvz_conf.c, src/openvz/openvz_driver.c,
src/qemu/qemu_driver.c, src/test/test_driver.c,
src/uml/uml_driver.c, src/vbox/vbox_tmpl.c: Update all code
to deal with hash tables instead of arrays for domains
2009-10-09 11:33:51 +00:00
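A hedged sketch of the O(1) lookup path described above; the exact helpers used inside domain_conf.c may differ, so treat the names here as assumptions:
/* illustrative only: format the UUID into its string form and use it as
 * the hash key, mirroring what virDomainFindByUUID is described as doing */
char uuidstr[VIR_UUID_STRING_BUFLEN];
virUUIDFormat(uuid, uuidstr);
virDomainObjPtr vm = virHashLookup(doms->objs, uuidstr);
if (vm)
    virDomainObjLock(vm);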
|
|
|
int n;
|
2008-10-10 14:20:37 +00:00
|
|
|
|
2008-12-04 21:06:41 +00:00
|
|
|
qemuDriverLock(driver);
|
2009-10-09 11:33:51 +00:00
|
|
|
n = virDomainObjListGetActiveIDs(&driver->domains, ids, nids);
|
2008-12-04 21:06:41 +00:00
|
|
|
qemuDriverUnlock(driver);
|
2008-10-10 14:20:37 +00:00
|
|
|
|
2009-10-09 11:33:51 +00:00
|
|
|
return n;
|
2007-02-14 01:40:09 +00:00
|
|
|
}
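On the application side these IDs come back through virConnectNumOfDomains() and virConnectListDomains(); a minimal, hypothetical sketch:
int ids[32];
int n = virConnectListDomains(conn, ids, 32);
if (n < 0)
    fprintf(stderr, "failed to list running domains\n");
else
    printf("%d running domain(s)\n", n);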
|
2008-12-04 21:06:41 +00:00
|
|
|
|
2007-06-29 13:23:13 +00:00
|
|
|
static int qemudNumDomains(virConnectPtr conn) {
|
2008-12-04 21:04:30 +00:00
|
|
|
struct qemud_driver *driver = conn->privateData;
|
2009-10-09 11:33:51 +00:00
|
|
|
int n;
|
2008-10-10 14:20:37 +00:00
|
|
|
|
2008-12-04 21:06:41 +00:00
|
|
|
qemuDriverLock(driver);
|
2009-10-09 11:33:51 +00:00
|
|
|
n = virDomainObjListNumOfDomains(&driver->domains, 1);
|
2008-12-04 21:06:41 +00:00
|
|
|
qemuDriverUnlock(driver);
|
2008-10-10 14:20:37 +00:00
|
|
|
|
2008-07-11 19:34:11 +00:00
|
|
|
return n;
|
2007-02-14 01:40:09 +00:00
|
|
|
}
|
2008-12-04 21:06:41 +00:00
|
|
|
|
2007-06-29 13:23:13 +00:00
|
|
|
static virDomainPtr qemudDomainCreate(virConnectPtr conn, const char *xml,
|
2010-05-25 17:13:13 +00:00
|
|
|
unsigned int flags) {
|
2008-12-04 21:04:30 +00:00
|
|
|
struct qemud_driver *driver = conn->privateData;
|
2008-07-11 19:34:11 +00:00
|
|
|
virDomainDefPtr def;
|
2008-12-04 21:06:41 +00:00
|
|
|
virDomainObjPtr vm = NULL;
|
2008-12-04 21:04:30 +00:00
|
|
|
virDomainPtr dom = NULL;
|
2008-12-04 21:09:20 +00:00
|
|
|
virDomainEventPtr event = NULL;
|
2011-08-05 22:05:50 +00:00
|
|
|
virDomainEventPtr event2 = NULL;
|
2007-02-14 01:40:09 +00:00
|
|
|
|
2011-06-23 10:41:57 +00:00
|
|
|
virCheckFlags(VIR_DOMAIN_START_PAUSED |
|
|
|
|
VIR_DOMAIN_START_AUTODESTROY, NULL);
|
2010-05-25 17:13:13 +00:00
|
|
|
|
2008-12-04 21:06:41 +00:00
|
|
|
qemuDriverLock(driver);
|
2010-02-09 18:58:01 +00:00
|
|
|
if (!(def = virDomainDefParseString(driver->caps, xml,
|
2011-07-11 17:29:09 +00:00
|
|
|
QEMU_EXPECTED_VIRT_TYPES,
|
2009-01-08 13:54:20 +00:00
|
|
|
VIR_DOMAIN_XML_INACTIVE)))
|
2008-12-04 21:04:30 +00:00
|
|
|
goto cleanup;
|
2007-02-23 08:48:02 +00:00
|
|
|
|
2010-11-17 20:26:30 +00:00
|
|
|
if (virSecurityManagerVerify(driver->securityManager, def) < 0)
|
2009-04-03 10:55:51 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
2009-11-02 18:37:38 +00:00
|
|
|
if (virDomainObjIsDuplicate(&driver->domains, def, 1) < 0)
|
|
|
|
goto cleanup;
|
2008-07-24 07:29:50 +00:00
|
|
|
|
2010-02-11 16:19:34 +00:00
|
|
|
if (qemudCanonicalizeMachine(driver, def) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
if (qemuDomainAssignPCIAddresses(def) < 0)
|
2010-02-11 16:19:34 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
2010-02-09 18:58:01 +00:00
|
|
|
if (!(vm = virDomainAssignDef(driver->caps,
|
2008-07-11 19:34:11 +00:00
|
|
|
&driver->domains,
|
2010-03-24 14:31:21 +00:00
|
|
|
def, false)))
|
2008-12-04 21:04:30 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
def = NULL;
|
2007-02-14 01:40:09 +00:00
|
|
|
|
2011-06-30 09:23:50 +00:00
|
|
|
if (qemuDomainObjBeginJobWithDriver(driver, vm, QEMU_JOB_MODIFY) < 0)
|
2009-11-03 18:26:32 +00:00
|
|
|
goto cleanup; /* XXXX free the 'vm' we created ? */
|
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
if (qemuProcessStart(conn, driver, vm, NULL,
|
|
|
|
(flags & VIR_DOMAIN_START_PAUSED) != 0,
|
2011-06-23 10:41:57 +00:00
|
|
|
(flags & VIR_DOMAIN_START_AUTODESTROY) != 0,
|
2011-08-25 20:44:48 +00:00
|
|
|
-1, NULL, NULL, VIR_VM_OP_CREATE) < 0) {
|
Move qemu_audit.h helpers into shared code
The LXC and UML drivers can both make use of auditing. Move
the qemu_audit.{c,h} files to src/conf/domain_audit.{c,h}
* src/conf/domain_audit.c: Rename from src/qemu/qemu_audit.c
* src/conf/domain_audit.h: Rename from src/qemu/qemu_audit.h
* src/Makefile.am: Remove qemu_audit.{c,h}, add domain_audit.{c,h}
* src/qemu/qemu_audit.h, src/qemu/qemu_cgroup.c,
src/qemu/qemu_command.c, src/qemu/qemu_driver.c,
src/qemu/qemu_hotplug.c, src/qemu/qemu_migration.c,
src/qemu/qemu_process.c: Update for changed audit API names
2011-07-04 10:56:13 +00:00
|
|
|
virDomainAuditStart(vm, "booted", false);
|
2011-06-06 08:28:38 +00:00
|
|
|
if (qemuDomainObjEndJob(driver, vm) > 0)
|
2011-09-22 06:02:03 +00:00
|
|
|
qemuDomainRemoveInactive(driver, vm);
|
2008-12-04 21:06:41 +00:00
|
|
|
vm = NULL;
|
2010-04-27 20:16:02 +00:00
|
|
|
goto cleanup;
|
2007-02-14 01:40:09 +00:00
|
|
|
}
|
2008-12-04 21:09:20 +00:00
|
|
|
|
|
|
|
event = virDomainEventNewFromObj(vm,
|
|
|
|
VIR_DOMAIN_EVENT_STARTED,
|
|
|
|
VIR_DOMAIN_EVENT_STARTED_BOOTED);
|
2011-08-05 22:05:50 +00:00
|
|
|
if (event && (flags & VIR_DOMAIN_START_PAUSED)) {
|
|
|
|
/* There are two classes of event-watching clients - those
|
|
|
|
* that only care about on/off (and must see a started event
|
|
|
|
* no matter what, but don't care about suspend events), and
|
|
|
|
* those that also care about running/paused. To satisfy both
|
|
|
|
* client types, we have to send two events. */
|
|
|
|
event2 = virDomainEventNewFromObj(vm,
|
|
|
|
VIR_DOMAIN_EVENT_SUSPENDED,
|
|
|
|
VIR_DOMAIN_EVENT_SUSPENDED_PAUSED);
|
|
|
|
}
|
2011-07-04 10:56:13 +00:00
|
|
|
virDomainAuditStart(vm, "booted", true);
|
2007-02-14 01:40:09 +00:00
|
|
|
|
2007-06-26 23:48:46 +00:00
|
|
|
dom = virGetDomain(conn, vm->def->name, vm->def->uuid);
|
2008-07-11 19:34:11 +00:00
|
|
|
if (dom) dom->id = vm->def->id;
|
2008-12-04 21:04:30 +00:00
|
|
|
|
2009-12-08 14:42:43 +00:00
|
|
|
if (vm &&
|
2011-06-06 08:28:38 +00:00
|
|
|
qemuDomainObjEndJob(driver, vm) == 0)
|
2009-12-08 14:42:43 +00:00
|
|
|
vm = NULL;
|
2009-11-03 18:26:32 +00:00
|
|
|
|
2008-12-04 21:04:30 +00:00
|
|
|
cleanup:
|
|
|
|
virDomainDefFree(def);
|
2008-12-04 21:06:41 +00:00
|
|
|
if (vm)
|
|
|
|
virDomainObjUnlock(vm);
|
2011-08-05 22:05:50 +00:00
|
|
|
if (event) {
|
2008-12-04 21:09:20 +00:00
|
|
|
qemuDomainEventQueue(driver, event);
|
2011-08-05 22:05:50 +00:00
|
|
|
if (event2)
|
|
|
|
qemuDomainEventQueue(driver, event2);
|
|
|
|
}
|
2008-12-04 21:06:41 +00:00
|
|
|
qemuDriverUnlock(driver);
|
2007-06-26 22:39:53 +00:00
|
|
|
return dom;
|
2007-02-14 01:40:09 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2007-06-29 13:23:13 +00:00
|
|
|
static int qemudDomainSuspend(virDomainPtr dom) {
|
2008-12-04 21:04:30 +00:00
|
|
|
struct qemud_driver *driver = dom->conn->privateData;
|
|
|
|
virDomainObjPtr vm;
|
|
|
|
int ret = -1;
|
2008-12-04 21:09:20 +00:00
|
|
|
virDomainEventPtr event = NULL;
|
2010-03-15 13:19:00 +00:00
|
|
|
qemuDomainObjPrivatePtr priv;
|
2011-07-19 00:27:38 +00:00
|
|
|
virDomainPausedReason reason;
|
|
|
|
int eventDetail;
|
2008-12-04 21:04:30 +00:00
|
|
|
|
2008-12-04 21:06:41 +00:00
|
|
|
qemuDriverLock(driver);
|
2009-04-19 15:30:50 +00:00
|
|
|
vm = virDomainFindByUUID(&driver->domains, dom->uuid);
|
2008-12-04 21:06:41 +00:00
|
|
|
|
2007-02-14 01:40:09 +00:00
|
|
|
if (!vm) {
|
2009-04-19 15:30:50 +00:00
|
|
|
char uuidstr[VIR_UUID_STRING_BUFLEN];
|
|
|
|
virUUIDFormat(dom->uuid, uuidstr);
|
2010-02-09 18:15:41 +00:00
|
|
|
qemuReportError(VIR_ERR_NO_DOMAIN,
|
|
|
|
_("no domain with matching uuid '%s'"), uuidstr);
|
2008-12-04 21:04:30 +00:00
|
|
|
goto cleanup;
|
2007-02-14 01:40:09 +00:00
|
|
|
}
|
Rename internal APIs
Rename virDomainIsActive to virDomainObjIsActive,
virInterfaceIsActive to virInterfaceObjIsActive, and finally
virNetworkIsActive to virNetworkObjIsActive.
* src/conf/domain_conf.c, src/conf/domain_conf.h,
src/conf/interface_conf.h, src/conf/network_conf.c,
src/conf/network_conf.h, src/lxc/lxc_driver.c,
src/network/bridge_driver.c, src/opennebula/one_driver.c,
src/openvz/openvz_driver.c, src/qemu/qemu_driver.c,
src/test/test_driver.c, src/uml/uml_driver.c: Update for
renamed APIs.
2009-10-20 14:51:03 +00:00
|
|
|
if (!virDomainObjIsActive(vm)) {
|
2010-02-09 18:15:41 +00:00
|
|
|
qemuReportError(VIR_ERR_OPERATION_INVALID,
|
|
|
|
"%s", _("domain is not running"));
|
2010-03-15 13:19:00 +00:00
|
|
|
goto cleanup;
|
2007-02-14 01:40:09 +00:00
|
|
|
}
|
2010-03-15 13:19:00 +00:00
|
|
|
|
|
|
|
priv = vm->privateData;
|
|
|
|
|
2011-06-30 09:23:50 +00:00
|
|
|
if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT) {
|
2011-07-19 00:27:38 +00:00
|
|
|
reason = VIR_DOMAIN_PAUSED_MIGRATION;
|
|
|
|
eventDetail = VIR_DOMAIN_EVENT_SUSPENDED_MIGRATED;
|
2010-03-15 13:19:00 +00:00
|
|
|
} else {
|
2011-07-19 00:27:38 +00:00
|
|
|
reason = VIR_DOMAIN_PAUSED_USER;
|
|
|
|
eventDetail = VIR_DOMAIN_EVENT_SUSPENDED_PAUSED;
|
|
|
|
}
|
2010-03-15 13:19:00 +00:00
|
|
|
|
2011-07-19 00:27:38 +00:00
|
|
|
if (qemuDomainObjBeginJobWithDriver(driver, vm, QEMU_JOB_SUSPEND) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
if (!virDomainObjIsActive(vm)) {
|
|
|
|
qemuReportError(VIR_ERR_OPERATION_INVALID,
|
|
|
|
"%s", _("domain is not running"));
|
|
|
|
goto endjob;
|
|
|
|
}
|
|
|
|
if (virDomainObjGetState(vm, NULL) != VIR_DOMAIN_PAUSED) {
|
qemu: fix crash when mixing sync and async monitor jobs
Currently, we attempt to run a sync job and an async job at the same time, which
means that the monitor commands for the two jobs can be run in any order.
In the function qemuDomainObjEnterMonitorInternal():
if (priv->job.active == QEMU_JOB_NONE && priv->job.asyncJob) {
if (qemuDomainObjBeginNestedJob(driver, obj) < 0)
We check whether the caller is an async job by priv->job.active and
priv->job.asyncJob. But when an async job is running and a sync job is
also running at the time of the check, priv->job.active is not
QEMU_JOB_NONE. So we cannot check whether the caller is an async job
in the function qemuDomainObjEnterMonitorInternal(), and must instead
put the burden on the caller to tell us when an async command wants
to do a nested job.
Once the burden is on the caller, then only async monitor enters need
to worry about whether the VM is still running; for sync monitor enter,
the internal return is always 0, so lots of ignore_value can be dropped.
* src/qemu/THREADS.txt: Reflect new rules.
* src/qemu/qemu_domain.h (qemuDomainObjEnterMonitorAsync): New
prototype.
* src/qemu/qemu_process.h (qemuProcessStartCPUs)
(qemuProcessStopCPUs): Add parameter.
* src/qemu/qemu_migration.h (qemuMigrationToFile): Likewise.
(qemuMigrationWaitForCompletion): Make static.
* src/qemu/qemu_domain.c (qemuDomainObjEnterMonitorInternal): Add
parameter.
(qemuDomainObjEnterMonitorAsync): New function.
(qemuDomainObjEnterMonitor, qemuDomainObjEnterMonitorWithDriver):
Update callers.
* src/qemu/qemu_driver.c (qemuDomainSaveInternal)
(qemudDomainCoreDump, doCoreDump, processWatchdogEvent)
(qemudDomainSuspend, qemudDomainResume, qemuDomainSaveImageStartVM)
(qemuDomainSnapshotCreateActive, qemuDomainRevertToSnapshot):
Likewise.
* src/qemu/qemu_process.c (qemuProcessStopCPUs)
(qemuProcessFakeReboot, qemuProcessRecoverMigration)
(qemuProcessRecoverJob, qemuProcessStart): Likewise.
* src/qemu/qemu_migration.c (qemuMigrationToFile)
(qemuMigrationWaitForCompletion, qemuMigrationUpdateJobStatus)
(qemuMigrationJobStart, qemuDomainMigrateGraphicsRelocate)
(doNativeMigrate, doTunnelMigrate, qemuMigrationPerformJob)
(qemuMigrationPerformPhase, qemuMigrationFinish)
(qemuMigrationConfirm): Likewise.
* src/qemu/qemu_hotplug.c: Drop unneeded ignore_value.
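A hedged sketch of the rule described above, in plain C with invented names
(DomainJobState, AsyncJob, enterMonitor): sync callers pass a "none" async job and always
get in, while async callers must state which async job they own and are refused when it
does not match. This is not the real qemu_domain.c code, only the shape of the
caller-declares-its-async-job idea.

#include <stdio.h>

typedef enum {
    JOB_ASYNC_NONE = 0,
    JOB_ASYNC_MIGRATION_OUT,
    JOB_ASYNC_SAVE,
} AsyncJob;

typedef struct {
    AsyncJob activeAsyncJob;   /* async job currently owning the domain */
    int monitorUsers;
} DomainJobState;

/*
 * The caller, not this helper, decides whether a nested job is wanted:
 * sync callers pass JOB_ASYNC_NONE and always succeed; async callers
 * must pass the async job they started and are refused on a mismatch.
 */
static int
enterMonitor(DomainJobState *job, AsyncJob asyncJob)
{
    if (asyncJob != JOB_ASYNC_NONE && asyncJob != job->activeAsyncJob) {
        fprintf(stderr, "refusing monitor entry: async job %d not active\n",
                (int)asyncJob);
        return -1;
    }
    job->monitorUsers++;
    return 0;
}

static void
exitMonitor(DomainJobState *job)
{
    if (job->monitorUsers > 0)
        job->monitorUsers--;
}

int main(void)
{
    DomainJobState job = { JOB_ASYNC_MIGRATION_OUT, 0 };

    /* sync caller: never needs a nested job, internal return is always 0 */
    if (enterMonitor(&job, JOB_ASYNC_NONE) == 0)
        exitMonitor(&job);

    /* async caller stating the wrong async job is rejected */
    if (enterMonitor(&job, JOB_ASYNC_SAVE) < 0)
        fprintf(stderr, "nested job rejected as expected\n");

    return 0;
}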
2011-07-28 23:18:24 +00:00
|
|
|
if (qemuProcessStopCPUs(driver, vm, reason, QEMU_ASYNC_JOB_NONE) < 0) {
|
2009-11-03 18:26:32 +00:00
|
|
|
goto endjob;
|
2009-10-13 14:27:58 +00:00
|
|
|
}
|
2011-07-19 00:27:38 +00:00
|
|
|
event = virDomainEventNewFromObj(vm,
|
|
|
|
VIR_DOMAIN_EVENT_SUSPENDED,
|
|
|
|
eventDetail);
|
2007-02-14 01:40:09 +00:00
|
|
|
}
|
2011-07-19 00:27:38 +00:00
|
|
|
if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0)
|
|
|
|
goto endjob;
|
|
|
|
ret = 0;
|
2008-12-04 21:04:30 +00:00
|
|
|
|
2009-11-03 18:26:32 +00:00
|
|
|
endjob:
|
2011-06-06 08:28:38 +00:00
|
|
|
if (qemuDomainObjEndJob(driver, vm) == 0)
|
2009-12-08 14:42:43 +00:00
|
|
|
vm = NULL;
|
2009-11-03 18:26:32 +00:00
|
|
|
|
2008-12-04 21:04:30 +00:00
|
|
|
cleanup:
|
2008-12-04 21:06:41 +00:00
|
|
|
if (vm)
|
|
|
|
virDomainObjUnlock(vm);
|
2008-12-04 21:09:20 +00:00
|
|
|
|
2009-05-19 11:06:25 +00:00
|
|
|
if (event)
|
2008-12-04 21:09:20 +00:00
|
|
|
qemuDomainEventQueue(driver, event);
|
2009-05-19 11:06:25 +00:00
|
|
|
qemuDriverUnlock(driver);
|
2008-12-04 21:04:30 +00:00
|
|
|
return ret;
|
2007-02-14 01:40:09 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2007-06-29 13:23:13 +00:00
|
|
|
static int qemudDomainResume(virDomainPtr dom) {
|
2008-12-04 21:04:30 +00:00
|
|
|
struct qemud_driver *driver = dom->conn->privateData;
|
|
|
|
virDomainObjPtr vm;
|
|
|
|
int ret = -1;
|
2008-12-04 21:09:20 +00:00
|
|
|
virDomainEventPtr event = NULL;
|
2008-12-04 21:04:30 +00:00
|
|
|
|
2008-12-04 21:06:41 +00:00
|
|
|
qemuDriverLock(driver);
|
2009-04-19 15:30:50 +00:00
|
|
|
vm = virDomainFindByUUID(&driver->domains, dom->uuid);
|
2008-12-04 21:06:41 +00:00
|
|
|
|
2007-02-14 01:40:09 +00:00
|
|
|
if (!vm) {
|
2009-04-19 15:30:50 +00:00
|
|
|
char uuidstr[VIR_UUID_STRING_BUFLEN];
|
|
|
|
virUUIDFormat(dom->uuid, uuidstr);
|
2010-02-09 18:15:41 +00:00
|
|
|
qemuReportError(VIR_ERR_NO_DOMAIN,
|
|
|
|
_("no domain with matching uuid '%s'"), uuidstr);
|
2008-12-04 21:04:30 +00:00
|
|
|
goto cleanup;
|
2007-02-14 01:40:09 +00:00
|
|
|
}
|
2009-11-03 18:26:32 +00:00
|
|
|
|
2011-06-30 09:23:50 +00:00
|
|
|
if (qemuDomainObjBeginJobWithDriver(driver, vm, QEMU_JOB_MODIFY) < 0)
|
2009-11-03 18:26:32 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
2009-10-20 14:51:03 +00:00
|
|
|
if (!virDomainObjIsActive(vm)) {
|
2010-02-09 18:15:41 +00:00
|
|
|
qemuReportError(VIR_ERR_OPERATION_INVALID,
|
|
|
|
"%s", _("domain is not running"));
|
2009-11-03 18:26:32 +00:00
|
|
|
goto endjob;
|
2007-02-14 01:40:09 +00:00
|
|
|
}
|
2011-05-04 09:07:01 +00:00
|
|
|
if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED) {
|
|
|
|
if (qemuProcessStartCPUs(driver, vm, dom->conn,
|
2011-07-28 23:18:24 +00:00
|
|
|
VIR_DOMAIN_RUNNING_UNPAUSED,
|
|
|
|
QEMU_ASYNC_JOB_NONE) < 0) {
|
2009-07-21 09:53:17 +00:00
|
|
|
if (virGetLastError() == NULL)
|
2010-02-09 18:15:41 +00:00
|
|
|
qemuReportError(VIR_ERR_OPERATION_FAILED,
|
|
|
|
"%s", _("resume operation failed"));
|
2009-11-03 18:26:32 +00:00
|
|
|
goto endjob;
|
2008-12-04 21:04:30 +00:00
|
|
|
}
|
2008-12-04 21:09:20 +00:00
|
|
|
event = virDomainEventNewFromObj(vm,
|
|
|
|
VIR_DOMAIN_EVENT_RESUMED,
|
|
|
|
VIR_DOMAIN_EVENT_RESUMED_UNPAUSED);
|
2007-02-14 01:40:09 +00:00
|
|
|
}
|
2010-02-09 18:58:01 +00:00
|
|
|
if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0)
|
2009-11-03 18:26:32 +00:00
|
|
|
goto endjob;
|
2008-12-04 21:04:30 +00:00
|
|
|
ret = 0;
|
|
|
|
|
2009-11-03 18:26:32 +00:00
|
|
|
endjob:
|
2011-06-06 08:28:38 +00:00
|
|
|
if (qemuDomainObjEndJob(driver, vm) == 0)
|
2009-12-08 14:42:43 +00:00
|
|
|
vm = NULL;
|
2009-11-03 18:26:32 +00:00
|
|
|
|
2008-12-04 21:04:30 +00:00
|
|
|
cleanup:
|
2008-12-04 21:06:41 +00:00
|
|
|
if (vm)
|
|
|
|
virDomainObjUnlock(vm);
|
2009-05-19 11:06:25 +00:00
|
|
|
if (event)
|
2008-12-04 21:09:20 +00:00
|
|
|
qemuDomainEventQueue(driver, event);
|
2009-05-19 11:06:25 +00:00
|
|
|
qemuDriverUnlock(driver);
|
2008-12-04 21:04:30 +00:00
|
|
|
return ret;
|
2007-02-14 01:40:09 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-06-15 16:49:58 +00:00
|
|
|
static int qemuDomainShutdown(virDomainPtr dom) {
|
2008-12-04 21:04:30 +00:00
|
|
|
struct qemud_driver *driver = dom->conn->privateData;
|
|
|
|
virDomainObjPtr vm;
|
|
|
|
int ret = -1;
|
build: detect potential uninitialized variables
Even with -Wuninitialized (which is part of autobuild.sh
--enable-compile-warnings=error), gcc does NOT catch this
use of an uninitialized variable:
{
if (cond)
goto error;
int a = 1;
error:
printf("%d", a);
}
which prints 0 (supposing the stack started life wiped) if
cond was true. Clang will catch it, but we don't use clang
as often. Using gcc -Wjump-misses-init catches it, but also
gives false positives:
{
if (cond)
goto error;
int a = 1;
return a;
error:
return 0;
}
Here, a was never used in the scope of the error block, so
declaring it after goto is technically fine (and clang agrees).
However, given that our HACKING already documents a preference
for C89 decl-before-statement, the false-positive warning is
enough of a prod to comply with HACKING.
[Personally, I'd _really_ rather use C99 decl-after-statement
to minimize scope, but until gcc can efficiently and reliably
catch scoping and uninitialized usage bugs, I'll settle with
the compromise of enforcing a coding standard that happens to
reject false positives if it can also detect real bugs.]
* acinclude.m4 (LIBVIRT_COMPILE_WARNINGS): Add -Wjump-misses-init.
* src/util/util.c (__virExec): Adjust offenders.
* src/conf/domain_conf.c (virDomainTimerDefParseXML): Likewise.
* src/remote/remote_driver.c (doRemoteOpen): Likewise.
* src/phyp/phyp_driver.c (phypGetLparNAME, phypGetLparProfile)
(phypGetVIOSFreeSCSIAdapter, phypVolumeGetKey)
(phypGetStoragePoolDevice)
(phypVolumeGetPhysicalVolumeByStoragePool)
(phypVolumeGetPath): Likewise.
* src/vbox/vbox_tmpl.c (vboxNetworkUndefineDestroy)
(vboxNetworkCreate, vboxNetworkDumpXML)
(vboxNetworkDefineCreateXML): Likewise.
* src/xenapi/xenapi_driver.c (getCapsObject)
(xenapiDomainDumpXML): Likewise.
* src/xenapi/xenapi_utils.c (createVMRecordFromXml): Likewise.
* src/security/security_selinux.c (SELinuxGenNewContext):
Likewise.
* src/qemu/qemu_command.c (qemuBuildCommandLine): Likewise.
* src/qemu/qemu_hotplug.c (qemuDomainChangeEjectableMedia):
Likewise.
* src/qemu/qemu_process.c (qemuProcessWaitForMonitor): Likewise.
* src/qemu/qemu_monitor_text.c (qemuMonitorTextGetPtyPaths):
Likewise.
* src/qemu/qemu_driver.c (qemudDomainShutdown)
(qemudDomainBlockStats, qemudDomainMemoryPeek): Likewise.
* src/storage/storage_backend_iscsi.c
(virStorageBackendCreateIfaceIQN): Likewise.
* src/node_device/node_device_udev.c (udevProcessPCI): Likewise.
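The pattern being asked for can be shown with a tiny stand-alone example (function
names invented): the first function has the shape that plain -Wuninitialized misses,
the second is the C89 decl-before-statement form the message recommends.

#include <stdio.h>

/* Problematic shape: the goto can jump over the initialization of 'fd',
 * so the label may read an indeterminate value (this is what
 * -Wjump-misses-init is meant to flag). */
static int fragile(int cond)
{
    if (cond)
        goto error;
    int fd = 1;                 /* skipped when cond is true */
 error:
    printf("fd = %d\n", fd);    /* may read an indeterminate value */
    return 0;
}

/* C89-style fix: declare and initialize up front, so every path through
 * the function sees a defined value. */
static int robust(int cond)
{
    int fd = -1;                /* defined on every path */

    if (cond)
        goto cleanup;
    fd = 1;
 cleanup:
    printf("fd = %d\n", fd);
    return 0;
}

int main(void)
{
    fragile(1);
    robust(1);
    return 0;
}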
2011-04-01 15:41:45 +00:00
|
|
|
qemuDomainObjPrivatePtr priv;
|
2008-01-09 16:05:21 +00:00
|
|
|
|
2008-12-04 21:06:41 +00:00
|
|
|
qemuDriverLock(driver);
|
2009-04-19 15:30:50 +00:00
|
|
|
vm = virDomainFindByUUID(&driver->domains, dom->uuid);
|
2008-12-04 21:06:41 +00:00
|
|
|
qemuDriverUnlock(driver);
|
|
|
|
|
2008-01-09 16:05:21 +00:00
|
|
|
if (!vm) {
|
2009-04-19 15:30:50 +00:00
|
|
|
char uuidstr[VIR_UUID_STRING_BUFLEN];
|
|
|
|
virUUIDFormat(dom->uuid, uuidstr);
|
2010-02-09 18:15:41 +00:00
|
|
|
qemuReportError(VIR_ERR_NO_DOMAIN,
|
|
|
|
_("no domain with matching uuid '%s'"), uuidstr);
|
2008-12-04 21:04:30 +00:00
|
|
|
goto cleanup;
|
2008-01-09 16:05:21 +00:00
|
|
|
}
|
|
|
|
|
2011-06-06 08:28:38 +00:00
|
|
|
if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
|
2009-11-03 18:26:32 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
2009-10-20 14:51:03 +00:00
|
|
|
if (!virDomainObjIsActive(vm)) {
|
2010-02-09 18:15:41 +00:00
|
|
|
qemuReportError(VIR_ERR_OPERATION_INVALID,
|
|
|
|
"%s", _("domain is not running"));
|
2009-11-03 18:26:32 +00:00
|
|
|
goto endjob;
|
2009-07-31 14:50:46 +00:00
|
|
|
}
|
|
|
|
|
2011-09-28 10:10:13 +00:00
|
|
|
qemuDomainSetFakeReboot(driver, vm, false);
|
|
|
|
|
2011-04-01 15:41:45 +00:00
|
|
|
priv = vm->privateData;
|
2011-07-28 23:18:24 +00:00
|
|
|
qemuDomainObjEnterMonitor(driver, vm);
|
2009-10-13 14:27:58 +00:00
|
|
|
ret = qemuMonitorSystemPowerdown(priv->mon);
|
2011-06-06 08:28:38 +00:00
|
|
|
qemuDomainObjExitMonitor(driver, vm);
|
2008-12-04 21:04:30 +00:00
|
|
|
|
2009-11-03 18:26:32 +00:00
|
|
|
endjob:
|
2011-06-06 08:28:38 +00:00
|
|
|
if (qemuDomainObjEndJob(driver, vm) == 0)
|
2009-12-08 14:42:43 +00:00
|
|
|
vm = NULL;
|
2009-11-03 18:26:32 +00:00
|
|
|
|
2008-12-04 21:04:30 +00:00
|
|
|
cleanup:
|
2008-12-04 21:06:41 +00:00
|
|
|
if (vm)
|
|
|
|
virDomainObjUnlock(vm);
|
2008-12-04 21:04:30 +00:00
|
|
|
return ret;
|
2008-01-09 16:05:21 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-06-15 16:49:58 +00:00
|
|
|
static int qemuDomainReboot(virDomainPtr dom, unsigned int flags) {
|
|
|
|
struct qemud_driver *driver = dom->conn->privateData;
|
|
|
|
virDomainObjPtr vm;
|
|
|
|
int ret = -1;
|
2011-07-13 10:11:43 +00:00
|
|
|
#if HAVE_YAJL
|
2011-06-15 16:49:58 +00:00
|
|
|
qemuDomainObjPrivatePtr priv;
|
2011-07-13 10:11:43 +00:00
|
|
|
#endif
|
2011-06-15 16:49:58 +00:00
|
|
|
|
|
|
|
virCheckFlags(0, -1);
|
|
|
|
|
|
|
|
qemuDriverLock(driver);
|
|
|
|
vm = virDomainFindByUUID(&driver->domains, dom->uuid);
|
|
|
|
qemuDriverUnlock(driver);
|
|
|
|
|
|
|
|
if (!vm) {
|
|
|
|
char uuidstr[VIR_UUID_STRING_BUFLEN];
|
|
|
|
virUUIDFormat(dom->uuid, uuidstr);
|
|
|
|
qemuReportError(VIR_ERR_NO_DOMAIN,
|
|
|
|
_("no domain with matching uuid '%s'"), uuidstr);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
#if HAVE_YAJL
|
2011-07-13 10:11:43 +00:00
|
|
|
priv = vm->privateData;
|
|
|
|
|
2011-06-15 16:49:58 +00:00
|
|
|
if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_MONITOR_JSON)) {
|
2011-09-21 08:25:29 +00:00
|
|
|
if (!qemuCapsGet(priv->qemuCaps, QEMU_CAPS_NO_SHUTDOWN)) {
|
|
|
|
qemuReportError(VIR_ERR_OPERATION_INVALID, "%s",
|
|
|
|
_("Reboot is not supported with this QEMU binary"));
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2011-06-06 08:28:38 +00:00
|
|
|
if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
|
2011-06-15 16:49:58 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
if (!virDomainObjIsActive(vm)) {
|
|
|
|
qemuReportError(VIR_ERR_OPERATION_INVALID,
|
|
|
|
"%s", _("domain is not running"));
|
|
|
|
goto endjob;
|
|
|
|
}
|
|
|
|
|
2011-07-28 23:18:24 +00:00
|
|
|
qemuDomainObjEnterMonitor(driver, vm);
|
2011-06-15 16:49:58 +00:00
|
|
|
ret = qemuMonitorSystemPowerdown(priv->mon);
|
2011-06-06 08:28:38 +00:00
|
|
|
qemuDomainObjExitMonitor(driver, vm);
|
2011-06-15 16:49:58 +00:00
|
|
|
|
2011-09-28 10:10:13 +00:00
|
|
|
if (ret == 0)
|
|
|
|
qemuDomainSetFakeReboot(driver, vm, true);
|
2011-06-15 16:49:58 +00:00
|
|
|
|
|
|
|
endjob:
|
2011-06-06 08:28:38 +00:00
|
|
|
if (qemuDomainObjEndJob(driver, vm) == 0)
|
2011-06-15 16:49:58 +00:00
|
|
|
vm = NULL;
|
|
|
|
} else {
|
|
|
|
#endif
|
2011-08-23 08:23:10 +00:00
|
|
|
qemuReportError(VIR_ERR_OPERATION_INVALID, "%s",
|
2011-06-15 16:49:58 +00:00
|
|
|
_("Reboot is not supported without the JSON monitor"));
|
|
|
|
#if HAVE_YAJL
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
cleanup:
|
|
|
|
if (vm)
|
|
|
|
virDomainObjUnlock(vm);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-09-29 08:54:44 +00:00
|
|
|
static int
|
|
|
|
qemuDomainReset(virDomainPtr dom, unsigned int flags)
|
|
|
|
{
|
|
|
|
struct qemud_driver *driver = dom->conn->privateData;
|
|
|
|
virDomainObjPtr vm;
|
|
|
|
int ret = -1;
|
|
|
|
qemuDomainObjPrivatePtr priv;
|
|
|
|
|
|
|
|
virCheckFlags(0, -1);
|
|
|
|
|
|
|
|
qemuDriverLock(driver);
|
|
|
|
vm = virDomainFindByUUID(&driver->domains, dom->uuid);
|
|
|
|
qemuDriverUnlock(driver);
|
|
|
|
|
|
|
|
if (!vm) {
|
|
|
|
char uuidstr[VIR_UUID_STRING_BUFLEN];
|
|
|
|
virUUIDFormat(dom->uuid, uuidstr);
|
|
|
|
qemuReportError(VIR_ERR_NO_DOMAIN,
|
|
|
|
_("no domain with matching uuid '%s'"), uuidstr);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
if (!virDomainObjIsActive(vm)) {
|
|
|
|
qemuReportError(VIR_ERR_OPERATION_INVALID,
|
|
|
|
"%s", _("domain is not running"));
|
|
|
|
goto endjob;
|
|
|
|
}
|
|
|
|
|
|
|
|
priv = vm->privateData;
|
|
|
|
qemuDomainObjEnterMonitor(driver, vm);
|
|
|
|
ret = qemuMonitorSystemReset(priv->mon);
|
|
|
|
qemuDomainObjExitMonitor(driver, vm);
|
|
|
|
|
|
|
|
priv->fakeReboot = false;
|
|
|
|
|
|
|
|
endjob:
|
|
|
|
if (qemuDomainObjEndJob(driver, vm) == 0)
|
|
|
|
vm = NULL;
|
|
|
|
|
|
|
|
cleanup:
|
|
|
|
if (vm)
|
|
|
|
virDomainObjUnlock(vm);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-08-22 20:26:52 +00:00
|
|
|
/* Count how many snapshots in a set have external disk snapshots. */
|
|
|
|
static void
|
|
|
|
qemuDomainSnapshotCountExternal(void *payload,
|
|
|
|
const void *name ATTRIBUTE_UNUSED,
|
|
|
|
void *data)
|
|
|
|
{
|
|
|
|
virDomainSnapshotObjPtr snap = payload;
|
|
|
|
int *count = data;
|
|
|
|
|
|
|
|
if (snap->def->state == VIR_DOMAIN_DISK_SNAPSHOT)
|
|
|
|
(*count)++;
|
|
|
|
}
|
|
|
|
|
2011-07-20 16:41:24 +00:00
|
|
|
static int
|
|
|
|
qemuDomainDestroyFlags(virDomainPtr dom,
|
|
|
|
unsigned int flags)
|
|
|
|
{
|
2008-12-04 21:04:30 +00:00
|
|
|
struct qemud_driver *driver = dom->conn->privateData;
|
|
|
|
virDomainObjPtr vm;
|
|
|
|
int ret = -1;
|
2008-12-04 21:09:20 +00:00
|
|
|
virDomainEventPtr event = NULL;
|
2007-02-23 08:41:23 +00:00
|
|
|
|
2011-07-20 16:41:24 +00:00
|
|
|
virCheckFlags(0, -1);
|
|
|
|
|
2008-12-04 21:06:41 +00:00
|
|
|
qemuDriverLock(driver);
|
2009-04-19 15:30:50 +00:00
|
|
|
vm = virDomainFindByUUID(&driver->domains, dom->uuid);
|
2007-02-14 01:40:09 +00:00
|
|
|
if (!vm) {
|
2009-04-19 15:30:50 +00:00
|
|
|
char uuidstr[VIR_UUID_STRING_BUFLEN];
|
|
|
|
virUUIDFormat(dom->uuid, uuidstr);
|
2010-02-09 18:15:41 +00:00
|
|
|
qemuReportError(VIR_ERR_NO_DOMAIN,
|
|
|
|
_("no domain with matching uuid '%s'"), uuidstr);
|
2008-12-04 21:04:30 +00:00
|
|
|
goto cleanup;
|
2007-02-14 01:40:09 +00:00
|
|
|
}
|
2009-11-03 18:26:32 +00:00
|
|
|
|
2011-09-28 10:10:13 +00:00
|
|
|
qemuDomainSetFakeReboot(driver, vm, false);
|
2011-06-15 16:49:58 +00:00
|
|
|
|
2011-04-21 15:19:06 +00:00
|
|
|
/* Although qemuProcessStop does this already, there may
|
|
|
|
* be an outstanding job active. We want to make sure we
|
|
|
|
* can kill the process even if a job is active. Killing
|
|
|
|
* it now means the job will be released
|
|
|
|
*/
|
2011-09-13 16:11:26 +00:00
|
|
|
qemuProcessKill(vm, false);
|
2011-04-21 15:19:06 +00:00
|
|
|
|
2011-06-30 09:23:50 +00:00
|
|
|
if (qemuDomainObjBeginJobWithDriver(driver, vm, QEMU_JOB_DESTROY) < 0)
|
2009-11-03 18:26:32 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
2009-10-20 14:51:03 +00:00
|
|
|
if (!virDomainObjIsActive(vm)) {
|
2010-02-09 18:15:41 +00:00
|
|
|
qemuReportError(VIR_ERR_OPERATION_INVALID,
|
|
|
|
"%s", _("domain is not running"));
|
2009-11-03 18:26:32 +00:00
|
|
|
goto endjob;
|
2009-05-08 10:11:14 +00:00
|
|
|
}
|
2007-02-23 08:41:23 +00:00
|
|
|
|
2011-05-04 09:07:01 +00:00
|
|
|
qemuProcessStop(driver, vm, 0, VIR_DOMAIN_SHUTOFF_DESTROYED);
|
2008-12-04 21:09:20 +00:00
|
|
|
event = virDomainEventNewFromObj(vm,
|
|
|
|
VIR_DOMAIN_EVENT_STOPPED,
|
|
|
|
VIR_DOMAIN_EVENT_STOPPED_DESTROYED);
|
2011-07-04 10:56:13 +00:00
|
|
|
virDomainAuditStop(vm, "destroyed");
|
2010-10-26 16:34:16 +00:00
|
|
|
|
2008-12-04 21:06:41 +00:00
|
|
|
if (!vm->persistent) {
|
2011-06-06 08:28:38 +00:00
|
|
|
if (qemuDomainObjEndJob(driver, vm) > 0)
|
2011-09-22 06:02:03 +00:00
|
|
|
qemuDomainRemoveInactive(driver, vm);
|
2008-12-04 21:06:41 +00:00
|
|
|
vm = NULL;
|
|
|
|
}
|
2008-12-04 21:04:30 +00:00
|
|
|
ret = 0;
|
|
|
|
|
2009-11-03 18:26:32 +00:00
|
|
|
endjob:
|
2009-12-08 14:42:43 +00:00
|
|
|
if (vm &&
|
2011-06-06 08:28:38 +00:00
|
|
|
qemuDomainObjEndJob(driver, vm) == 0)
|
2009-12-08 14:42:43 +00:00
|
|
|
vm = NULL;
|
2009-11-03 18:26:32 +00:00
|
|
|
|
2008-12-04 21:04:30 +00:00
|
|
|
cleanup:
|
2008-12-04 21:06:41 +00:00
|
|
|
if (vm)
|
|
|
|
virDomainObjUnlock(vm);
|
2008-12-04 21:09:20 +00:00
|
|
|
if (event)
|
|
|
|
qemuDomainEventQueue(driver, event);
|
2008-12-04 21:06:41 +00:00
|
|
|
qemuDriverUnlock(driver);
|
2008-12-04 21:04:30 +00:00
|
|
|
return ret;
|
2007-02-14 01:40:09 +00:00
|
|
|
}
|
|
|
|
|
2011-07-20 16:41:24 +00:00
|
|
|
static int
|
|
|
|
qemuDomainDestroy(virDomainPtr dom)
|
|
|
|
{
|
|
|
|
return qemuDomainDestroyFlags(dom, 0);
|
|
|
|
}
|
2007-02-14 01:40:09 +00:00
|
|
|
|
2007-06-26 22:39:53 +00:00
|
|
|
static char *qemudDomainGetOSType(virDomainPtr dom) {
|
2008-12-04 21:04:30 +00:00
|
|
|
struct qemud_driver *driver = dom->conn->privateData;
|
|
|
|
virDomainObjPtr vm;
|
|
|
|
char *type = NULL;
|
2007-06-26 22:39:53 +00:00
|
|
|
|
2008-12-04 21:06:41 +00:00
|
|
|
qemuDriverLock(driver);
|
2008-12-04 21:04:30 +00:00
|
|
|
vm = virDomainFindByUUID(&driver->domains, dom->uuid);
|
2008-12-04 21:06:41 +00:00
|
|
|
qemuDriverUnlock(driver);
|
2007-06-26 22:39:53 +00:00
|
|
|
if (!vm) {
|
2009-04-19 15:30:50 +00:00
|
|
|
char uuidstr[VIR_UUID_STRING_BUFLEN];
|
|
|
|
virUUIDFormat(dom->uuid, uuidstr);
|
2010-02-09 18:15:41 +00:00
|
|
|
qemuReportError(VIR_ERR_NO_DOMAIN,
|
|
|
|
_("no domain with matching uuid '%s'"), uuidstr);
|
2008-12-04 21:04:30 +00:00
|
|
|
goto cleanup;
|
2007-06-26 22:39:53 +00:00
|
|
|
}
|
|
|
|
|
2008-12-04 21:04:30 +00:00
|
|
|
if (!(type = strdup(vm->def->os.type)))
|
2010-02-04 18:19:08 +00:00
|
|
|
virReportOOMError();
|
2008-12-04 21:04:30 +00:00
|
|
|
|
|
|
|
cleanup:
|
2008-12-04 21:06:41 +00:00
|
|
|
if (vm)
|
|
|
|
virDomainObjUnlock(vm);
|
2007-06-26 22:39:53 +00:00
|
|
|
return type;
|
|
|
|
}
|
|
|
|
|
2008-03-19 14:32:50 +00:00
|
|
|
/* Returns max memory in kb, 0 if error */
|
|
|
|
static unsigned long qemudDomainGetMaxMemory(virDomainPtr dom) {
|
2008-12-04 21:04:30 +00:00
|
|
|
struct qemud_driver *driver = dom->conn->privateData;
|
|
|
|
virDomainObjPtr vm;
|
|
|
|
unsigned long ret = 0;
|
2008-03-19 14:32:50 +00:00
|
|
|
|
2008-12-04 21:06:41 +00:00
|
|
|
qemuDriverLock(driver);
|
2008-12-04 21:04:30 +00:00
|
|
|
vm = virDomainFindByUUID(&driver->domains, dom->uuid);
|
2008-12-04 21:06:41 +00:00
|
|
|
qemuDriverUnlock(driver);
|
|
|
|
|
2008-03-19 14:32:50 +00:00
|
|
|
if (!vm) {
|
2008-07-25 09:01:25 +00:00
|
|
|
char uuidstr[VIR_UUID_STRING_BUFLEN];
|
|
|
|
virUUIDFormat(dom->uuid, uuidstr);
|
2010-02-09 18:15:41 +00:00
|
|
|
qemuReportError(VIR_ERR_NO_DOMAIN,
|
|
|
|
_("no domain with matching uuid '%s'"), uuidstr);
|
2008-12-04 21:04:30 +00:00
|
|
|
goto cleanup;
|
2008-03-19 14:32:50 +00:00
|
|
|
}
|
|
|
|
|
2010-10-12 14:43:39 +00:00
|
|
|
ret = vm->def->mem.max_balloon;
|
2008-12-04 21:04:30 +00:00
|
|
|
|
|
|
|
cleanup:
|
2008-12-04 21:06:41 +00:00
|
|
|
if (vm)
|
|
|
|
virDomainObjUnlock(vm);
|
2008-12-04 21:04:30 +00:00
|
|
|
return ret;
|
2008-03-19 14:32:50 +00:00
|
|
|
}
|
|
|
|
|
2011-03-02 08:13:09 +00:00
|
|
|
static int qemudDomainSetMemoryFlags(virDomainPtr dom, unsigned long newmem,
|
|
|
|
unsigned int flags) {
|
2008-12-04 21:04:30 +00:00
|
|
|
struct qemud_driver *driver = dom->conn->privateData;
|
2010-02-09 21:25:06 +00:00
|
|
|
qemuDomainObjPrivatePtr priv;
|
2008-12-04 21:04:30 +00:00
|
|
|
virDomainObjPtr vm;
|
2011-03-02 08:13:09 +00:00
|
|
|
virDomainDefPtr persistentDef = NULL;
|
2010-02-09 21:25:06 +00:00
|
|
|
int ret = -1, r;
|
2011-03-23 05:48:24 +00:00
|
|
|
bool isActive;
|
2008-03-19 14:32:50 +00:00
|
|
|
|
2011-06-08 06:33:33 +00:00
|
|
|
virCheckFlags(VIR_DOMAIN_AFFECT_LIVE |
|
|
|
|
VIR_DOMAIN_AFFECT_CONFIG |
|
2011-04-08 05:08:13 +00:00
|
|
|
VIR_DOMAIN_MEM_MAXIMUM, -1);
|
2011-03-02 08:13:09 +00:00
|
|
|
|
2008-12-04 21:06:41 +00:00
|
|
|
qemuDriverLock(driver);
|
2008-12-04 21:04:30 +00:00
|
|
|
vm = virDomainFindByUUID(&driver->domains, dom->uuid);
|
2008-12-04 21:06:41 +00:00
|
|
|
qemuDriverUnlock(driver);
|
2008-03-19 14:32:50 +00:00
|
|
|
if (!vm) {
|
2008-07-25 09:01:25 +00:00
|
|
|
char uuidstr[VIR_UUID_STRING_BUFLEN];
|
|
|
|
virUUIDFormat(dom->uuid, uuidstr);
|
2010-02-09 18:15:41 +00:00
|
|
|
qemuReportError(VIR_ERR_NO_DOMAIN,
|
|
|
|
_("no domain with matching uuid '%s'"), uuidstr);
|
2008-12-04 21:04:30 +00:00
|
|
|
goto cleanup;
|
2008-03-19 14:32:50 +00:00
|
|
|
}
|
|
|
|
|
2011-06-06 08:28:38 +00:00
|
|
|
if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
|
2009-11-03 18:26:32 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
2011-03-23 05:48:24 +00:00
|
|
|
isActive = virDomainObjIsActive(vm);
|
|
|
|
|
2011-06-08 06:33:33 +00:00
|
|
|
if (flags == VIR_DOMAIN_AFFECT_CURRENT) {
|
2011-03-23 05:48:24 +00:00
|
|
|
if (isActive)
|
2011-06-08 06:33:33 +00:00
|
|
|
flags = VIR_DOMAIN_AFFECT_LIVE;
|
2011-03-23 05:48:24 +00:00
|
|
|
else
|
2011-06-08 06:33:33 +00:00
|
|
|
flags = VIR_DOMAIN_AFFECT_CONFIG;
|
2011-03-23 05:48:24 +00:00
|
|
|
}
|
2011-04-08 05:08:13 +00:00
|
|
|
if (flags == VIR_DOMAIN_MEM_MAXIMUM) {
|
|
|
|
if (isActive)
|
2011-06-08 06:33:33 +00:00
|
|
|
flags = VIR_DOMAIN_AFFECT_LIVE | VIR_DOMAIN_MEM_MAXIMUM;
|
2011-04-08 05:08:13 +00:00
|
|
|
else
|
2011-06-08 06:33:33 +00:00
|
|
|
flags = VIR_DOMAIN_AFFECT_CONFIG | VIR_DOMAIN_MEM_MAXIMUM;
|
2011-04-08 05:08:13 +00:00
|
|
|
}
|
2011-03-23 05:48:24 +00:00
|
|
|
|
2011-06-08 06:33:33 +00:00
|
|
|
if (!isActive && (flags & VIR_DOMAIN_AFFECT_LIVE)) {
|
2010-10-26 15:31:19 +00:00
|
|
|
qemuReportError(VIR_ERR_OPERATION_INVALID,
|
|
|
|
"%s", _("domain is not running"));
|
|
|
|
goto endjob;
|
|
|
|
}
|
|
|
|
|
2011-06-08 06:33:33 +00:00
|
|
|
if (flags & VIR_DOMAIN_AFFECT_CONFIG) {
|
2011-03-02 08:13:09 +00:00
|
|
|
if (!vm->persistent) {
|
|
|
|
qemuReportError(VIR_ERR_OPERATION_INVALID, "%s",
|
|
|
|
_("cannot change persistent config of a transient domain"));
|
|
|
|
goto endjob;
|
|
|
|
}
|
|
|
|
if (!(persistentDef = virDomainObjGetPersistentDef(driver->caps, vm)))
|
|
|
|
goto endjob;
|
|
|
|
}
|
2009-10-07 13:33:36 +00:00
|
|
|
|
2011-04-08 05:08:13 +00:00
|
|
|
if (flags & VIR_DOMAIN_MEM_MAXIMUM) {
|
|
|
|
/* resize the maximum memory */
|
|
|
|
|
2011-06-08 06:33:33 +00:00
|
|
|
if (flags & VIR_DOMAIN_AFFECT_LIVE) {
|
2011-04-08 05:08:13 +00:00
|
|
|
qemuReportError(VIR_ERR_OPERATION_INVALID, "%s",
|
|
|
|
_("cannot resize the maximum memory on an "
|
|
|
|
"active domain"));
|
2011-03-02 08:13:09 +00:00
|
|
|
goto endjob;
|
2011-04-08 05:08:13 +00:00
|
|
|
}
|
2011-03-02 08:13:09 +00:00
|
|
|
|
2011-06-08 06:33:33 +00:00
|
|
|
if (flags & VIR_DOMAIN_AFFECT_CONFIG) {
|
2011-05-03 19:17:04 +00:00
|
|
|
/* Help clang 2.8 decipher the logic flow. */
|
|
|
|
sa_assert(persistentDef);
|
2011-04-08 05:08:13 +00:00
|
|
|
persistentDef->mem.max_balloon = newmem;
|
|
|
|
if (persistentDef->mem.cur_balloon > newmem)
|
|
|
|
persistentDef->mem.cur_balloon = newmem;
|
|
|
|
ret = virDomainSaveConfig(driver->configDir, persistentDef);
|
2011-03-02 08:13:09 +00:00
|
|
|
goto endjob;
|
|
|
|
}
|
|
|
|
|
2011-04-08 05:08:13 +00:00
|
|
|
} else {
|
|
|
|
/* resize the current memory */
|
|
|
|
|
|
|
|
if (newmem > vm->def->mem.max_balloon) {
|
|
|
|
qemuReportError(VIR_ERR_INVALID_ARG, "%s",
|
|
|
|
_("cannot set memory higher than max memory"));
|
|
|
|
goto endjob;
|
|
|
|
}
|
|
|
|
|
2011-06-08 06:33:33 +00:00
|
|
|
if (flags & VIR_DOMAIN_AFFECT_LIVE) {
|
2011-04-08 05:08:13 +00:00
|
|
|
priv = vm->privateData;
|
2011-07-28 23:18:24 +00:00
|
|
|
qemuDomainObjEnterMonitor(driver, vm);
|
2011-04-08 05:08:13 +00:00
|
|
|
r = qemuMonitorSetBalloon(priv->mon, newmem);
|
2011-06-06 08:28:38 +00:00
|
|
|
qemuDomainObjExitMonitor(driver, vm);
|
2011-07-04 10:56:13 +00:00
|
|
|
virDomainAuditMemory(vm, vm->def->mem.cur_balloon, newmem, "update",
|
|
|
|
r == 1);
|
2011-04-08 05:08:13 +00:00
|
|
|
if (r < 0)
|
|
|
|
goto endjob;
|
|
|
|
|
|
|
|
/* Lack of balloon support is a fatal error */
|
|
|
|
if (r == 0) {
|
|
|
|
qemuReportError(VIR_ERR_OPERATION_INVALID, "%s",
|
|
|
|
_("cannot set memory of an active domain"));
|
|
|
|
goto endjob;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-06-08 06:33:33 +00:00
|
|
|
if (flags & VIR_DOMAIN_AFFECT_CONFIG) {
|
2011-05-03 19:17:04 +00:00
|
|
|
sa_assert(persistentDef);
|
2011-04-08 05:08:13 +00:00
|
|
|
persistentDef->mem.cur_balloon = newmem;
|
|
|
|
ret = virDomainSaveConfig(driver->configDir, persistentDef);
|
|
|
|
goto endjob;
|
|
|
|
}
|
2009-03-27 11:44:29 +00:00
|
|
|
}
|
2008-12-04 21:04:30 +00:00
|
|
|
|
2010-02-09 21:25:06 +00:00
|
|
|
ret = 0;
|
2009-11-03 18:26:32 +00:00
|
|
|
endjob:
|
2011-06-06 08:28:38 +00:00
|
|
|
if (qemuDomainObjEndJob(driver, vm) == 0)
|
2009-12-08 14:42:43 +00:00
|
|
|
vm = NULL;
|
2009-11-03 18:26:32 +00:00
|
|
|
|
2008-12-04 21:04:30 +00:00
|
|
|
cleanup:
|
2008-12-04 21:06:41 +00:00
|
|
|
if (vm)
|
|
|
|
virDomainObjUnlock(vm);
|
2008-12-04 21:04:30 +00:00
|
|
|
return ret;
|
2008-03-19 14:32:50 +00:00
|
|
|
}
|
|
|
|
|
2011-04-08 05:08:13 +00:00
|
|
|
static int qemudDomainSetMemory(virDomainPtr dom, unsigned long newmem)
|
|
|
|
{
|
2011-06-08 06:33:33 +00:00
|
|
|
return qemudDomainSetMemoryFlags(dom, newmem, VIR_DOMAIN_AFFECT_LIVE);
|
2011-03-02 08:13:09 +00:00
|
|
|
}
|
|
|
|
|
2011-04-08 05:08:13 +00:00
|
|
|
static int qemudDomainSetMaxMemory(virDomainPtr dom, unsigned long memory)
|
|
|
|
{
|
|
|
|
return qemudDomainSetMemoryFlags(dom, memory, VIR_DOMAIN_MEM_MAXIMUM);
|
|
|
|
}
|
|
|
|
|
2011-05-10 08:26:06 +00:00
|
|
|
static int qemuDomainInjectNMI(virDomainPtr domain, unsigned int flags)
|
|
|
|
{
|
|
|
|
struct qemud_driver *driver = domain->conn->privateData;
|
|
|
|
virDomainObjPtr vm = NULL;
|
|
|
|
int ret = -1;
|
|
|
|
qemuDomainObjPrivatePtr priv;
|
|
|
|
|
|
|
|
virCheckFlags(0, -1);
|
|
|
|
|
|
|
|
qemuDriverLock(driver);
|
|
|
|
vm = virDomainFindByUUID(&driver->domains, domain->uuid);
|
|
|
|
if (!vm) {
|
|
|
|
char uuidstr[VIR_UUID_STRING_BUFLEN];
|
|
|
|
virUUIDFormat(domain->uuid, uuidstr);
|
|
|
|
qemuReportError(VIR_ERR_NO_DOMAIN,
|
|
|
|
_("no domain with matching uuid '%s'"), uuidstr);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!virDomainObjIsActive(vm)) {
|
|
|
|
qemuReportError(VIR_ERR_OPERATION_INVALID,
|
|
|
|
"%s", _("domain is not running"));
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
priv = vm->privateData;
|
|
|
|
|
2011-06-30 09:23:50 +00:00
|
|
|
if (qemuDomainObjBeginJobWithDriver(driver, vm, QEMU_JOB_MODIFY) < 0)
|
2011-05-10 08:26:06 +00:00
|
|
|
goto cleanup;
|
2011-07-28 23:18:24 +00:00
|
|
|
qemuDomainObjEnterMonitorWithDriver(driver, vm);
|
2011-05-10 08:26:06 +00:00
|
|
|
ret = qemuMonitorInjectNMI(priv->mon);
|
|
|
|
qemuDomainObjExitMonitorWithDriver(driver, vm);
|
2011-06-06 08:28:38 +00:00
|
|
|
if (qemuDomainObjEndJob(driver, vm) == 0) {
|
2011-05-10 08:26:06 +00:00
|
|
|
vm = NULL;
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
cleanup:
|
|
|
|
if (vm)
|
|
|
|
virDomainObjUnlock(vm);
|
|
|
|
qemuDriverUnlock(driver);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2011-07-21 07:55:56 +00:00
|
|
|
static int qemuDomainSendKey(virDomainPtr domain,
|
|
|
|
unsigned int codeset,
|
|
|
|
unsigned int holdtime,
|
|
|
|
unsigned int *keycodes,
|
|
|
|
int nkeycodes,
|
|
|
|
unsigned int flags)
|
|
|
|
{
|
|
|
|
struct qemud_driver *driver = domain->conn->privateData;
|
|
|
|
virDomainObjPtr vm = NULL;
|
|
|
|
int ret = -1;
|
|
|
|
qemuDomainObjPrivatePtr priv;
|
|
|
|
|
|
|
|
virCheckFlags(0, -1);
|
|
|
|
|
2011-08-25 16:45:49 +00:00
|
|
|
/* translate the keycode to RFB for qemu driver */
|
|
|
|
if (codeset != VIR_KEYCODE_SET_RFB) {
|
2011-07-21 07:55:56 +00:00
|
|
|
int i;
|
|
|
|
int keycode;
|
|
|
|
|
|
|
|
for (i = 0; i < nkeycodes; i++) {
|
2011-08-25 16:45:49 +00:00
|
|
|
keycode = virKeycodeValueTranslate(codeset, VIR_KEYCODE_SET_RFB,
|
2011-07-21 07:55:56 +00:00
|
|
|
keycodes[i]);
|
|
|
|
if (keycode < 0) {
|
|
|
|
qemuReportError(VIR_ERR_INTERNAL_ERROR,
|
2011-08-25 16:45:49 +00:00
|
|
|
_("cannot translate keycode %u of %s codeset to rfb keycode"),
|
2011-07-21 07:55:56 +00:00
|
|
|
keycodes[i],
|
|
|
|
virKeycodeSetTypeToString(codeset));
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
keycodes[i] = keycode;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
qemuDriverLock(driver);
|
|
|
|
vm = virDomainFindByUUID(&driver->domains, domain->uuid);
|
|
|
|
if (!vm) {
|
|
|
|
char uuidstr[VIR_UUID_STRING_BUFLEN];
|
|
|
|
virUUIDFormat(domain->uuid, uuidstr);
|
|
|
|
qemuReportError(VIR_ERR_NO_DOMAIN,
|
|
|
|
_("no domain with matching uuid '%s'"), uuidstr);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
priv = vm->privateData;
|
|
|
|
|
|
|
|
if (qemuDomainObjBeginJobWithDriver(driver, vm, QEMU_JOB_MODIFY) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
if (!virDomainObjIsActive(vm)) {
|
|
|
|
qemuReportError(VIR_ERR_OPERATION_INVALID,
|
|
|
|
"%s", _("domain is not running"));
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2011-07-28 23:18:24 +00:00
|
|
|
qemuDomainObjEnterMonitorWithDriver(driver, vm);
|
2011-07-21 07:55:56 +00:00
|
|
|
ret = qemuMonitorSendKey(priv->mon, holdtime, keycodes, nkeycodes);
|
|
|
|
qemuDomainObjExitMonitorWithDriver(driver, vm);
|
|
|
|
if (qemuDomainObjEndJob(driver, vm) == 0) {
|
|
|
|
vm = NULL;
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
cleanup:
|
|
|
|
if (vm)
|
|
|
|
virDomainObjUnlock(vm);
|
|
|
|
qemuDriverUnlock(driver);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2007-06-29 13:23:13 +00:00
|
|
|
static int qemudDomainGetInfo(virDomainPtr dom,
|
2011-04-08 05:08:13 +00:00
|
|
|
virDomainInfoPtr info)
|
|
|
|
{
|
2008-12-04 21:04:30 +00:00
|
|
|
struct qemud_driver *driver = dom->conn->privateData;
|
|
|
|
virDomainObjPtr vm;
|
|
|
|
int ret = -1;
|
2009-03-27 11:44:29 +00:00
|
|
|
int err;
|
|
|
|
unsigned long balloon;
|
2008-12-04 21:04:30 +00:00
|
|
|
|
2008-12-04 21:06:41 +00:00
|
|
|
qemuDriverLock(driver);
|
2008-12-04 21:04:30 +00:00
|
|
|
vm = virDomainFindByUUID(&driver->domains, dom->uuid);
|
2008-12-04 21:06:41 +00:00
|
|
|
qemuDriverUnlock(driver);
|
2007-02-14 01:40:09 +00:00
|
|
|
if (!vm) {
|
2009-04-19 15:30:50 +00:00
|
|
|
char uuidstr[VIR_UUID_STRING_BUFLEN];
|
|
|
|
virUUIDFormat(dom->uuid, uuidstr);
|
2010-02-09 18:15:41 +00:00
|
|
|
qemuReportError(VIR_ERR_NO_DOMAIN,
|
|
|
|
_("no domain with matching uuid '%s'"), uuidstr);
|
2008-12-04 21:04:30 +00:00
|
|
|
goto cleanup;
|
2007-02-14 01:40:09 +00:00
|
|
|
}
|
|
|
|
|
2011-05-04 09:07:01 +00:00
|
|
|
info->state = virDomainObjGetState(vm, NULL);
|
2007-02-14 01:40:09 +00:00
|
|
|
|
2009-10-20 14:51:03 +00:00
|
|
|
if (!virDomainObjIsActive(vm)) {
|
2007-06-26 22:39:53 +00:00
|
|
|
info->cpuTime = 0;
|
2007-02-14 01:40:09 +00:00
|
|
|
} else {
|
2009-07-27 15:30:25 +00:00
|
|
|
if (qemudGetProcessInfo(&(info->cpuTime), NULL, vm->pid, 0) < 0) {
|
2011-01-28 03:39:44 +00:00
|
|
|
qemuReportError(VIR_ERR_OPERATION_FAILED, "%s",
|
|
|
|
_("cannot read cputime for domain"));
|
2008-12-04 21:04:30 +00:00
|
|
|
goto cleanup;
|
2007-02-14 01:40:09 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2010-10-12 14:43:39 +00:00
|
|
|
info->maxMem = vm->def->mem.max_balloon;
|
2009-06-22 16:35:03 +00:00
|
|
|
|
Rename internal APis
Rename virDomainIsActive to virDomainObjIsActive, and
virInterfaceIsActive to virInterfaceObjIsActive and finally
virNetworkIsActive to virNetworkObjIsActive.
* src/conf/domain_conf.c, src/conf/domain_conf.h,
src/conf/interface_conf.h, src/conf/network_conf.c,
src/conf/network_conf.h, src/lxc/lxc_driver.c,
src/network/bridge_driver.c, src/opennebula/one_driver.c,
src/openvz/openvz_driver.c, src/qemu/qemu_driver.c,
src/test/test_driver.c, src/uml/uml_driver.c: Update for
renamed APIs.
2009-10-20 14:51:03 +00:00
|
|
|
if (virDomainObjIsActive(vm)) {
|
2009-10-09 20:13:06 +00:00
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
2010-08-12 16:32:16 +00:00
|
|
|
|
|
|
|
if ((vm->def->memballoon != NULL) &&
|
|
|
|
(vm->def->memballoon->model == VIR_DOMAIN_MEMBALLOON_MODEL_NONE)) {
|
2010-10-12 14:43:39 +00:00
|
|
|
info->memory = vm->def->mem.max_balloon;
|
2011-09-29 13:14:13 +00:00
|
|
|
} else if (qemuDomainJobAllowed(priv, QEMU_JOB_QUERY)) {
|
2011-06-06 08:28:38 +00:00
|
|
|
if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
|
2009-11-03 18:26:32 +00:00
|
|
|
goto cleanup;
|
2010-10-26 15:31:19 +00:00
|
|
|
if (!virDomainObjIsActive(vm))
|
|
|
|
err = 0;
|
|
|
|
else {
|
qemu: fix crash when mixing sync and async monitor jobs
Currently, we attempt to run sync job and async job at the same time. It
means that the monitor commands for two jobs can be run in any order.
In the function qemuDomainObjEnterMonitorInternal():
if (priv->job.active == QEMU_JOB_NONE && priv->job.asyncJob) {
if (qemuDomainObjBeginNestedJob(driver, obj) < 0)
We check whether the caller is an async job by priv->job.active and
priv->job.asynJob. But when an async job is running, and a sync job is
also running at the time of the check, then priv->job.active is not
QEMU_JOB_NONE. So we cannot check whether the caller is an async job
in the function qemuDomainObjEnterMonitorInternal(), and must instead
put the burden on the caller to tell us when an async command wants
to do a nested job.
Once the burden is on the caller, then only async monitor enters need
to worry about whether the VM is still running; for sync monitor enter,
the internal return is always 0, so lots of ignore_value can be dropped.
* src/qemu/THREADS.txt: Reflect new rules.
* src/qemu/qemu_domain.h (qemuDomainObjEnterMonitorAsync): New
prototype.
* src/qemu/qemu_process.h (qemuProcessStartCPUs)
(qemuProcessStopCPUs): Add parameter.
* src/qemu/qemu_migration.h (qemuMigrationToFile): Likewise.
(qemuMigrationWaitForCompletion): Make static.
* src/qemu/qemu_domain.c (qemuDomainObjEnterMonitorInternal): Add
parameter.
(qemuDomainObjEnterMonitorAsync): New function.
(qemuDomainObjEnterMonitor, qemuDomainObjEnterMonitorWithDriver):
Update callers.
* src/qemu/qemu_driver.c (qemuDomainSaveInternal)
(qemudDomainCoreDump, doCoreDump, processWatchdogEvent)
(qemudDomainSuspend, qemudDomainResume, qemuDomainSaveImageStartVM)
(qemuDomainSnapshotCreateActive, qemuDomainRevertToSnapshot):
Likewise.
* src/qemu/qemu_process.c (qemuProcessStopCPUs)
(qemuProcessFakeReboot, qemuProcessRecoverMigration)
(qemuProcessRecoverJob, qemuProcessStart): Likewise.
* src/qemu/qemu_migration.c (qemuMigrationToFile)
(qemuMigrationWaitForCompletion, qemuMigrationUpdateJobStatus)
(qemuMigrationJobStart, qemuDomainMigrateGraphicsRelocate)
(doNativeMigrate, doTunnelMigrate, qemuMigrationPerformJob)
(qemuMigrationPerformPhase, qemuMigrationFinish)
(qemuMigrationConfirm): Likewise.
* src/qemu/qemu_hotplug.c: Drop unneeded ignore_value.
2011-07-28 23:18:24 +00:00
|
|
|
qemuDomainObjEnterMonitor(driver, vm);
|
2010-10-26 15:31:19 +00:00
|
|
|
err = qemuMonitorGetBalloonInfo(priv->mon, &balloon);
|
2011-06-06 08:28:38 +00:00
|
|
|
qemuDomainObjExitMonitor(driver, vm);
|
2010-10-26 15:31:19 +00:00
|
|
|
}
|
2011-06-06 08:28:38 +00:00
|
|
|
if (qemuDomainObjEndJob(driver, vm) == 0) {
|
2010-10-26 15:31:19 +00:00
|
|
|
vm = NULL;
|
2009-11-03 18:26:32 +00:00
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2011-10-05 14:07:36 +00:00
|
|
|
if (err < 0) {
|
|
|
|
/* We couldn't get current memory allocation but that's not
|
|
|
|
* a show stopper; we wouldn't get it if there was a job
|
|
|
|
* active either
|
|
|
|
*/
|
|
|
|
info->memory = vm->def->mem.cur_balloon;
|
|
|
|
} else if (err == 0) {
|
2009-11-03 18:26:32 +00:00
|
|
|
/* Balloon not supported, so maxmem is always the allocation */
|
2010-10-12 14:43:39 +00:00
|
|
|
info->memory = vm->def->mem.max_balloon;
|
2011-10-05 14:07:36 +00:00
|
|
|
} else {
|
2009-11-03 18:26:32 +00:00
|
|
|
info->memory = balloon;
|
2011-10-05 14:07:36 +00:00
|
|
|
}
|
2009-11-03 18:26:32 +00:00
|
|
|
} else {
|
2010-10-12 14:43:39 +00:00
|
|
|
info->memory = vm->def->mem.cur_balloon;
|
2009-11-03 18:26:32 +00:00
|
|
|
}
|
2009-06-22 16:35:03 +00:00
|
|
|
} else {
|
2010-10-12 14:43:39 +00:00
|
|
|
info->memory = vm->def->mem.cur_balloon;
|
2009-06-22 16:35:03 +00:00
|
|
|
}
|
|
|
|
|
2007-06-26 22:39:53 +00:00
|
|
|
info->nrVirtCpu = vm->def->vcpus;
|
2008-12-04 21:04:30 +00:00
|
|
|
ret = 0;
|
|
|
|
|
|
|
|
cleanup:
|
2008-12-04 21:06:41 +00:00
|
|
|
if (vm)
|
|
|
|
virDomainObjUnlock(vm);
|
2008-12-04 21:04:30 +00:00
|
|
|
return ret;
|
2007-02-14 01:40:09 +00:00
|
|
|
}
|
|
|
|
|
static int
qemuDomainGetState(virDomainPtr dom,
                   int *state,
                   int *reason,
                   unsigned int flags)
{
    struct qemud_driver *driver = dom->conn->privateData;
    virDomainObjPtr vm;
    int ret = -1;

    virCheckFlags(0, -1);

    qemuDriverLock(driver);
    vm = virDomainFindByUUID(&driver->domains, dom->uuid);
    qemuDriverUnlock(driver);

    if (!vm) {
        char uuidstr[VIR_UUID_STRING_BUFLEN];
        virUUIDFormat(dom->uuid, uuidstr);
        qemuReportError(VIR_ERR_NO_DOMAIN,
                        _("no domain with matching uuid '%s'"), uuidstr);
        goto cleanup;
    }

    *state = virDomainObjGetState(vm, reason);
    ret = 0;

cleanup:
    if (vm)
        virDomainObjUnlock(vm);
    return ret;
}

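/* Report how responsive the domain's monitor currently is: ERROR after a
 * monitor failure, JOB while an API job is active but no monitor command
 * is outstanding (elapsed time measured from job.start), OCCUPIED while a
 * monitor command is outstanding (measured from monStart), OK otherwise. */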
static int
qemuDomainGetControlInfo(virDomainPtr dom,
                         virDomainControlInfoPtr info,
                         unsigned int flags)
{
    struct qemud_driver *driver = dom->conn->privateData;
    virDomainObjPtr vm;
    qemuDomainObjPrivatePtr priv;
    int ret = -1;

    virCheckFlags(0, -1);

    qemuDriverLock(driver);
    vm = virDomainFindByUUID(&driver->domains, dom->uuid);
    qemuDriverUnlock(driver);

    if (!vm) {
        char uuidstr[VIR_UUID_STRING_BUFLEN];
        virUUIDFormat(dom->uuid, uuidstr);
        qemuReportError(VIR_ERR_NO_DOMAIN,
                        _("no domain with matching uuid '%s'"), uuidstr);
        goto cleanup;
    }

    if (!virDomainObjIsActive(vm)) {
        qemuReportError(VIR_ERR_OPERATION_INVALID,
                        "%s", _("domain is not running"));
        goto cleanup;
    }

    priv = vm->privateData;

    memset(info, 0, sizeof(*info));

    if (priv->monError) {
        info->state = VIR_DOMAIN_CONTROL_ERROR;
    } else if (priv->job.active) {
        if (!priv->monStart) {
            info->state = VIR_DOMAIN_CONTROL_JOB;
            if (virTimeMs(&info->stateTime) < 0)
                goto cleanup;
            info->stateTime -= priv->job.start;
        } else {
            info->state = VIR_DOMAIN_CONTROL_OCCUPIED;
            if (virTimeMs(&info->stateTime) < 0)
                goto cleanup;
            info->stateTime -= priv->monStart;
        }
    } else {
        info->state = VIR_DOMAIN_CONTROL_OK;
    }

    ret = 0;

cleanup:
    if (vm)
        virDomainObjUnlock(vm);
    return ret;
}

#define QEMUD_SAVE_MAGIC "LibvirtQemudSave"
#define QEMUD_SAVE_PARTIAL "LibvirtQemudPart"
#define QEMUD_SAVE_VERSION 2

verify(sizeof(QEMUD_SAVE_MAGIC) == sizeof(QEMUD_SAVE_PARTIAL));

enum qemud_save_formats {
    QEMUD_SAVE_FORMAT_RAW = 0,
    QEMUD_SAVE_FORMAT_GZIP = 1,
    QEMUD_SAVE_FORMAT_BZIP2 = 2,
    /*
     * Deprecated by xz and never used as part of a release
     * QEMUD_SAVE_FORMAT_LZMA
     */
    QEMUD_SAVE_FORMAT_XZ = 3,
    QEMUD_SAVE_FORMAT_LZOP = 4,
    /* Note: add new members only at the end.
       These values are used in the on-disk format.
       Do not change or re-use numbers. */

    QEMUD_SAVE_FORMAT_LAST
};

VIR_ENUM_DECL(qemudSaveCompression)
VIR_ENUM_IMPL(qemudSaveCompression, QEMUD_SAVE_FORMAT_LAST,
              "raw",
              "gzip",
              "bzip2",
              "xz",
              "lzop")

struct qemud_save_header {
    char magic[sizeof(QEMUD_SAVE_MAGIC)-1];
    uint32_t version;
    uint32_t xml_len;
    uint32_t was_running;
    uint32_t compressed;
    uint32_t unused[15];
};

static inline void
bswap_header(struct qemud_save_header *hdr) {
    hdr->version = bswap_32(hdr->version);
    hdr->xml_len = bswap_32(hdr->xml_len);
    hdr->was_running = bswap_32(hdr->was_running);
    hdr->compressed = bswap_32(hdr->compressed);
}

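/* Illustrative sketch only, not part of the original driver: one way a
 * reader of a save image could use the definitions above to distinguish a
 * complete image from a partial one (the PARTIAL magic marks an image whose
 * qemu state was never fully written) and to cope with a byte-swapped
 * header.  The helper name and its return-value convention are invented
 * for this example. */
static int
qemuExampleInspectHeader(struct qemud_save_header *header)
{
    if (memcmp(header->magic, QEMUD_SAVE_MAGIC, sizeof(header->magic)) != 0) {
        if (memcmp(header->magic, QEMUD_SAVE_PARTIAL,
                   sizeof(header->magic)) == 0)
            return -2;      /* save was interrupted; image is incomplete */
        return -1;          /* not a libvirt-qemu save image at all */
    }
    /* A header written on a host of opposite endianness shows up as an
     * implausibly large version number; byte-swap it and check again. */
    if (header->version > QEMUD_SAVE_VERSION) {
        bswap_header(header);
        if (header->version > QEMUD_SAVE_VERSION)
            return -1;      /* genuinely unsupported version */
    }
    return 0;               /* complete image, usable on this host */
}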
/* return -errno on failure, or 0 on success */
static int
qemuDomainSaveHeader(int fd, const char *path, char *xml,
                     struct qemud_save_header *header)
{
    int ret = 0;

    if (safewrite(fd, header, sizeof(*header)) != sizeof(*header)) {
        ret = -errno;
        qemuReportError(VIR_ERR_OPERATION_FAILED,
                        _("failed to write header to domain save file '%s'"),
                        path);
        goto endjob;
    }

    if (safewrite(fd, xml, header->xml_len) != header->xml_len) {
        ret = -errno;
        qemuReportError(VIR_ERR_OPERATION_FAILED,
                        _("failed to write xml to '%s'"), path);
        goto endjob;
    }
endjob:
    return ret;
}

/* Given an enum qemud_save_formats compression level, return the name
 * of the program to run, or NULL if no program is needed. */
static const char *
qemuCompressProgramName(int compress)
{
    return (compress == QEMUD_SAVE_FORMAT_RAW ? NULL :
            qemudSaveCompressionTypeToString(compress));
}

/* Internal function to properly create or open existing files, with
 * ownership affected by qemu driver setup. */
static int
qemuOpenFile(struct qemud_driver *driver, const char *path, int oflags,
             bool *needUnlink, bool *bypassSecurityDriver)
{
    struct stat sb;
    bool is_reg = true;
    bool need_unlink = false;
    bool bypass_security = false;
    int fd = -1;
    uid_t uid = getuid();
    gid_t gid = getgid();

    /* path might be a pre-existing block dev, in which case
     * we need to skip the create step, and also avoid unlink
     * in the failure case */
    if (oflags & O_CREAT) {
        need_unlink = true;
        if (stat(path, &sb) == 0) {
            is_reg = !!S_ISREG(sb.st_mode);
            /* If the path is a regular file which already exists
             * and dynamic_ownership is off, we don't want to
             * change its ownership, just open it as-is */
            if (is_reg && !driver->dynamicOwnership) {
                uid = sb.st_uid;
                gid = sb.st_gid;
            }
        }
    }

    /* First try creating the file as root */
    if (!is_reg) {
        fd = open(path, oflags & ~O_CREAT);
        if (fd < 0) {
            virReportSystemError(errno, _("unable to open %s"), path);
            goto cleanup;
        }
    } else {
        if ((fd = virFileOpenAs(path, oflags, S_IRUSR | S_IWUSR,
                                uid, gid, 0)) < 0) {
            /* If we failed as root, and the error was permission-denied
               (EACCES or EPERM), assume it's on a network-connected share
               where root access is restricted (e.g. root-squashed NFS). If
               the qemu user (driver->user) is non-root, just set a flag to
               bypass security driver shenanigans, and retry the operation
               after doing setuid to the qemu user */
            if ((fd != -EACCES && fd != -EPERM) ||
                driver->user == getuid()) {
                virReportSystemError(-fd,
                                     _("Failed to create file '%s'"),
                                     path);
                goto cleanup;
            }

            /* On Linux we can also verify the FS-type of the directory. */
            switch (virStorageFileIsSharedFS(path)) {
            case 1:
                /* it was on a network share, so we'll continue
                 * as outlined above
                 */
                break;

            case -1:
                virReportSystemError(errno,
                                     _("Failed to create file "
                                       "'%s': couldn't determine fs type"),
                                     path);
                goto cleanup;

            case 0:
            default:
                /* local file - log the error returned by virFileOpenAs */
                virReportSystemError(-fd,
                                     _("Failed to create file '%s'"),
                                     path);
                goto cleanup;
            }

            /* Retry creating the file as driver->user */

            if ((fd = virFileOpenAs(path, oflags,
                                    S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP,
                                    driver->user, driver->group,
                                    VIR_FILE_OPEN_AS_UID)) < 0) {
                virReportSystemError(-fd,
                                     _("Error from child process creating '%s'"),
                                     path);
                goto cleanup;
            }

            /* Since we had to setuid to create the file, and the fstype
               is NFS, we assume it's a root-squashing NFS share, and that
               the security driver stuff would have failed anyway */

            bypass_security = true;
        }
    }
cleanup:
    if (needUnlink)
        *needUnlink = need_unlink;
    if (bypassSecurityDriver)
        *bypassSecurityDriver = bypass_security;

    return fd;
}

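/* Illustrative sketch only, not part of the original driver: a minimal
 * caller of qemuOpenFile() showing how the needUnlink and
 * bypassSecurityDriver out-parameters are meant to be consumed.  The
 * function name and its buffer arguments are invented for this sketch. */
static int
qemuExampleWriteBlob(struct qemud_driver *driver, const char *path,
                     const void *buf, size_t buflen)
{
    bool needUnlink = false;
    bool bypassSecurityDriver = false;
    int ret = -1;
    int fd = qemuOpenFile(driver, path, O_WRONLY | O_CREAT | O_TRUNC,
                          &needUnlink, &bypassSecurityDriver);

    if (fd < 0)
        return -1;
    /* bypassSecurityDriver == true means the file was created via the
     * root-squash fallback, so callers skip security relabeling for it. */
    if (safewrite(fd, buf, buflen) != (ssize_t)buflen)
        goto cleanup;
    ret = 0;

cleanup:
    VIR_FORCE_CLOSE(fd);
    if (ret != 0 && needUnlink)
        unlink(path);
    return ret;
}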
/* This internal function expects the driver lock to already be held on
 * entry and the vm must be active + locked. Vm will be unlocked and
 * potentially free'd after this returns (e.g. transient VMs are freed on
 * shutdown). So 'vm' must not be referenced by the caller after
 * this returns (whether returning success or failure).
 */
static int
qemuDomainSaveInternal(struct qemud_driver *driver, virDomainPtr dom,
                       virDomainObjPtr vm, const char *path,
                       int compressed, const char *xmlin, unsigned int flags)
{
    char *xml = NULL;
    struct qemud_save_header header;
    bool bypassSecurityDriver = false;
    int ret = -1;
    int rc;
    virDomainEventPtr event = NULL;
    qemuDomainObjPrivatePtr priv;
    bool needUnlink = false;
    size_t len;
    unsigned long long offset;
    unsigned long long pad;
    int fd = -1;
    int directFlag = 0;
    virFileDirectFdPtr directFd = NULL;
    bool bypass_cache = flags & VIR_DOMAIN_SAVE_BYPASS_CACHE;

    if (qemuProcessAutoDestroyActive(driver, vm)) {
        qemuReportError(VIR_ERR_OPERATION_INVALID,
                        "%s", _("domain is marked for auto destroy"));
        goto cleanup;
    }

    memset(&header, 0, sizeof(header));
    memcpy(header.magic, QEMUD_SAVE_PARTIAL, sizeof(header.magic));
    header.version = QEMUD_SAVE_VERSION;

    header.compressed = compressed;

    priv = vm->privateData;

    if (qemuDomainObjBeginAsyncJobWithDriver(driver, vm,
                                             QEMU_ASYNC_JOB_SAVE) < 0)
        goto cleanup;

    memset(&priv->job.info, 0, sizeof(priv->job.info));
    priv->job.info.type = VIR_DOMAIN_JOB_UNBOUNDED;

    /* Pause */
    if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
        header.was_running = 1;
        if (qemuProcessStopCPUs(driver, vm, VIR_DOMAIN_PAUSED_SAVE,
                                QEMU_ASYNC_JOB_SAVE) < 0)
            goto endjob;

        if (!virDomainObjIsActive(vm)) {
            qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                            _("guest unexpectedly quit"));
            goto endjob;
        }
    }
    /* libvirt.c already guaranteed these two flags are exclusive. */
    if (flags & VIR_DOMAIN_SAVE_RUNNING)
        header.was_running = 1;
    else if (flags & VIR_DOMAIN_SAVE_PAUSED)
        header.was_running = 0;

    /* Get XML for the domain. Restore needs only the inactive xml,
     * including secure. We should get the same result whether xmlin
     * is NULL or whether it was the live xml of the domain moments
     * before. */
    if (xmlin) {
        virDomainDefPtr def = NULL;

        if (!(def = virDomainDefParseString(driver->caps, xmlin,
                                            QEMU_EXPECTED_VIRT_TYPES,
                                            VIR_DOMAIN_XML_INACTIVE))) {
            goto endjob;
        }
        if (!virDomainDefCheckABIStability(vm->def, def)) {
            virDomainDefFree(def);
            goto endjob;
        }
        xml = virDomainDefFormat(def, (VIR_DOMAIN_XML_INACTIVE |
                                       VIR_DOMAIN_XML_SECURE));
    } else {
        xml = virDomainDefFormat(vm->def, (VIR_DOMAIN_XML_INACTIVE |
                                           VIR_DOMAIN_XML_SECURE));
    }
    if (!xml) {
        qemuReportError(VIR_ERR_OPERATION_FAILED,
                        "%s", _("failed to get domain xml"));
        goto endjob;
    }
    len = strlen(xml) + 1;
    offset = sizeof(header) + len;

    /* Due to the way we append QEMU state on our header with dd,
     * we need to ensure there's a 512 byte boundary. Unfortunately
     * we don't have an explicit offset in the header, so we fake
     * it by padding the XML string with NUL bytes. Additionally,
     * we want to ensure that virDomainSaveImageDefineXML can supply
     * slightly larger XML, so we add a minimum padding prior to
     * rounding out to page boundaries.
     */
    pad = 1024;
    pad += (QEMU_MONITOR_MIGRATE_TO_FILE_BS -
            ((offset + pad) % QEMU_MONITOR_MIGRATE_TO_FILE_BS));
    if (VIR_EXPAND_N(xml, len, pad) < 0) {
        virReportOOMError();
        goto endjob;
    }
    offset += pad;
    header.xml_len = len;

    /* Obtain the file handle. */
    if (bypass_cache) {
        directFlag = virFileDirectFdFlag();
        if (directFlag < 0) {
            qemuReportError(VIR_ERR_OPERATION_FAILED, "%s",
                            _("bypass cache unsupported by this system"));
            goto cleanup;
        }
    }
    fd = qemuOpenFile(driver, path, O_WRONLY | O_TRUNC | O_CREAT | directFlag,
                      &needUnlink, &bypassSecurityDriver);
    if (fd < 0)
        goto endjob;
    if (bypass_cache && (directFd = virFileDirectFdNew(&fd, path)) == NULL)
        goto endjob;

    /* Write header to file, followed by XML */
    if (qemuDomainSaveHeader(fd, path, xml, &header) < 0) {
        VIR_FORCE_CLOSE(fd);
        goto endjob;
    }

    /* Perform the migration */
    if (qemuMigrationToFile(driver, vm, fd, offset, path,
                            qemuCompressProgramName(compressed),
                            bypassSecurityDriver,
                            QEMU_ASYNC_JOB_SAVE) < 0)
        goto endjob;

    /* Touch up file header to mark image complete. */
    if (bypass_cache) {
        /* Reopen the file to touch up the header, since we aren't set
         * up to seek backwards on directFd. The reopened fd will
         * trigger a single page of file system cache pollution, but
         * that's acceptable. */
        if (VIR_CLOSE(fd) < 0) {
            virReportSystemError(errno, _("unable to close %s"), path);
            goto endjob;
        }
        if (virFileDirectFdClose(directFd) < 0)
            goto endjob;
        fd = qemuOpenFile(driver, path, O_WRONLY, NULL, NULL);
        if (fd < 0)
            goto endjob;
    } else {
        if (lseek(fd, 0, SEEK_SET) != 0) {
            virReportSystemError(errno, _("unable to seek %s"), path);
            goto endjob;
        }
    }
    memcpy(header.magic, QEMUD_SAVE_MAGIC, sizeof(header.magic));
    if (safewrite(fd, &header, sizeof(header)) != sizeof(header)) {
        virReportSystemError(errno, _("unable to write %s"), path);
        goto endjob;
    }
    if (VIR_CLOSE(fd) < 0) {
        virReportSystemError(errno, _("unable to close %s"), path);
        goto endjob;
    }

    ret = 0;

    /* Shut it down */
    qemuProcessStop(driver, vm, 0, VIR_DOMAIN_SHUTOFF_SAVED);
    virDomainAuditStop(vm, "saved");
    event = virDomainEventNewFromObj(vm,
                                     VIR_DOMAIN_EVENT_STOPPED,
                                     VIR_DOMAIN_EVENT_STOPPED_SAVED);
    if (!vm->persistent) {
        if (qemuDomainObjEndAsyncJob(driver, vm) > 0)
            qemuDomainRemoveInactive(driver, vm);
        vm = NULL;
    }

endjob:
    if (vm) {
        if (ret != 0) {
            if (header.was_running && virDomainObjIsActive(vm)) {
                rc = qemuProcessStartCPUs(driver, vm, dom->conn,
                                          VIR_DOMAIN_RUNNING_SAVE_CANCELED,
                                          QEMU_ASYNC_JOB_SAVE);
                if (rc < 0)
                    VIR_WARN("Unable to resume guest CPUs after save failure");
            }
        }
        if (qemuDomainObjEndAsyncJob(driver, vm) == 0)
            vm = NULL;
    }

cleanup:
    VIR_FORCE_CLOSE(fd);
    virFileDirectFdFree(directFd);
    VIR_FREE(xml);
    if (ret != 0 && needUnlink)
        unlink(path);
    if (event)
        qemuDomainEventQueue(driver, event);
    if (vm)
        virDomainObjUnlock(vm);
    return ret;
}

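/* Worked example of the padding arithmetic in qemuDomainSaveInternal()
 * above (illustrative; the concrete block size is an assumption here, the
 * real value comes from QEMU_MONITOR_MIGRATE_TO_FILE_BS).  The header is
 * 92 bytes (16-byte magic plus 19 uint32_t fields).  With a block size of
 * 512 and 1000 bytes of XML including its trailing NUL:
 *
 *     offset = 92 + 1000                      = 1092
 *     pad    = 1024 + (512 - (1092 + 1024) % 512)
 *            = 1024 + (512 - 68)              = 1468
 *     offset = 1092 + 1468                    = 2560  (5 * 512)
 *
 * so the qemu migration stream starts on a block boundary and at least
 * 1024 spare NUL bytes remain for virDomainSaveImageDefineXML to reuse. */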
/* Returns true if a compression program is available in PATH */
static bool qemudCompressProgramAvailable(enum qemud_save_formats compress)
{
    const char *prog;
    char *c;

    if (compress == QEMUD_SAVE_FORMAT_RAW)
        return true;
    prog = qemudSaveCompressionTypeToString(compress);
    c = virFindFileInPath(prog);
    if (!c)
        return false;
    VIR_FREE(c);
    return true;
}

static int
qemuDomainSaveFlags(virDomainPtr dom, const char *path, const char *dxml,
                    unsigned int flags)
{
    struct qemud_driver *driver = dom->conn->privateData;
    int compressed;
    int ret = -1;
    virDomainObjPtr vm = NULL;

    virCheckFlags(VIR_DOMAIN_SAVE_BYPASS_CACHE |
                  VIR_DOMAIN_SAVE_RUNNING |
                  VIR_DOMAIN_SAVE_PAUSED, -1);

    qemuDriverLock(driver);

    if (driver->saveImageFormat == NULL)
        compressed = QEMUD_SAVE_FORMAT_RAW;
    else {
        compressed = qemudSaveCompressionTypeFromString(driver->saveImageFormat);
        if (compressed < 0) {
            qemuReportError(VIR_ERR_OPERATION_FAILED,
                            "%s", _("Invalid save image format specified "
                                    "in configuration file"));
            goto cleanup;
        }
        if (!qemudCompressProgramAvailable(compressed)) {
            qemuReportError(VIR_ERR_OPERATION_FAILED,
                            "%s", _("Compression program for image format "
                                    "in configuration file isn't available"));
            goto cleanup;
        }
    }

    vm = virDomainFindByUUID(&driver->domains, dom->uuid);
    if (!vm) {
        char uuidstr[VIR_UUID_STRING_BUFLEN];
        virUUIDFormat(dom->uuid, uuidstr);
        qemuReportError(VIR_ERR_NO_DOMAIN,
                        _("no domain with matching uuid '%s'"), uuidstr);
        goto cleanup;
    }

    if (!virDomainObjIsActive(vm)) {
        qemuReportError(VIR_ERR_OPERATION_INVALID,
                        "%s", _("domain is not running"));
        goto cleanup;
    }

    ret = qemuDomainSaveInternal(driver, dom, vm, path, compressed,
                                 dxml, flags);
    vm = NULL;

cleanup:
    if (vm)
        virDomainObjUnlock(vm);
    qemuDriverUnlock(driver);

    return ret;
}

static int
qemuDomainSave(virDomainPtr dom, const char *path)
{
    return qemuDomainSaveFlags(dom, path, NULL, 0);
}

static char *
qemuDomainManagedSavePath(struct qemud_driver *driver, virDomainObjPtr vm) {
    char *ret;

    if (virAsprintf(&ret, "%s/%s.save", driver->saveDir, vm->def->name) < 0) {
        virReportOOMError();
        return(NULL);
    }

    return(ret);
}

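/* Managed save stores the image at the path built by
 * qemuDomainManagedSavePath() above; for example, with a hypothetical
 * saveDir of "/var/lib/libvirt/qemu/save" and a domain named "demo" the
 * image lands in "/var/lib/libvirt/qemu/save/demo.save".  The raw
 * (uncompressed) format is always used for managed save. */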
static int
qemuDomainManagedSave(virDomainPtr dom, unsigned int flags)
{
    struct qemud_driver *driver = dom->conn->privateData;
    virDomainObjPtr vm = NULL;
    char *name = NULL;
    int ret = -1;
    int compressed;

    virCheckFlags(VIR_DOMAIN_SAVE_BYPASS_CACHE |
                  VIR_DOMAIN_SAVE_RUNNING |
                  VIR_DOMAIN_SAVE_PAUSED, -1);

    qemuDriverLock(driver);
    vm = virDomainFindByUUID(&driver->domains, dom->uuid);
    if (!vm) {
        char uuidstr[VIR_UUID_STRING_BUFLEN];
        virUUIDFormat(dom->uuid, uuidstr);
        qemuReportError(VIR_ERR_NO_DOMAIN,
                        _("no domain with matching uuid '%s'"), uuidstr);
        goto cleanup;
    }

    if (!virDomainObjIsActive(vm)) {
        qemuReportError(VIR_ERR_OPERATION_INVALID,
                        "%s", _("domain is not running"));
        goto cleanup;
    }
    if (!vm->persistent) {
        qemuReportError(VIR_ERR_OPERATION_INVALID, "%s",
                        _("cannot do managed save for transient domain"));
        goto cleanup;
    }

    name = qemuDomainManagedSavePath(driver, vm);
    if (name == NULL)
        goto cleanup;

    VIR_INFO("Saving state to %s", name);

    compressed = QEMUD_SAVE_FORMAT_RAW;
    ret = qemuDomainSaveInternal(driver, dom, vm, name, compressed,
                                 NULL, flags);
    vm = NULL;

cleanup:
    if (vm)
        virDomainObjUnlock(vm);
    qemuDriverUnlock(driver);
    VIR_FREE(name);

    return ret;
}

static int
qemuDomainHasManagedSaveImage(virDomainPtr dom, unsigned int flags)
{
    struct qemud_driver *driver = dom->conn->privateData;
    virDomainObjPtr vm = NULL;
    int ret = -1;
    char *name = NULL;

    virCheckFlags(0, -1);

    qemuDriverLock(driver);
    vm = virDomainFindByUUID(&driver->domains, dom->uuid);
    if (!vm) {
        char uuidstr[VIR_UUID_STRING_BUFLEN];
        virUUIDFormat(dom->uuid, uuidstr);
        qemuReportError(VIR_ERR_NO_DOMAIN,
                        _("no domain with matching uuid '%s'"), uuidstr);
        goto cleanup;
    }

    name = qemuDomainManagedSavePath(driver, vm);
    if (name == NULL)
        goto cleanup;

    ret = virFileExists(name);

cleanup:
    VIR_FREE(name);
    if (vm)
        virDomainObjUnlock(vm);
    qemuDriverUnlock(driver);
    return ret;
}

static int
|
|
|
|
qemuDomainManagedSaveRemove(virDomainPtr dom, unsigned int flags)
|
|
|
|
{
|
|
|
|
struct qemud_driver *driver = dom->conn->privateData;
|
|
|
|
virDomainObjPtr vm = NULL;
|
|
|
|
int ret = -1;
|
|
|
|
char *name = NULL;
|
|
|
|
|
2010-04-16 12:04:31 +00:00
|
|
|
virCheckFlags(0, -1);
|
2010-04-01 15:57:32 +00:00
|
|
|
|
|
|
|
qemuDriverLock(driver);
|
|
|
|
vm = virDomainFindByUUID(&driver->domains, dom->uuid);
|
|
|
|
if (!vm) {
|
|
|
|
char uuidstr[VIR_UUID_STRING_BUFLEN];
|
|
|
|
virUUIDFormat(dom->uuid, uuidstr);
|
|
|
|
qemuReportError(VIR_ERR_NO_DOMAIN,
|
|
|
|
_("no domain with matching uuid '%s'"), uuidstr);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
name = qemuDomainManagedSavePath(driver, vm);
|
|
|
|
if (name == NULL)
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
ret = unlink(name);
|
|
|
|
|
|
|
|
cleanup:
|
|
|
|
VIR_FREE(name);
|
|
|
|
if (vm)
|
|
|
|
virDomainObjUnlock(vm);
|
|
|
|
qemuDriverUnlock(driver);
|
|
|
|
return ret;
|
|
|
|
}
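/* Common helper for all core-dump paths (explicit virDomainCoreDump calls
 * and the automatic watchdog dumps further below).  It creates the target
 * file, wraps the fd for O_DIRECT when cache bypass is requested, and
 * streams guest memory into it via the migrate-to-file machinery; on any
 * failure the partially written file is unlinked. */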
static int
doCoreDump(struct qemud_driver *driver,
           virDomainObjPtr vm,
           const char *path,
           enum qemud_save_formats compress,
           bool bypass_cache)
{
    int fd = -1;
    int ret = -1;
    virFileDirectFdPtr directFd = NULL;
    int directFlag = 0;

    /* Create an empty file with appropriate ownership. */
    if (bypass_cache) {
        directFlag = virFileDirectFdFlag();
        if (directFlag < 0) {
            qemuReportError(VIR_ERR_OPERATION_FAILED, "%s",
                            _("bypass cache unsupported by this system"));
            goto cleanup;
        }
    }
    /* Core dumps usually imply last-ditch analysis efforts are
     * desired, so we intentionally do not unlink even if a file was
     * created. */
    if ((fd = qemuOpenFile(driver, path,
                           O_CREAT | O_TRUNC | O_WRONLY | directFlag,
                           NULL, NULL)) < 0)
        goto cleanup;

    if (bypass_cache && (directFd = virFileDirectFdNew(&fd, path)) == NULL)
        goto cleanup;

    if (qemuMigrationToFile(driver, vm, fd, 0, path,
                            qemuCompressProgramName(compress), false,
                            QEMU_ASYNC_JOB_DUMP) < 0)
        goto cleanup;

    if (VIR_CLOSE(fd) < 0) {
        virReportSystemError(errno,
                             _("unable to save file %s"),
                             path);
        goto cleanup;
    }
    if (virFileDirectFdClose(directFd) < 0)
        goto cleanup;

    ret = 0;

cleanup:
    VIR_FORCE_CLOSE(fd);
    virFileDirectFdFree(directFd);
    if (ret != 0)
        unlink(path);
    return ret;
}

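/* Pick the compression format for dump images from the driver config.
 * Anything invalid, or lacking its helper binary, falls back to raw with a
 * warning.  Illustrative qemu.conf snippet (an assumption about the admin's
 * setup, not part of this file):
 *
 *   # /etc/libvirt/qemu.conf
 *   dump_image_format = "gzip"
 */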
static enum qemud_save_formats
getCompressionType(struct qemud_driver *driver)
{
    int compress = QEMUD_SAVE_FORMAT_RAW;

    /*
     * We reuse "save" flag for "dump" here. Then, we can support the same
     * format in "save" and "dump".
     */
    if (driver->dumpImageFormat) {
        compress = qemudSaveCompressionTypeFromString(driver->dumpImageFormat);
        /* Use "raw" as the format if the specified format is not valid,
         * or the compress program is not available.
         */
        if (compress < 0) {
            VIR_WARN("%s", _("Invalid dump image format specified in "
                             "configuration file, using raw"));
            return QEMUD_SAVE_FORMAT_RAW;
        }
        if (!qemudCompressProgramAvailable(compress)) {
            VIR_WARN("%s", _("Compression program for dump image format "
                             "in configuration file isn't available, "
                             "using raw"));
            return QEMUD_SAVE_FORMAT_RAW;
        }
    }
    return compress;
}

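/* Driver entry point behind virDomainCoreDump().  Illustrative caller-side
 * sketch (hypothetical path, error handling elided, not part of this file):
 *
 *   unsigned int flags = VIR_DUMP_CRASH | VIR_DUMP_BYPASS_CACHE;
 *   virDomainCoreDump(dom, "/var/lib/libvirt/dump/guest.core", flags);
 *
 * With VIR_DUMP_CRASH the guest is left stopped (crashed) after the dump;
 * otherwise a running guest is paused for the dump and resumed afterwards,
 * unless VIR_DUMP_LIVE was requested. */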
static int qemudDomainCoreDump(virDomainPtr dom,
                               const char *path,
                               unsigned int flags)
{
    struct qemud_driver *driver = dom->conn->privateData;
    virDomainObjPtr vm;
    int resume = 0, paused = 0;
    int ret = -1;
    virDomainEventPtr event = NULL;

    virCheckFlags(VIR_DUMP_LIVE | VIR_DUMP_CRASH | VIR_DUMP_BYPASS_CACHE, -1);

    qemuDriverLock(driver);
    vm = virDomainFindByUUID(&driver->domains, dom->uuid);

    if (!vm) {
        char uuidstr[VIR_UUID_STRING_BUFLEN];
        virUUIDFormat(dom->uuid, uuidstr);
        qemuReportError(VIR_ERR_NO_DOMAIN,
                        _("no domain with matching uuid '%s'"), uuidstr);
        goto cleanup;
    }

    if (qemuDomainObjBeginAsyncJobWithDriver(driver, vm,
                                             QEMU_ASYNC_JOB_DUMP) < 0)
        goto cleanup;

    if (!virDomainObjIsActive(vm)) {
        qemuReportError(VIR_ERR_OPERATION_INVALID,
                        "%s", _("domain is not running"));
        goto endjob;
    }

    /* Migrate will always stop the VM, so the resume condition is
       independent of whether the stop command is issued. */
    resume = virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING;

    /* Pause domain for non-live dump */
    if (!(flags & VIR_DUMP_LIVE) &&
        virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
        if (qemuProcessStopCPUs(driver, vm, VIR_DOMAIN_PAUSED_DUMP,
                                QEMU_ASYNC_JOB_DUMP) < 0)
            goto endjob;
        paused = 1;

        if (!virDomainObjIsActive(vm)) {
            qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                            _("guest unexpectedly quit"));
            goto endjob;
        }
    }

    ret = doCoreDump(driver, vm, path, getCompressionType(driver),
                     (flags & VIR_DUMP_BYPASS_CACHE) != 0);
    if (ret < 0)
        goto endjob;

    paused = 1;

endjob:
    if ((ret == 0) && (flags & VIR_DUMP_CRASH)) {
        qemuProcessStop(driver, vm, 0, VIR_DOMAIN_SHUTOFF_CRASHED);
        virDomainAuditStop(vm, "crashed");
        event = virDomainEventNewFromObj(vm,
                                         VIR_DOMAIN_EVENT_STOPPED,
                                         VIR_DOMAIN_EVENT_STOPPED_CRASHED);
    }

    /* Since the monitor is always attached to a pty for libvirt, it
       will support synchronous operations so we always get here after
       the migration is complete. */
    else if (resume && paused && virDomainObjIsActive(vm)) {
        if (qemuProcessStartCPUs(driver, vm, dom->conn,
                                 VIR_DOMAIN_RUNNING_UNPAUSED,
                                 QEMU_ASYNC_JOB_DUMP) < 0) {
            if (virGetLastError() == NULL)
                qemuReportError(VIR_ERR_OPERATION_FAILED,
                                "%s", _("resuming after dump failed"));
        }
    }

    if (qemuDomainObjEndAsyncJob(driver, vm) == 0)
        vm = NULL;
    else if ((ret == 0) && (flags & VIR_DUMP_CRASH) && !vm->persistent) {
        qemuDomainRemoveInactive(driver, vm);
        vm = NULL;
    }

cleanup:
    if (vm)
        virDomainObjUnlock(vm);
    if (event)
        qemuDomainEventQueue(driver, event);
    qemuDriverUnlock(driver);
    return ret;
}

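/* Driver entry point behind virDomainScreenshot(): dump the primary display
 * to a temporary file via the monitor's screendump command, then hand it to
 * the caller through the supplied stream and return the image MIME type.
 * Illustrative caller-side sketch (not part of this file):
 *
 *   virStreamPtr st = virStreamNew(conn, 0);
 *   char *mime = virDomainScreenshot(dom, st, 0, 0);
 *
 * The image data is then read from the stream, e.g. with virStreamRecv(). */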
static char *
qemuDomainScreenshot(virDomainPtr dom,
                     virStreamPtr st,
                     unsigned int screen,
                     unsigned int flags)
{
    struct qemud_driver *driver = dom->conn->privateData;
    virDomainObjPtr vm;
    qemuDomainObjPrivatePtr priv;
    char *tmp = NULL;
    int tmp_fd = -1;
    char *ret = NULL;
    bool unlink_tmp = false;

    virCheckFlags(0, NULL);

    qemuDriverLock(driver);
    vm = virDomainFindByUUID(&driver->domains, dom->uuid);
    qemuDriverUnlock(driver);

    if (!vm) {
        char uuidstr[VIR_UUID_STRING_BUFLEN];
        virUUIDFormat(dom->uuid, uuidstr);
        qemuReportError(VIR_ERR_NO_DOMAIN,
                        _("no domain matching uuid '%s'"), uuidstr);
        goto cleanup;
    }

    priv = vm->privateData;

    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
        goto cleanup;

    if (!virDomainObjIsActive(vm)) {
        qemuReportError(VIR_ERR_OPERATION_INVALID,
                        "%s", _("domain is not running"));
        goto endjob;
    }

    /* Well, even if qemu allows multiple graphic cards, heads, whatever,
     * screenshot command does not */
    if (screen) {
        qemuReportError(VIR_ERR_INVALID_ARG,
                        "%s", _("currently is supported only taking "
                                "screenshots of screen ID 0"));
        goto endjob;
    }

    if (virAsprintf(&tmp, "%s/qemu.screendump.XXXXXX", driver->cacheDir) < 0) {
        virReportOOMError();
        goto endjob;
    }

    if ((tmp_fd = mkstemp(tmp)) == -1) {
        virReportSystemError(errno, _("mkstemp(\"%s\") failed"), tmp);
        goto endjob;
    }
    unlink_tmp = true;

    virSecurityManagerSetSavedStateLabel(qemu_driver->securityManager, vm, tmp);

    qemuDomainObjEnterMonitor(driver, vm);
    if (qemuMonitorScreendump(priv->mon, tmp) < 0) {
        qemuDomainObjExitMonitor(driver, vm);
        goto endjob;
    }
    qemuDomainObjExitMonitor(driver, vm);

    if (VIR_CLOSE(tmp_fd) < 0) {
        virReportSystemError(errno, _("unable to close %s"), tmp);
        goto endjob;
    }

    if (virFDStreamOpenFile(st, tmp, 0, 0, O_RDONLY) < 0) {
        qemuReportError(VIR_ERR_OPERATION_FAILED, "%s",
                        _("unable to open stream"));
        goto endjob;
    }

    ret = strdup("image/x-portable-pixmap");

endjob:
    VIR_FORCE_CLOSE(tmp_fd);
    if (unlink_tmp)
        unlink(tmp);
    VIR_FREE(tmp);

    if (qemuDomainObjEndJob(driver, vm) == 0)
        vm = NULL;

cleanup:
    if (vm)
        virDomainObjUnlock(vm);
    return ret;
}

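/* Worker-thread handler for watchdog events.  For the "dump" watchdog
 * action it writes an automatic core dump under driver->autoDumpPath
 * (named <domain>-<timestamp>) using the same doCoreDump() helper as the
 * explicit API, then tries to resume the paused guest. */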
static void processWatchdogEvent(void *data, void *opaque)
{
    int ret;
    struct qemuDomainWatchdogEvent *wdEvent = data;
    struct qemud_driver *driver = opaque;

    qemuDriverLock(driver);
    virDomainObjLock(wdEvent->vm);

    switch (wdEvent->action) {
    case VIR_DOMAIN_WATCHDOG_ACTION_DUMP:
        {
            char *dumpfile;

            if (virAsprintf(&dumpfile, "%s/%s-%u",
                            driver->autoDumpPath,
                            wdEvent->vm->def->name,
                            (unsigned int)time(NULL)) < 0) {
                virReportOOMError();
                goto unlock;
            }

            if (qemuDomainObjBeginAsyncJobWithDriver(driver, wdEvent->vm,
                                                     QEMU_ASYNC_JOB_DUMP) < 0) {
                VIR_FREE(dumpfile);
                goto unlock;
            }

            if (!virDomainObjIsActive(wdEvent->vm)) {
                qemuReportError(VIR_ERR_OPERATION_INVALID,
                                "%s", _("domain is not running"));
                VIR_FREE(dumpfile);
                goto endjob;
            }

            ret = doCoreDump(driver, wdEvent->vm, dumpfile,
                             getCompressionType(driver),
                             driver->autoDumpBypassCache);
            if (ret < 0)
                qemuReportError(VIR_ERR_OPERATION_FAILED,
                                "%s", _("Dump failed"));

            ret = qemuProcessStartCPUs(driver, wdEvent->vm, NULL,
                                       VIR_DOMAIN_RUNNING_UNPAUSED,
                                       QEMU_ASYNC_JOB_DUMP);

            if (ret < 0)
                qemuReportError(VIR_ERR_OPERATION_FAILED,
                                "%s", _("Resuming after dump failed"));

            VIR_FREE(dumpfile);
        }
        break;
    default:
        goto unlock;
    }

endjob:
    /* Safe to ignore value since ref count was incremented in
     * qemuProcessHandleWatchdog().
     */
    ignore_value(qemuDomainObjEndAsyncJob(driver, wdEvent->vm));

unlock:
    if (virDomainObjUnref(wdEvent->vm) > 0)
        virDomainObjUnlock(wdEvent->vm);
    qemuDriverUnlock(driver);
    VIR_FREE(wdEvent);
}

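/* Hot-(un)plug vCPUs through the monitor.  A zero return from
 * qemuMonitorSetCPU() is treated as "not supported by this qemu" and a
 * negative one as an error, which is why rc == 0 is routed to the
 * "unsupported" label.  Offlining walks the CPUs in reverse so a partial
 * failure still leaves a contiguous set online. */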
static int qemudDomainHotplugVcpus(struct qemud_driver *driver,
                                   virDomainObjPtr vm,
                                   unsigned int nvcpus)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    int i, rc = 1;
    int ret = -1;
    int oldvcpus = vm->def->vcpus;
    int vcpus = oldvcpus;

    qemuDomainObjEnterMonitor(driver, vm);

    /* We need different branches here, because we want to offline
     * in reverse order to onlining, so any partial fail leaves us in a
     * reasonably sensible state */
    if (nvcpus > vcpus) {
        for (i = vcpus ; i < nvcpus ; i++) {
            /* Online new CPU */
            rc = qemuMonitorSetCPU(priv->mon, i, 1);
            if (rc == 0)
                goto unsupported;
            if (rc < 0)
                goto cleanup;

            vcpus++;
        }
    } else {
        for (i = vcpus - 1 ; i >= nvcpus ; i--) {
            /* Offline old CPU */
            rc = qemuMonitorSetCPU(priv->mon, i, 0);
            if (rc == 0)
                goto unsupported;
            if (rc < 0)
                goto cleanup;

            vcpus--;
        }
    }

    ret = 0;

cleanup:
    qemuDomainObjExitMonitor(driver, vm);
    vm->def->vcpus = vcpus;
    virDomainAuditVcpu(vm, oldvcpus, nvcpus, "update", rc == 1);
    return ret;

unsupported:
    qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                    _("cannot change vcpu count of this domain"));
    goto cleanup;
}

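/* Driver entry point behind virDomainSetVcpusFlags(): adjusts the live vCPU
 * count and/or the persistent definition depending on the
 * VIR_DOMAIN_AFFECT_* flags, with VIR_DOMAIN_VCPU_MAXIMUM only valid for
 * the config.  Illustrative caller-side sketch (not part of this file):
 *
 *   virDomainSetVcpusFlags(dom, 2,
 *                          VIR_DOMAIN_AFFECT_LIVE | VIR_DOMAIN_AFFECT_CONFIG);
 */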
static int
qemuDomainSetVcpusFlags(virDomainPtr dom, unsigned int nvcpus,
                        unsigned int flags)
{
    struct qemud_driver *driver = dom->conn->privateData;
    virDomainObjPtr vm;
    virDomainDefPtr persistentDef;
    const char * type;
    int max;
    int ret = -1;
    bool isActive;
    bool maximum;

    virCheckFlags(VIR_DOMAIN_AFFECT_LIVE |
                  VIR_DOMAIN_AFFECT_CONFIG |
                  VIR_DOMAIN_VCPU_MAXIMUM, -1);

    if (!nvcpus || (unsigned short) nvcpus != nvcpus) {
        qemuReportError(VIR_ERR_INVALID_ARG,
                        _("argument out of range: %d"), nvcpus);
        return -1;
    }

    qemuDriverLock(driver);
    vm = virDomainFindByUUID(&driver->domains, dom->uuid);
    qemuDriverUnlock(driver);

    if (!vm) {
        char uuidstr[VIR_UUID_STRING_BUFLEN];
        virUUIDFormat(dom->uuid, uuidstr);
        qemuReportError(VIR_ERR_NO_DOMAIN,
                        _("no domain with matching uuid '%s'"), uuidstr);
        goto cleanup;
    }

    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
        goto cleanup;

    isActive = virDomainObjIsActive(vm);
    maximum = (flags & VIR_DOMAIN_VCPU_MAXIMUM) != 0;
    flags &= ~VIR_DOMAIN_VCPU_MAXIMUM;

    if (flags == VIR_DOMAIN_AFFECT_CURRENT) {
        if (isActive)
            flags |= VIR_DOMAIN_AFFECT_LIVE;
        else
            flags |= VIR_DOMAIN_AFFECT_CONFIG;
    }

    /* MAXIMUM cannot be mixed with LIVE. */
    if (maximum && (flags & VIR_DOMAIN_AFFECT_LIVE)) {
        qemuReportError(VIR_ERR_INVALID_ARG, "%s",
                        _("cannot adjust maximum on running domain"));
        goto endjob;
    }

    if (!isActive && (flags & VIR_DOMAIN_AFFECT_LIVE)) {
        qemuReportError(VIR_ERR_OPERATION_INVALID,
                        "%s", _("domain is not running"));
        goto endjob;
    }

    if (!vm->persistent && (flags & VIR_DOMAIN_AFFECT_CONFIG)) {
        qemuReportError(VIR_ERR_OPERATION_INVALID, "%s",
                        _("cannot change persistent config of a transient domain"));
        goto endjob;
    }

    if (!(type = virDomainVirtTypeToString(vm->def->virtType))) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR,
                        _("unknown virt type in domain definition '%d'"),
                        vm->def->virtType);
        goto endjob;
    }

    if ((max = qemudGetMaxVCPUs(NULL, type)) < 0) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                        _("could not determine max vcpus for the domain"));
        goto endjob;
    }

    if (!maximum && vm->def->maxvcpus < max) {
        max = vm->def->maxvcpus;
    }

    if (nvcpus > max) {
        qemuReportError(VIR_ERR_INVALID_ARG,
                        _("requested vcpus is greater than max allowable"
                          " vcpus for the domain: %d > %d"), nvcpus, max);
        goto endjob;
    }

    if (!(persistentDef = virDomainObjGetPersistentDef(driver->caps, vm)))
        goto endjob;

    switch (flags) {
    case VIR_DOMAIN_AFFECT_CONFIG:
        if (maximum) {
            persistentDef->maxvcpus = nvcpus;
            if (nvcpus < persistentDef->vcpus)
                persistentDef->vcpus = nvcpus;
        } else {
            persistentDef->vcpus = nvcpus;
        }
        ret = 0;
        break;

    case VIR_DOMAIN_AFFECT_LIVE:
        ret = qemudDomainHotplugVcpus(driver, vm, nvcpus);
        break;

    case VIR_DOMAIN_AFFECT_LIVE | VIR_DOMAIN_AFFECT_CONFIG:
        ret = qemudDomainHotplugVcpus(driver, vm, nvcpus);
        if (ret == 0) {
            persistentDef->vcpus = nvcpus;
        }
        break;
    }

    /* Save the persistent config to disk */
    if (flags & VIR_DOMAIN_AFFECT_CONFIG)
        ret = virDomainSaveConfig(driver->configDir, persistentDef);

endjob:
    if (qemuDomainObjEndJob(driver, vm) == 0)
        vm = NULL;

cleanup:
    if (vm)
        virDomainObjUnlock(vm);
    return ret;
}

static int
qemuDomainSetVcpus(virDomainPtr dom, unsigned int nvcpus)
{
    return qemuDomainSetVcpusFlags(dom, nvcpus, VIR_DOMAIN_AFFECT_LIVE);
}

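/* Driver entry point behind virDomainPinVcpu()/virDomainPinVcpuFlags().
 * The cpumap argument is a dense bitmap, one bit per host CPU.
 * Illustrative caller-side sketch pinning vCPU 0 to host CPUs 0-1
 * (assumed variable names, not part of this file):
 *
 *   int maplen = VIR_CPU_MAPLEN(hostcpus);
 *   unsigned char *cpumap = calloc(maplen, 1);
 *   VIR_USE_CPU(cpumap, 0);
 *   VIR_USE_CPU(cpumap, 1);
 *   virDomainPinVcpu(dom, 0, cpumap, maplen);
 */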
static int
qemudDomainPinVcpuFlags(virDomainPtr dom,
                        unsigned int vcpu,
                        unsigned char *cpumap,
                        int maplen,
                        unsigned int flags) {

    struct qemud_driver *driver = dom->conn->privateData;
    virDomainObjPtr vm;
    virDomainDefPtr persistentDef = NULL;
    int maxcpu, hostcpus;
    virNodeInfo nodeinfo;
    int ret = -1;
    bool isActive;
    qemuDomainObjPrivatePtr priv;
    bool canResetting = true;
    int pcpu;

    virCheckFlags(VIR_DOMAIN_AFFECT_LIVE |
                  VIR_DOMAIN_AFFECT_CONFIG, -1);

    qemuDriverLock(driver);
    vm = virDomainFindByUUID(&driver->domains, dom->uuid);
    qemuDriverUnlock(driver);

    if (!vm) {
        char uuidstr[VIR_UUID_STRING_BUFLEN];
        virUUIDFormat(dom->uuid, uuidstr);
        qemuReportError(VIR_ERR_NO_DOMAIN,
                        _("no domain with matching uuid '%s'"), uuidstr);
        goto cleanup;
    }

    isActive = virDomainObjIsActive(vm);
    if (flags == VIR_DOMAIN_AFFECT_CURRENT) {
        if (isActive)
            flags = VIR_DOMAIN_AFFECT_LIVE;
        else
            flags = VIR_DOMAIN_AFFECT_CONFIG;
    }

    if (!isActive && (flags & VIR_DOMAIN_AFFECT_LIVE)) {
        qemuReportError(VIR_ERR_OPERATION_INVALID, "%s",
                        _("a domain is inactive; can change only "
                          "persistent config"));
        goto cleanup;
    }

    priv = vm->privateData;

    if (vcpu > (priv->nvcpupids-1)) {
        qemuReportError(VIR_ERR_INVALID_ARG,
                        _("vcpu number out of range %d > %d"),
                        vcpu, priv->nvcpupids);
        goto cleanup;
    }

    if (flags & VIR_DOMAIN_AFFECT_CONFIG) {
        if (!vm->persistent) {
            qemuReportError(VIR_ERR_OPERATION_INVALID, "%s",
                            _("cannot change persistent config of a transient domain"));
            goto cleanup;
        }
        if (!(persistentDef = virDomainObjGetPersistentDef(driver->caps, vm)))
            goto cleanup;
    }

    if (nodeGetInfo(dom->conn, &nodeinfo) < 0)
        goto cleanup;
    hostcpus = VIR_NODEINFO_MAXCPUS(nodeinfo);
    maxcpu = maplen * 8;
    if (maxcpu > hostcpus)
        maxcpu = hostcpus;
    /* pinning to all physical cpus means resetting,
     * so check if we can reset setting.
     */
    for (pcpu = 0; pcpu < hostcpus; pcpu++) {
        if ((cpumap[pcpu/8] & (1 << (pcpu % 8))) == 0) {
            canResetting = false;
            break;
        }
    }

    if (flags & VIR_DOMAIN_AFFECT_LIVE) {

        if (priv->vcpupids != NULL) {
            if (virProcessInfoSetAffinity(priv->vcpupids[vcpu],
                                          cpumap, maplen, maxcpu) < 0)
                goto cleanup;
        } else {
            qemuReportError(VIR_ERR_OPERATION_INVALID,
                            "%s", _("cpu affinity is not supported"));
            goto cleanup;
        }

        if (canResetting) {
            if (virDomainVcpuPinDel(vm->def, vcpu) < 0) {
                qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                                _("failed to delete vcpupin xml of "
                                  "a running domain"));
                goto cleanup;
            }
        } else {
            if (virDomainVcpuPinAdd(vm->def, cpumap, maplen, vcpu) < 0) {
                qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                                _("failed to update or add vcpupin xml of "
                                  "a running domain"));
                goto cleanup;
            }
        }

        if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0)
            goto cleanup;
    }

    if (flags & VIR_DOMAIN_AFFECT_CONFIG) {

        if (canResetting) {
            if (virDomainVcpuPinDel(persistentDef, vcpu) < 0) {
                qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                                _("failed to delete vcpupin xml of "
                                  "a persistent domain"));
                goto cleanup;
            }
        } else {
            if (virDomainVcpuPinAdd(persistentDef, cpumap, maplen, vcpu) < 0) {
                qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                                _("failed to update or add vcpupin xml of "
                                  "a persistent domain"));
                goto cleanup;
            }
        }

        ret = virDomainSaveConfig(driver->configDir, persistentDef);
        goto cleanup;
    }

    ret = 0;

cleanup:
    if (vm)
        virDomainObjUnlock(vm);
    return ret;
}

static int
qemudDomainPinVcpu(virDomainPtr dom,
                   unsigned int vcpu,
                   unsigned char *cpumap,
                   int maplen) {
    return qemudDomainPinVcpuFlags(dom, vcpu, cpumap, maplen,
                                   VIR_DOMAIN_AFFECT_LIVE);
}

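/* Driver entry point behind virDomainGetVcpuPinInfo(): fills one cpumap per
 * vCPU from either the live or the persistent definition.  Maps start out
 * with every usable host CPU set and are then masked down by any <vcpupin>
 * entries in the cputune config. */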
static int
qemudDomainGetVcpuPinInfo(virDomainPtr dom,
                          int ncpumaps,
                          unsigned char *cpumaps,
                          int maplen,
                          unsigned int flags) {

    struct qemud_driver *driver = dom->conn->privateData;
    virDomainObjPtr vm = NULL;
    virNodeInfo nodeinfo;
    virDomainDefPtr targetDef = NULL;
    int ret = -1;
    bool isActive;
    int maxcpu, hostcpus, vcpu, pcpu;
    int n;
    virDomainVcpuPinDefPtr *vcpupin_list;
    char *cpumask = NULL;
    unsigned char *cpumap;

    virCheckFlags(VIR_DOMAIN_AFFECT_LIVE |
                  VIR_DOMAIN_AFFECT_CONFIG, -1);

    if ((flags & (VIR_DOMAIN_AFFECT_LIVE | VIR_DOMAIN_AFFECT_CONFIG)) ==
        (VIR_DOMAIN_AFFECT_LIVE | VIR_DOMAIN_AFFECT_CONFIG)) {
        qemuReportError(VIR_ERR_INVALID_ARG, "%s",
                        _("cannot get live and persistent info concurrently"));
        goto cleanup;
    }

    qemuDriverLock(driver);
    vm = virDomainFindByUUID(&driver->domains, dom->uuid);
    qemuDriverUnlock(driver);

    if (!vm) {
        char uuidstr[VIR_UUID_STRING_BUFLEN];
        virUUIDFormat(dom->uuid, uuidstr);
        qemuReportError(VIR_ERR_NO_DOMAIN,
                        _("no domain with matching uuid '%s'"), uuidstr);
        goto cleanup;
    }

    isActive = virDomainObjIsActive(vm);
    if (flags == VIR_DOMAIN_AFFECT_CURRENT) {
        if (isActive)
            flags = VIR_DOMAIN_AFFECT_LIVE;
        else
            flags = VIR_DOMAIN_AFFECT_CONFIG;
    }

    if (flags & VIR_DOMAIN_AFFECT_LIVE) {
        if (!isActive) {
            qemuReportError(VIR_ERR_OPERATION_INVALID,
                            "%s", _("domain is not running"));
            goto cleanup;
        }
        targetDef = vm->def;
    }

    if (flags & VIR_DOMAIN_AFFECT_CONFIG) {
        if (!vm->persistent) {
            qemuReportError(VIR_ERR_OPERATION_INVALID, "%s",
                            _("cannot get persistent config of a transient domain"));
            goto cleanup;
        }
        if (!(targetDef = virDomainObjGetPersistentDef(driver->caps, vm)))
            goto cleanup;
    }

    /* Coverity didn't realize that targetDef must be set if we got here. */
    sa_assert(targetDef);

    if (nodeGetInfo(dom->conn, &nodeinfo) < 0)
        goto cleanup;
    hostcpus = VIR_NODEINFO_MAXCPUS(nodeinfo);
    maxcpu = maplen * 8;
    if (maxcpu > hostcpus)
        maxcpu = hostcpus;

    /* Clamp to actual number of vcpus */
    if (ncpumaps > targetDef->vcpus)
        ncpumaps = targetDef->vcpus;

    if (ncpumaps < 1) {
        goto cleanup;
    }

    /* initialize cpumaps */
    memset(cpumaps, 0xff, maplen * ncpumaps);
    if (maxcpu % 8) {
        for (vcpu = 0; vcpu < ncpumaps; vcpu++) {
            cpumap = VIR_GET_CPUMAP(cpumaps, maplen, vcpu);
            cpumap[maplen - 1] &= (1 << maxcpu % 8) - 1;
        }
    }

    /* if vcpupin setting exists, there are unused physical cpus */
    for (n = 0; n < targetDef->cputune.nvcpupin; n++) {
        vcpupin_list = targetDef->cputune.vcpupin;
        vcpu = vcpupin_list[n]->vcpuid;
        cpumask = vcpupin_list[n]->cpumask;
        cpumap = VIR_GET_CPUMAP(cpumaps, maplen, vcpu);
        for (pcpu = 0; pcpu < maxcpu; pcpu++) {
            if (cpumask[pcpu] == 0)
                VIR_UNUSE_CPU(cpumap, pcpu);
        }
    }
    ret = ncpumaps;

cleanup:
    if (vm)
        virDomainObjUnlock(vm);
    return ret;
}
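
/* Fill 'info' with per-vCPU state and CPU time, and 'cpumaps' with the
 * real affinity of each vCPU thread, for a running domain.  Returns the
 * number of vCPUs reported, or -1 on error. */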
static int
qemudDomainGetVcpus(virDomainPtr dom,
                    virVcpuInfoPtr info,
                    int maxinfo,
                    unsigned char *cpumaps,
                    int maplen) {
    struct qemud_driver *driver = dom->conn->privateData;
    virDomainObjPtr vm;
    virNodeInfo nodeinfo;
    int i, v, maxcpu, hostcpus;
    int ret = -1;
    qemuDomainObjPrivatePtr priv;

    qemuDriverLock(driver);
    vm = virDomainFindByUUID(&driver->domains, dom->uuid);
    qemuDriverUnlock(driver);

    if (!vm) {
        char uuidstr[VIR_UUID_STRING_BUFLEN];
        virUUIDFormat(dom->uuid, uuidstr);
        qemuReportError(VIR_ERR_NO_DOMAIN,
                        _("no domain with matching uuid '%s'"), uuidstr);
        goto cleanup;
    }

    if (!virDomainObjIsActive(vm)) {
        qemuReportError(VIR_ERR_OPERATION_INVALID,
                        "%s",
                        _("cannot list vcpu pinning for an inactive domain"));
        goto cleanup;
    }

    priv = vm->privateData;

    if (nodeGetInfo(dom->conn, &nodeinfo) < 0)
        goto cleanup;

    hostcpus = VIR_NODEINFO_MAXCPUS(nodeinfo);
    maxcpu = maplen * 8;
    if (maxcpu > hostcpus)
        maxcpu = hostcpus;

    /* Clamp to actual number of vcpus */
    if (maxinfo > priv->nvcpupids)
        maxinfo = priv->nvcpupids;

    if (maxinfo >= 1) {
        if (info != NULL) {
            memset(info, 0, sizeof(*info) * maxinfo);
            for (i = 0 ; i < maxinfo ; i++) {
                info[i].number = i;
                info[i].state = VIR_VCPU_RUNNING;

                if (priv->vcpupids != NULL &&
                    qemudGetProcessInfo(&(info[i].cpuTime),
                                        &(info[i].cpu),
                                        vm->pid,
                                        priv->vcpupids[i]) < 0) {
                    virReportSystemError(errno, "%s",
                                         _("cannot get vCPU placement & pCPU time"));
                    goto cleanup;
                }
            }
        }

        if (cpumaps != NULL) {
            memset(cpumaps, 0, maplen * maxinfo);
            if (priv->vcpupids != NULL) {
                for (v = 0 ; v < maxinfo ; v++) {
                    unsigned char *cpumap = VIR_GET_CPUMAP(cpumaps, maplen, v);

                    if (virProcessInfoGetAffinity(priv->vcpupids[v],
                                                  cpumap, maplen, maxcpu) < 0)
                        goto cleanup;
                }
            } else {
                qemuReportError(VIR_ERR_OPERATION_INVALID,
                                "%s", _("cpu affinity is not available"));
                goto cleanup;
            }
        }
    }
    ret = maxinfo;

cleanup:
    if (vm)
        virDomainObjUnlock(vm);
    return ret;
}
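
/* Return the current or maximum vCPU count of the live or persistent
 * definition, depending on 'flags'. */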
static int
qemudDomainGetVcpusFlags(virDomainPtr dom, unsigned int flags)
{
    struct qemud_driver *driver = dom->conn->privateData;
    virDomainObjPtr vm;
    virDomainDefPtr def;
    int ret = -1;
    bool active;

    virCheckFlags(VIR_DOMAIN_AFFECT_LIVE |
                  VIR_DOMAIN_AFFECT_CONFIG |
                  VIR_DOMAIN_VCPU_MAXIMUM, -1);

    qemuDriverLock(driver);
    vm = virDomainFindByUUID(&driver->domains, dom->uuid);
    qemuDriverUnlock(driver);

    if (!vm) {
        char uuidstr[VIR_UUID_STRING_BUFLEN];
        virUUIDFormat(dom->uuid, uuidstr);
        qemuReportError(VIR_ERR_NO_DOMAIN,
                        _("no domain with matching uuid '%s'"), uuidstr);
        goto cleanup;
    }

    active = virDomainObjIsActive(vm);

    if ((flags & (VIR_DOMAIN_VCPU_LIVE | VIR_DOMAIN_VCPU_CONFIG)) == 0) {
        if (active)
            flags |= VIR_DOMAIN_VCPU_LIVE;
        else
            flags |= VIR_DOMAIN_VCPU_CONFIG;
    }
    if ((flags & VIR_DOMAIN_AFFECT_LIVE) && (flags & VIR_DOMAIN_AFFECT_CONFIG)) {
        qemuReportError(VIR_ERR_INVALID_ARG,
                        _("invalid flag combination: (0x%x)"), flags);
        goto cleanup;
    }

    if (flags & VIR_DOMAIN_AFFECT_LIVE) {
        if (!active) {
            qemuReportError(VIR_ERR_OPERATION_INVALID, "%s",
                            _("domain not active"));
            goto cleanup;
        }
        def = vm->def;
    } else {
        if (!vm->persistent) {
            qemuReportError(VIR_ERR_OPERATION_INVALID, "%s",
                            _("domain is transient"));
            goto cleanup;
        }
        def = vm->newDef ? vm->newDef : vm->def;
    }

    ret = (flags & VIR_DOMAIN_VCPU_MAXIMUM) ? def->maxvcpus : def->vcpus;

cleanup:
    if (vm)
        virDomainObjUnlock(vm);
    return ret;
}
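
/* Old-style API: report the maximum vCPU count of the live domain. */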
static int
qemudDomainGetMaxVcpus(virDomainPtr dom)
{
    return qemudDomainGetVcpusFlags(dom, (VIR_DOMAIN_AFFECT_LIVE |
                                          VIR_DOMAIN_VCPU_MAXIMUM));
}
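
/* Report the security label of the running domain's process; an
 * inactive domain yields an all-zero label. */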
static int qemudDomainGetSecurityLabel(virDomainPtr dom, virSecurityLabelPtr seclabel)
{
    struct qemud_driver *driver = (struct qemud_driver *)dom->conn->privateData;
    virDomainObjPtr vm;
    int ret = -1;

    qemuDriverLock(driver);
    vm = virDomainFindByUUID(&driver->domains, dom->uuid);

    memset(seclabel, 0, sizeof(*seclabel));

    if (!vm) {
        char uuidstr[VIR_UUID_STRING_BUFLEN];
        virUUIDFormat(dom->uuid, uuidstr);
        qemuReportError(VIR_ERR_NO_DOMAIN,
                        _("no domain with matching uuid '%s'"), uuidstr);
        goto cleanup;
    }

    if (!virDomainVirtTypeToString(vm->def->virtType)) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR,
                        _("unknown virt type in domain definition '%d'"),
                        vm->def->virtType);
        goto cleanup;
    }

    /*
     * Theoretically, the pid can be replaced during this operation and
     * return the label of a different process. If atomicity is needed,
     * further validation will be required.
     *
     * Comment from Dan Berrange:
     *
     *   Well the PID as stored in the virDomainObjPtr can't be changed
     *   because you've got a locked object. The OS level PID could have
     *   exited, though and in extreme circumstances have cycled through all
     *   PIDs back to ours. We could sanity check that our PID still exists
     *   after reading the label, by checking that our FD connecting to the
     *   QEMU monitor hasn't seen SIGHUP/ERR on poll().
     */
    if (virDomainObjIsActive(vm)) {
        if (virSecurityManagerGetProcessLabel(driver->securityManager,
                                              vm, seclabel) < 0) {
            qemuReportError(VIR_ERR_INTERNAL_ERROR,
                            "%s", _("Failed to get security label"));
            goto cleanup;
        }
    }

    ret = 0;

cleanup:
    if (vm)
        virDomainObjUnlock(vm);
    qemuDriverUnlock(driver);
    return ret;
}
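
/* Report the host security model and DOI advertised in the driver
 * capabilities; no configured model is treated as success. */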
static int qemudNodeGetSecurityModel(virConnectPtr conn,
                                     virSecurityModelPtr secmodel)
{
    struct qemud_driver *driver = (struct qemud_driver *)conn->privateData;
    char *p;
    int ret = 0;

    qemuDriverLock(driver);
    memset(secmodel, 0, sizeof(*secmodel));

    /* NULL indicates no driver, which we treat as
     * success, but simply return no data in *secmodel */
    if (driver->caps->host.secModel.model == NULL)
        goto cleanup;

    p = driver->caps->host.secModel.model;
    if (strlen(p) >= VIR_SECURITY_MODEL_BUFLEN-1) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR,
                        _("security model string exceeds max %d bytes"),
                        VIR_SECURITY_MODEL_BUFLEN-1);
        ret = -1;
        goto cleanup;
    }
    strcpy(secmodel->model, p);

    p = driver->caps->host.secModel.doi;
    if (strlen(p) >= VIR_SECURITY_DOI_BUFLEN-1) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR,
                        _("security DOI string exceeds max %d bytes"),
                        VIR_SECURITY_DOI_BUFLEN-1);
        ret = -1;
        goto cleanup;
    }
    strcpy(secmodel->doi, p);

cleanup:
    qemuDriverUnlock(driver);
    return ret;
}
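
/* On disk, a save image is a qemud_save_header, followed by
 * header.xml_len bytes of domain XML, followed by the guest memory
 * stream that qemu reads back on restore. */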
/* Return -1 on most failures after raising error, -2 if edit was specified
 * but xmlin and state (-1 for no change, 0 for paused, 1 for running) do
 * not represent any changes (no error raised), -3 if corrupt image was
 * unlinked (no error raised), and opened fd on success. */
static int ATTRIBUTE_NONNULL(3) ATTRIBUTE_NONNULL(4)
qemuDomainSaveImageOpen(struct qemud_driver *driver,
                        const char *path,
                        virDomainDefPtr *ret_def,
                        struct qemud_save_header *ret_header,
                        bool bypass_cache, virFileDirectFdPtr *directFd,
                        const char *xmlin, int state, bool edit,
                        bool unlink_corrupt)
{
    int fd;
    struct qemud_save_header header;
    char *xml = NULL;
    virDomainDefPtr def = NULL;
    int oflags = edit ? O_RDWR : O_RDONLY;

    if (bypass_cache) {
        int directFlag = virFileDirectFdFlag();
        if (directFlag < 0) {
            qemuReportError(VIR_ERR_OPERATION_FAILED, "%s",
                            _("bypass cache unsupported by this system"));
            goto error;
        }
        oflags |= directFlag;
    }

    if ((fd = qemuOpenFile(driver, path, oflags, NULL, NULL)) < 0)
        goto error;
    if (bypass_cache && (*directFd = virFileDirectFdNew(&fd, path)) == NULL)
        goto error;

    if (saferead(fd, &header, sizeof(header)) != sizeof(header)) {
        qemuReportError(VIR_ERR_OPERATION_FAILED,
                        "%s", _("failed to read qemu header"));
        goto error;
    }

    if (memcmp(header.magic, QEMUD_SAVE_MAGIC, sizeof(header.magic)) != 0) {
        const char *msg = _("image magic is incorrect");

        if (memcmp(header.magic, QEMUD_SAVE_PARTIAL,
                   sizeof(header.magic)) == 0) {
            msg = _("save image is incomplete");
            if (unlink_corrupt) {
                if (VIR_CLOSE(fd) < 0 || unlink(path) < 0) {
                    virReportSystemError(errno,
                                         _("cannot remove corrupt file: %s"),
                                         path);
                    goto error;
                }
                return -3;
            }
        }
        qemuReportError(VIR_ERR_OPERATION_FAILED, "%s", msg);
        goto error;
    }

    if (header.version > QEMUD_SAVE_VERSION) {
        /* convert endianness and try again */
        bswap_header(&header);
    }

    if (header.version > QEMUD_SAVE_VERSION) {
        qemuReportError(VIR_ERR_OPERATION_FAILED,
                        _("image version is not supported (%d > %d)"),
                        header.version, QEMUD_SAVE_VERSION);
        goto error;
    }

    if (header.xml_len <= 0) {
        qemuReportError(VIR_ERR_OPERATION_FAILED,
                        _("invalid XML length: %d"), header.xml_len);
        goto error;
    }

    if (VIR_ALLOC_N(xml, header.xml_len) < 0) {
        virReportOOMError();
        goto error;
    }

    if (saferead(fd, xml, header.xml_len) != header.xml_len) {
        qemuReportError(VIR_ERR_OPERATION_FAILED,
                        "%s", _("failed to read XML"));
        goto error;
    }

    if (edit && STREQ(xml, xmlin) &&
        (state < 0 || state == header.was_running)) {
        VIR_FREE(xml);
        if (VIR_CLOSE(fd) < 0) {
            virReportSystemError(errno, _("cannot close file: %s"), path);
            goto error;
        }
        return -2;
    }
    if (state >= 0)
        header.was_running = state;

    /* Create a domain from this XML */
    if (!(def = virDomainDefParseString(driver->caps, xml,
                                        QEMU_EXPECTED_VIRT_TYPES,
                                        VIR_DOMAIN_XML_INACTIVE)))
        goto error;
    if (xmlin) {
        virDomainDefPtr def2 = NULL;

        if (!(def2 = virDomainDefParseString(driver->caps, xmlin,
                                             QEMU_EXPECTED_VIRT_TYPES,
                                             VIR_DOMAIN_XML_INACTIVE)))
            goto error;
        if (!virDomainDefCheckABIStability(def, def2)) {
            virDomainDefFree(def2);
            goto error;
        }
        virDomainDefFree(def);
        def = def2;
    }

    VIR_FREE(xml);

    *ret_def = def;
    *ret_header = header;

    return fd;

error:
    virDomainDefFree(def);
    VIR_FREE(xml);
    VIR_FORCE_CLOSE(fd);

    return -1;
}
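
/* Boot qemu with the incoming migration stream taken from *fd, running
 * it through an external decompression helper when the save header says
 * the image is compressed, then resume CPUs unless the caller asked for
 * a paused start. */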
static int ATTRIBUTE_NONNULL(4) ATTRIBUTE_NONNULL(5) ATTRIBUTE_NONNULL(6)
qemuDomainSaveImageStartVM(virConnectPtr conn,
                           struct qemud_driver *driver,
                           virDomainObjPtr vm,
                           int *fd,
                           const struct qemud_save_header *header,
                           const char *path,
                           bool start_paused)
{
    int ret = -1;
    virDomainEventPtr event;
    int intermediatefd = -1;
    virCommandPtr cmd = NULL;

    if (header->version == 2) {
        const char *prog = qemudSaveCompressionTypeToString(header->compressed);
        if (prog == NULL) {
            qemuReportError(VIR_ERR_OPERATION_FAILED,
                            _("Invalid compressed save format %d"),
                            header->compressed);
            goto out;
        }

        if (header->compressed != QEMUD_SAVE_FORMAT_RAW) {
            cmd = virCommandNewArgList(prog, "-dc", NULL);
            intermediatefd = *fd;
            *fd = -1;

            virCommandSetInputFD(cmd, intermediatefd);
            virCommandSetOutputFD(cmd, fd);

            if (virCommandRunAsync(cmd, NULL) < 0) {
                qemuReportError(VIR_ERR_INTERNAL_ERROR,
                                _("Failed to start decompression binary %s"),
                                prog);
                *fd = intermediatefd;
                goto out;
            }
        }
    }

    /* Set the migration source and start it up. */
    ret = qemuProcessStart(conn, driver, vm, "stdio", true,
                           false, *fd, path, NULL, VIR_VM_OP_RESTORE);

    if (intermediatefd != -1) {
        if (ret < 0) {
            /* if there was an error setting up qemu, the intermediate
             * process will wait forever to write to stdout, so we
             * must manually kill it.
             */
            VIR_FORCE_CLOSE(intermediatefd);
            VIR_FORCE_CLOSE(*fd);
        }

        if (virCommandWait(cmd, NULL) < 0)
            ret = -1;
    }
    VIR_FORCE_CLOSE(intermediatefd);

    if (VIR_CLOSE(*fd) < 0) {
        virReportSystemError(errno, _("cannot close file: %s"), path);
        ret = -1;
    }

    if (ret < 0) {
        virDomainAuditStart(vm, "restored", false);
        goto out;
    }

    event = virDomainEventNewFromObj(vm,
                                     VIR_DOMAIN_EVENT_STARTED,
                                     VIR_DOMAIN_EVENT_STARTED_RESTORED);
    virDomainAuditStart(vm, "restored", true);
    if (event)
        qemuDomainEventQueue(driver, event);

    /* If it was running before, resume it now unless caller requested pause. */
    if (header->was_running && !start_paused) {
        if (qemuProcessStartCPUs(driver, vm, conn,
                                 VIR_DOMAIN_RUNNING_RESTORED,
                                 QEMU_ASYNC_JOB_NONE) < 0) {
            if (virGetLastError() == NULL)
                qemuReportError(VIR_ERR_OPERATION_FAILED,
                                "%s", _("failed to resume domain"));
            goto out;
        }
        if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0) {
            VIR_WARN("Failed to save status on vm %s", vm->def->name);
            goto out;
        }
    } else {
        int detail = (start_paused ? VIR_DOMAIN_EVENT_SUSPENDED_PAUSED :
                      VIR_DOMAIN_EVENT_SUSPENDED_RESTORED);
        event = virDomainEventNewFromObj(vm,
                                         VIR_DOMAIN_EVENT_SUSPENDED,
                                         detail);
        if (event)
            qemuDomainEventQueue(driver, event);
    }

    ret = 0;

out:
    virCommandFree(cmd);
    if (virSecurityManagerRestoreSavedStateLabel(driver->securityManager,
                                                 vm, path) < 0)
        VIR_WARN("failed to restore save state label on %s", path);

    return ret;
}
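
/* Restore a domain from a save image on disk, optionally overriding
 * its XML and its running/paused state via 'dxml' and 'flags'. */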
static int
qemuDomainRestoreFlags(virConnectPtr conn,
                       const char *path,
                       const char *dxml,
                       unsigned int flags)
{
    struct qemud_driver *driver = conn->privateData;
    virDomainDefPtr def = NULL;
    virDomainObjPtr vm = NULL;
    int fd = -1;
    int ret = -1;
    struct qemud_save_header header;
    virFileDirectFdPtr directFd = NULL;
    int state = -1;

    virCheckFlags(VIR_DOMAIN_SAVE_BYPASS_CACHE |
                  VIR_DOMAIN_SAVE_RUNNING |
                  VIR_DOMAIN_SAVE_PAUSED, -1);

    qemuDriverLock(driver);

    if (flags & VIR_DOMAIN_SAVE_RUNNING)
        state = 1;
    else if (flags & VIR_DOMAIN_SAVE_PAUSED)
        state = 0;

    fd = qemuDomainSaveImageOpen(driver, path, &def, &header,
                                 (flags & VIR_DOMAIN_SAVE_BYPASS_CACHE) != 0,
                                 &directFd, dxml, state, false, false);
    if (fd < 0)
        goto cleanup;

    if (virDomainObjIsDuplicate(&driver->domains, def, 1) < 0)
        goto cleanup;

    if (!(vm = virDomainAssignDef(driver->caps,
                                  &driver->domains,
                                  def, true))) {
        /* virDomainAssignDef already set the error */
        goto cleanup;
    }
    def = NULL;

    if (qemuDomainObjBeginJobWithDriver(driver, vm, QEMU_JOB_MODIFY) < 0)
        goto cleanup;

    ret = qemuDomainSaveImageStartVM(conn, driver, vm, &fd, &header, path,
                                     false);
    if (virFileDirectFdClose(directFd) < 0)
        VIR_WARN("Failed to close %s", path);

    if (qemuDomainObjEndJob(driver, vm) == 0)
        vm = NULL;
    else if (ret < 0 && !vm->persistent) {
        qemuDomainRemoveInactive(driver, vm);
        vm = NULL;
    }

cleanup:
    virDomainDefFree(def);
    VIR_FORCE_CLOSE(fd);
    virFileDirectFdFree(directFd);
    if (vm)
        virDomainObjUnlock(vm);
    qemuDriverUnlock(driver);
    return ret;
}
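
/* Old-style restore entry point; equivalent to the flags variant with
 * no flags set. */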
static int
qemuDomainRestore(virConnectPtr conn,
                  const char *path)
{
    return qemuDomainRestoreFlags(conn, path, NULL, 0);
}
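
/* Extract the domain XML embedded in a save image without starting
 * the domain. */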
static char *
qemuDomainSaveImageGetXMLDesc(virConnectPtr conn, const char *path,
                              unsigned int flags)
{
    struct qemud_driver *driver = conn->privateData;
    char *ret = NULL;
    virDomainDefPtr def = NULL;
    int fd = -1;
    struct qemud_save_header header;

    /* We only take subset of virDomainDefFormat flags. */
    virCheckFlags(VIR_DOMAIN_XML_SECURE, NULL);

    qemuDriverLock(driver);

    fd = qemuDomainSaveImageOpen(driver, path, &def, &header, false, NULL,
                                 NULL, -1, false, false);

    if (fd < 0)
        goto cleanup;

    ret = qemuDomainDefFormatXML(driver, def, flags);

cleanup:
    virDomainDefFree(def);
    VIR_FORCE_CLOSE(fd);
    qemuDriverUnlock(driver);
    return ret;
}
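
/* Rewrite the domain XML (and optionally the saved running/paused
 * state) stored inside an existing save image, in place; the new XML
 * must fit in the space reserved by the original header. */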
static int
qemuDomainSaveImageDefineXML(virConnectPtr conn, const char *path,
                             const char *dxml, unsigned int flags)
{
    struct qemud_driver *driver = conn->privateData;
    int ret = -1;
    virDomainDefPtr def = NULL;
    int fd = -1;
    struct qemud_save_header header;
    char *xml = NULL;
    size_t len;
    int state = -1;

    virCheckFlags(VIR_DOMAIN_SAVE_RUNNING |
                  VIR_DOMAIN_SAVE_PAUSED, -1);

    qemuDriverLock(driver);

    if (flags & VIR_DOMAIN_SAVE_RUNNING)
        state = 1;
    else if (flags & VIR_DOMAIN_SAVE_PAUSED)
        state = 0;

    fd = qemuDomainSaveImageOpen(driver, path, &def, &header, false, NULL,
                                 dxml, state, true, false);

    if (fd < 0) {
        /* Check for special case of no change needed. */
        if (fd == -2)
            ret = 0;
        goto cleanup;
    }

    xml = qemuDomainDefFormatXML(driver, def, (VIR_DOMAIN_XML_INACTIVE |
                                               VIR_DOMAIN_XML_SECURE));
    if (!xml)
        goto cleanup;
    len = strlen(xml) + 1;

    if (len > header.xml_len) {
        qemuReportError(VIR_ERR_OPERATION_FAILED, "%s",
                        _("new xml too large to fit in file"));
        goto cleanup;
    }
    if (VIR_EXPAND_N(xml, len, header.xml_len - len) < 0) {
        virReportOOMError();
        goto cleanup;
    }

    if (lseek(fd, 0, SEEK_SET) != 0) {
        virReportSystemError(errno, _("cannot seek in '%s'"), path);
        goto cleanup;
    }
    if (safewrite(fd, &header, sizeof(header)) != sizeof(header) ||
        safewrite(fd, xml, len) != len ||
        VIR_CLOSE(fd) < 0) {
        virReportSystemError(errno, _("failed to write xml to '%s'"), path);
        goto cleanup;
    }

    ret = 0;

cleanup:
    virDomainDefFree(def);
    VIR_FORCE_CLOSE(fd);
    VIR_FREE(xml);
    qemuDriverUnlock(driver);
    return ret;
}
|
|
|
|
|
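qemuDomainSaveImageDefineXML above edits the XML embedded in an existing save image in place: the replacement copy must fit within the space recorded in header.xml_len and is padded out to exactly that length, so whatever follows the XML region keeps its on-disk offset. The standalone sketch below shows the same size-check-and-pad pattern on a toy file format; the struct, field, and function names are invented for the illustration and are not the driver's qemud_save_header API.

/* Toy illustration of rewriting a fixed-size metadata region in place.
 * The header layout and names are made up for this example. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct toy_header {
    char magic[8];
    unsigned int xml_len;   /* bytes reserved for the XML, including NUL */
};

static int
replace_xml(const char *path, const char *newxml)
{
    struct toy_header hdr;
    size_t len = strlen(newxml) + 1;
    char *buf;
    FILE *fp = fopen(path, "r+b");

    if (!fp)
        return -1;
    if (fread(&hdr, sizeof(hdr), 1, fp) != 1)
        goto error;
    if (len > hdr.xml_len)          /* new copy must fit the reserved space */
        goto error;

    buf = calloc(1, hdr.xml_len);   /* pad with NULs up to xml_len */
    if (!buf)
        goto error;
    memcpy(buf, newxml, len);

    /* Rewind and rewrite header + padded XML, leaving later data untouched. */
    if (fseek(fp, 0, SEEK_SET) != 0 ||
        fwrite(&hdr, sizeof(hdr), 1, fp) != 1 ||
        fwrite(buf, hdr.xml_len, 1, fp) != 1) {
        free(buf);
        goto error;
    }
    free(buf);
    return fclose(fp) == 0 ? 0 : -1;

 error:
    fclose(fp);
    return -1;
}

int main(int argc, char **argv)
{
    if (argc != 3) {
        fprintf(stderr, "usage: %s FILE XML\n", argv[0]);
        return 1;
    }
    return replace_xml(argv[1], argv[2]) == 0 ? 0 : 1;
}
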
qemu: detect incomplete save files
Several users have reported problems with 'virsh start' failing because
it was encountering a managed save situation where the managed save file
was incomplete. Be more robust to this by using two different magic
numbers, so that newer libvirt can gracefully handle an incomplete file
differently than a complete one, while older libvirt will at least fail
up front rather than trying to load only to have qemu fail at the end.
Managed save is a convenience - it exists to preserve as much state
as possible; if the state was not preserved, it is reasonable to just
log that fact, then proceed with a fresh boot. On the other hand,
user saves are under user control, so we must fail, but by making
the failure message distinct, the user can better decide how to handle
the situation of an incomplete save file.
* src/qemu/qemu_driver.c (QEMUD_SAVE_PARTIAL): New define.
(qemuDomainSaveInternal): Use it to mark incomplete images.
(qemuDomainSaveImageOpen, qemuDomainObjRestore): Add parameter
that controls what to do with partial images.
(qemuDomainRestoreFlags, qemuDomainSaveImageGetXMLDesc)
(qemuDomainSaveImageDefineXML, qemuDomainObjStart): Update callers.
Based on an initial idea by Osier Yang.
2011-08-30 19:53:45 +00:00
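One plausible reading of the two-magic-number scheme described above is sketched below: the writer first stamps the header with a "partial" magic, streams the payload, and only rewrites the magic to the "complete" value once everything has reached disk, so a reader can tell an interrupted save from a finished one. The magic strings, header layout, and helper names here are invented for the example; they are not the driver's actual QEMUD_SAVE_PARTIAL definition or save format.

/* Standalone sketch of marking a save file "partial" until it is complete. */
#include <stdio.h>
#include <string.h>

#define MAGIC_PARTIAL  "ToySaveP"   /* written first; image not yet usable */
#define MAGIC_COMPLETE "ToySaveC"   /* rewritten once all data is on disk */

struct toy_header {
    char magic[8];
};

static int
save_image(const char *path, const char *payload)
{
    struct toy_header hdr;
    FILE *fp = fopen(path, "wb");

    if (!fp)
        return -1;

    /* 1. Write the header with the PARTIAL magic before any guest data. */
    memcpy(hdr.magic, MAGIC_PARTIAL, sizeof(hdr.magic));
    if (fwrite(&hdr, sizeof(hdr), 1, fp) != 1)
        goto error;

    /* 2. Write the payload (stands in for the migration-to-file stream). */
    if (fwrite(payload, strlen(payload), 1, fp) != 1)
        goto error;
    if (fflush(fp) != 0)
        goto error;

    /* 3. Only now flip the magic to COMPLETE; an interrupted save is left
     *    recognisable as partial by whoever opens it later. */
    memcpy(hdr.magic, MAGIC_COMPLETE, sizeof(hdr.magic));
    if (fseek(fp, 0, SEEK_SET) != 0 ||
        fwrite(&hdr, sizeof(hdr), 1, fp) != 1)
        goto error;

    return fclose(fp) == 0 ? 0 : -1;

 error:
    fclose(fp);
    return -1;
}

int main(void)
{
    return save_image("toy.save", "pretend guest memory image") == 0 ? 0 : 1;
}
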

/* Return 0 on success, 1 if incomplete saved image was silently unlinked,
 * and -1 on failure with error raised. */
static int
qemuDomainObjRestore(virConnectPtr conn,
                     struct qemud_driver *driver,
                     virDomainObjPtr vm,
                     const char *path,
                     bool start_paused,
                     bool bypass_cache)
{
    virDomainDefPtr def = NULL;
    int fd = -1;
    int ret = -1;
    struct qemud_save_header header;
    virFileDirectFdPtr directFd = NULL;

    fd = qemuDomainSaveImageOpen(driver, path, &def, &header,
                                 bypass_cache, &directFd, NULL, -1, false,
                                 true);
    if (fd < 0) {
        if (fd == -3)
            ret = 1;
        goto cleanup;
    }

    if (STRNEQ(vm->def->name, def->name) ||
        memcmp(vm->def->uuid, def->uuid, VIR_UUID_BUFLEN)) {
        char vm_uuidstr[VIR_UUID_STRING_BUFLEN];
        char def_uuidstr[VIR_UUID_STRING_BUFLEN];
        virUUIDFormat(vm->def->uuid, vm_uuidstr);
        virUUIDFormat(def->uuid, def_uuidstr);
        qemuReportError(VIR_ERR_OPERATION_FAILED,
                        _("cannot restore domain '%s' uuid %s from a file"
                          " which belongs to domain '%s' uuid %s"),
                        vm->def->name, vm_uuidstr,
                        def->name, def_uuidstr);
        goto cleanup;
    }

    virDomainObjAssignDef(vm, def, true);
    def = NULL;

    ret = qemuDomainSaveImageStartVM(conn, driver, vm, &fd, &header, path,
                                     start_paused);
    if (virFileDirectFdClose(directFd) < 0)
        VIR_WARN("Failed to close %s", path);

cleanup:
    virDomainDefFree(def);
    VIR_FORCE_CLOSE(fd);
    virFileDirectFdFree(directFd);
    return ret;
}

static char *qemuDomainGetXMLDesc(virDomainPtr dom,
                                  unsigned int flags)
{
    struct qemud_driver *driver = dom->conn->privateData;
    virDomainObjPtr vm;
    char *ret = NULL;
    unsigned long balloon;
    int err;

    /* Flags checked by virDomainDefFormat */

    qemuDriverLock(driver);
    vm = virDomainFindByUUID(&driver->domains, dom->uuid);

    if (!vm) {
        char uuidstr[VIR_UUID_STRING_BUFLEN];
        virUUIDFormat(dom->uuid, uuidstr);
        qemuReportError(VIR_ERR_NO_DOMAIN,
                        _("no domain with matching uuid '%s'"), uuidstr);
        goto cleanup;
    }

    /* Refresh current memory based on balloon info if supported */
    if ((vm->def->memballoon != NULL) &&
        (vm->def->memballoon->model != VIR_DOMAIN_MEMBALLOON_MODEL_NONE) &&
        (virDomainObjIsActive(vm))) {
        qemuDomainObjPrivatePtr priv = vm->privateData;
        /* Don't delay if someone's using the monitor, just use
         * existing most recent data instead */
        if (qemuDomainJobAllowed(priv, QEMU_JOB_QUERY)) {
            if (qemuDomainObjBeginJobWithDriver(driver, vm, QEMU_JOB_QUERY) < 0)
                goto cleanup;

            qemuDomainObjEnterMonitorWithDriver(driver, vm);
            err = qemuMonitorGetBalloonInfo(priv->mon, &balloon);
            qemuDomainObjExitMonitorWithDriver(driver, vm);
            if (qemuDomainObjEndJob(driver, vm) == 0) {
                vm = NULL;
                goto cleanup;
            }
            if (err < 0)
                goto cleanup;
            if (err > 0)
                vm->def->mem.cur_balloon = balloon;
            /* err == 0 indicates no balloon support, so ignore it */
        }
    }

    ret = qemuDomainFormatXML(driver, vm, flags);

cleanup:
    if (vm)
        virDomainObjUnlock(vm);
    qemuDriverUnlock(driver);
    return ret;
}

static char *qemuDomainXMLFromNative(virConnectPtr conn,
                                     const char *format,
                                     const char *config,
                                     unsigned int flags)
{
    struct qemud_driver *driver = conn->privateData;
    virDomainDefPtr def = NULL;
    char *xml = NULL;

    virCheckFlags(0, NULL);

    if (STRNEQ(format, QEMU_CONFIG_FORMAT_ARGV)) {
        qemuReportError(VIR_ERR_INVALID_ARG,
                        _("unsupported config type %s"), format);
        goto cleanup;
    }

    qemuDriverLock(driver);
    def = qemuParseCommandLineString(driver->caps, config,
                                     NULL, NULL, NULL);
    qemuDriverUnlock(driver);
    if (!def)
        goto cleanup;

    if (!def->name &&
        !(def->name = strdup("unnamed"))) {
        virReportOOMError();
        goto cleanup;
    }

    xml = virDomainDefFormat(def, VIR_DOMAIN_XML_INACTIVE);

cleanup:
    virDomainDefFree(def);
    return xml;
}

static char *qemuDomainXMLToNative(virConnectPtr conn,
                                   const char *format,
                                   const char *xmlData,
                                   unsigned int flags)
{
    struct qemud_driver *driver = conn->privateData;
    virDomainDefPtr def = NULL;
    virDomainChrSourceDef monConfig;
    virBitmapPtr qemuCaps = NULL;
    virCommandPtr cmd = NULL;
    char *ret = NULL;
    int i;

    virCheckFlags(0, NULL);

    qemuDriverLock(driver);

    if (STRNEQ(format, QEMU_CONFIG_FORMAT_ARGV)) {
        qemuReportError(VIR_ERR_INVALID_ARG,
                        _("unsupported config type %s"), format);
        goto cleanup;
    }

    def = virDomainDefParseString(driver->caps, xmlData,
                                  QEMU_EXPECTED_VIRT_TYPES, 0);
    if (!def)
        goto cleanup;

    /* Since we're just exporting args, we can't do bridge/network/direct
     * setups, since libvirt will normally create TAP/macvtap devices
     * directly. We convert those configs into generic 'ethernet'
     * config and assume the user has suitable 'ifup-qemu' scripts
     */
    for (i = 0 ; i < def->nnets ; i++) {
        virDomainNetDefPtr net = def->nets[i];
        int bootIndex = net->bootIndex;
        if (net->type == VIR_DOMAIN_NET_TYPE_NETWORK) {
            int actualType = virDomainNetGetActualType(net);
            const char *brname;

            VIR_FREE(net->data.network.name);
            VIR_FREE(net->data.network.portgroup);
            if ((actualType == VIR_DOMAIN_NET_TYPE_BRIDGE) &&
                (brname = virDomainNetGetActualBridgeName(net))) {

                char *brnamecopy = strdup(brname);
                if (!brnamecopy) {
                    virReportOOMError();
                    goto cleanup;
                }

                virDomainActualNetDefFree(net->data.network.actual);

                memset(net, 0, sizeof *net);

                net->type = VIR_DOMAIN_NET_TYPE_ETHERNET;
                net->data.ethernet.dev = brnamecopy;
                net->data.ethernet.script = NULL;
                net->data.ethernet.ipaddr = NULL;
            } else {
                /* actualType is either NETWORK or DIRECT. In either
                 * case, the best we can do is NULL everything out.
                 */
                virDomainActualNetDefFree(net->data.network.actual);
                memset(net, 0, sizeof *net);

                net->type = VIR_DOMAIN_NET_TYPE_ETHERNET;
                net->data.ethernet.dev = NULL;
                net->data.ethernet.script = NULL;
                net->data.ethernet.ipaddr = NULL;
            }
        } else if (net->type == VIR_DOMAIN_NET_TYPE_DIRECT) {
            VIR_FREE(net->data.direct.linkdev);
            VIR_FREE(net->data.direct.virtPortProfile);

            memset(net, 0, sizeof *net);

            net->type = VIR_DOMAIN_NET_TYPE_ETHERNET;
            net->data.ethernet.dev = NULL;
            net->data.ethernet.script = NULL;
            net->data.ethernet.ipaddr = NULL;
        } else if (net->type == VIR_DOMAIN_NET_TYPE_BRIDGE) {
            char *brname = net->data.bridge.brname;
            char *script = net->data.bridge.script;
            char *ipaddr = net->data.bridge.ipaddr;

            memset(net, 0, sizeof *net);

            net->type = VIR_DOMAIN_NET_TYPE_ETHERNET;
            net->data.ethernet.dev = brname;
            net->data.ethernet.script = script;
            net->data.ethernet.ipaddr = ipaddr;
        }
        net->bootIndex = bootIndex;
    }
    for (i = 0 ; i < def->ngraphics ; i++) {
        if (def->graphics[i]->type == VIR_DOMAIN_GRAPHICS_TYPE_VNC &&
            def->graphics[i]->data.vnc.autoport)
            def->graphics[i]->data.vnc.port = QEMU_VNC_PORT_MIN;
    }

    if (qemuCapsExtractVersionInfo(def->emulator, def->os.arch,
                                   NULL,
                                   &qemuCaps) < 0)
        goto cleanup;

    if (qemuProcessPrepareMonitorChr(driver, &monConfig, def->name) < 0)
        goto cleanup;

    if (!(cmd = qemuBuildCommandLine(conn, driver, def,
                                     &monConfig, false, qemuCaps,
                                     NULL, -1, NULL, VIR_VM_OP_NO_OP)))
        goto cleanup;

    ret = virCommandToString(cmd);

cleanup:
    qemuDriverUnlock(driver);

    qemuCapsFree(qemuCaps);
    virCommandFree(cmd);
    virDomainDefFree(def);
    return ret;
}

static int qemudListDefinedDomains(virConnectPtr conn,
                                   char **const names, int nnames) {
    struct qemud_driver *driver = conn->privateData;
    int n;

    qemuDriverLock(driver);
    n = virDomainObjListGetInactiveNames(&driver->domains, names, nnames);
    qemuDriverUnlock(driver);
    return n;
}

static int qemudNumDefinedDomains(virConnectPtr conn) {
    struct qemud_driver *driver = conn->privateData;
    int n;

    qemuDriverLock(driver);
    n = virDomainObjListNumOfDomains(&driver->domains, 0);
    qemuDriverUnlock(driver);

    return n;
}

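The listing helpers above, like virDomainFindByUUID earlier in the file, look guests up in driver->domains keyed by UUID. As a self-contained illustration of a UUID-keyed lookup table, the sketch below stores entries in a small fixed-size hash table with linear probing; this is a toy model for the idea only, not libvirt's virHashTable or virDomainObjList implementation.

/* Toy UUID-keyed hash table: a stand-in for finding a domain object by its
 * 16-byte UUID in roughly constant time. */
#include <stdio.h>
#include <string.h>

#define UUID_LEN 16
#define TABLE_SIZE 64               /* toy fixed capacity, no resizing */

struct toy_domain {
    unsigned char uuid[UUID_LEN];
    char name[64];
    int used;
};

static struct toy_domain table[TABLE_SIZE];

static unsigned int
uuid_hash(const unsigned char *uuid)
{
    unsigned int h = 0;
    int i;
    for (i = 0; i < UUID_LEN; i++)
        h = h * 31 + uuid[i];
    return h % TABLE_SIZE;
}

/* Linear probing keeps the example short; collisions just walk forward. */
static struct toy_domain *
lookup(const unsigned char *uuid, int insert)
{
    unsigned int idx = uuid_hash(uuid);
    int i;

    for (i = 0; i < TABLE_SIZE; i++) {
        struct toy_domain *slot = &table[(idx + i) % TABLE_SIZE];
        if (!slot->used) {
            if (!insert)
                return NULL;
            memcpy(slot->uuid, uuid, UUID_LEN);
            slot->used = 1;
            return slot;
        }
        if (memcmp(slot->uuid, uuid, UUID_LEN) == 0)
            return slot;
    }
    return NULL;                    /* table full or not found */
}

int main(void)
{
    unsigned char uuid[UUID_LEN] = { 0xde, 0xad, 0xbe, 0xef };
    struct toy_domain *dom = lookup(uuid, 1);

    if (dom) {
        snprintf(dom->name, sizeof(dom->name), "demo-guest");
        printf("found %s\n", lookup(uuid, 0)->name);
    }
    return 0;
}
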
static int
qemuDomainObjStart(virConnectPtr conn,
                   struct qemud_driver *driver,
                   virDomainObjPtr vm,
                   unsigned int flags)
{
    int ret = -1;
    char *managed_save;
    bool start_paused = (flags & VIR_DOMAIN_START_PAUSED) != 0;
    bool autodestroy = (flags & VIR_DOMAIN_START_AUTODESTROY) != 0;
    bool bypass_cache = (flags & VIR_DOMAIN_START_BYPASS_CACHE) != 0;
    bool force_boot = (flags & VIR_DOMAIN_START_FORCE_BOOT) != 0;

    /*
     * If there is a managed saved state restore it instead of starting
     * from scratch. The old state is removed once the restoring succeeded.
     */
    managed_save = qemuDomainManagedSavePath(driver, vm);

    if (!managed_save)
        goto cleanup;

    if (virFileExists(managed_save)) {
        if (force_boot) {
            if (unlink(managed_save) < 0) {
                virReportSystemError(errno,
                                     _("cannot remove managed save file %s"),
                                     managed_save);
                goto cleanup;
            }
        } else {
            ret = qemuDomainObjRestore(conn, driver, vm, managed_save,
                                       start_paused, bypass_cache);

            if (ret == 0 && unlink(managed_save) < 0)
                VIR_WARN("Failed to remove the managed state %s", managed_save);
            if (ret > 0)
                VIR_WARN("Ignoring incomplete managed state %s", managed_save);
            else
                goto cleanup;
        }
    }

    ret = qemuProcessStart(conn, driver, vm, NULL, start_paused,
                           autodestroy, -1, NULL, NULL, VIR_VM_OP_CREATE);
    virDomainAuditStart(vm, "booted", ret >= 0);
    if (ret >= 0) {
        virDomainEventPtr event =
            virDomainEventNewFromObj(vm,
                                     VIR_DOMAIN_EVENT_STARTED,
                                     VIR_DOMAIN_EVENT_STARTED_BOOTED);
        if (event) {
            qemuDomainEventQueue(driver, event);
            if (start_paused) {
                event = virDomainEventNewFromObj(vm,
                                                 VIR_DOMAIN_EVENT_SUSPENDED,
                                                 VIR_DOMAIN_EVENT_SUSPENDED_PAUSED);
                if (event)
                    qemuDomainEventQueue(driver, event);
            }
        }
    }

cleanup:
    VIR_FREE(managed_save);
    return ret;
}

static int
qemuDomainStartWithFlags(virDomainPtr dom, unsigned int flags)
{
    struct qemud_driver *driver = dom->conn->privateData;
    virDomainObjPtr vm;
    int ret = -1;

    virCheckFlags(VIR_DOMAIN_START_PAUSED |
                  VIR_DOMAIN_START_AUTODESTROY |
                  VIR_DOMAIN_START_BYPASS_CACHE |
                  VIR_DOMAIN_START_FORCE_BOOT, -1);

    qemuDriverLock(driver);
    vm = virDomainFindByUUID(&driver->domains, dom->uuid);

    if (!vm) {
        char uuidstr[VIR_UUID_STRING_BUFLEN];
        virUUIDFormat(dom->uuid, uuidstr);
        qemuReportError(VIR_ERR_NO_DOMAIN,
                        _("no domain with matching uuid '%s'"), uuidstr);
        goto cleanup;
    }

    if (qemuDomainObjBeginJobWithDriver(driver, vm, QEMU_JOB_MODIFY) < 0)
        goto cleanup;

    if (virDomainObjIsActive(vm)) {
        qemuReportError(VIR_ERR_OPERATION_INVALID,
                        "%s", _("domain is already running"));
        goto endjob;
    }

    if (qemuDomainObjStart(dom->conn, driver, vm, flags) < 0)
        goto endjob;

    ret = 0;

endjob:
    if (qemuDomainObjEndJob(driver, vm) == 0)
        vm = NULL;

cleanup:
    if (vm)
        virDomainObjUnlock(vm);
    qemuDriverUnlock(driver);
    return ret;
}

static int
qemuDomainStart(virDomainPtr dom)
{
    return qemuDomainStartWithFlags(dom, 0);
}

static int
qemudCanonicalizeMachineFromInfo(virDomainDefPtr def,
                                 virCapsGuestDomainInfoPtr info,
                                 char **canonical)
{
    int i;

    *canonical = NULL;

    for (i = 0; i < info->nmachines; i++) {
        virCapsGuestMachinePtr machine = info->machines[i];

        if (!machine->canonical)
            continue;

        if (def->os.machine && STRNEQ(def->os.machine, machine->name))
            continue;

        if (!(*canonical = strdup(machine->canonical))) {
            virReportOOMError();
            return -1;
        }

        break;
    }

    return 0;
}

static int
qemudCanonicalizeMachineDirect(virDomainDefPtr def, char **canonical)
{
    virCapsGuestMachinePtr *machines = NULL;
    int i, nmachines = 0;

    if (qemuCapsProbeMachineTypes(def->emulator, &machines, &nmachines) < 0)
        return -1;

    for (i = 0; i < nmachines; i++) {
        if (!machines[i]->canonical)
            continue;

        if (def->os.machine && STRNEQ(def->os.machine, machines[i]->name))
            continue;

        *canonical = machines[i]->canonical;
        machines[i]->canonical = NULL;
        break;
    }

    virCapabilitiesFreeMachines(machines, nmachines);

    return 0;
}

int
qemudCanonicalizeMachine(struct qemud_driver *driver, virDomainDefPtr def)
{
    char *canonical = NULL;
    int i;

    for (i = 0; i < driver->caps->nguests; i++) {
        virCapsGuestPtr guest = driver->caps->guests[i];
        virCapsGuestDomainInfoPtr info;
        int j;

        for (j = 0; j < guest->arch.ndomains; j++) {
            info = &guest->arch.domains[j]->info;

            if (!info->emulator || !STREQ(info->emulator, def->emulator))
                continue;

            if (!info->nmachines)
                info = &guest->arch.defaultInfo;

            if (qemudCanonicalizeMachineFromInfo(def, info, &canonical) < 0)
                return -1;
            goto out;
        }

        info = &guest->arch.defaultInfo;

        if (info->emulator && STREQ(info->emulator, def->emulator)) {
            if (qemudCanonicalizeMachineFromInfo(def, info, &canonical) < 0)
                return -1;
            goto out;
        }
    }

    if (qemudCanonicalizeMachineDirect(def, &canonical) < 0)
        return -1;

out:
    if (canonical) {
        VIR_FREE(def->os.machine);
        def->os.machine = canonical;
    }
    return 0;
}

static virDomainPtr qemudDomainDefine(virConnectPtr conn, const char *xml) {
    struct qemud_driver *driver = conn->privateData;
    virDomainDefPtr def;
    virDomainObjPtr vm = NULL;
    virDomainPtr dom = NULL;
    virDomainEventPtr event = NULL;
    int dupVM;

    qemuDriverLock(driver);
    if (!(def = virDomainDefParseString(driver->caps, xml,
                                        QEMU_EXPECTED_VIRT_TYPES,
                                        VIR_DOMAIN_XML_INACTIVE)))
        goto cleanup;

    if (virSecurityManagerVerify(driver->securityManager, def) < 0)
        goto cleanup;

    if ((dupVM = virDomainObjIsDuplicate(&driver->domains, def, 0)) < 0)
        goto cleanup;

    if (qemudCanonicalizeMachine(driver, def) < 0)
        goto cleanup;

    if (qemuDomainAssignPCIAddresses(def) < 0)
        goto cleanup;

    if (!(vm = virDomainAssignDef(driver->caps,
                                  &driver->domains,
                                  def, false))) {
        goto cleanup;
    }
    def = NULL;
    vm->persistent = 1;

    if (virDomainSaveConfig(driver->configDir,
                            vm->newDef ? vm->newDef : vm->def) < 0) {
        VIR_INFO("Defining domain '%s'", vm->def->name);
        qemuDomainRemoveInactive(driver, vm);
        vm = NULL;
        goto cleanup;
    }

    event = virDomainEventNewFromObj(vm,
                                     VIR_DOMAIN_EVENT_DEFINED,
                                     !dupVM ?
                                     VIR_DOMAIN_EVENT_DEFINED_ADDED :
                                     VIR_DOMAIN_EVENT_DEFINED_UPDATED);

    VIR_INFO("Creating domain '%s'", vm->def->name);
    dom = virGetDomain(conn, vm->def->name, vm->def->uuid);
    if (dom) dom->id = vm->def->id;

cleanup:
    virDomainDefFree(def);
    if (vm)
        virDomainObjUnlock(vm);
    if (event)
        qemuDomainEventQueue(driver, event);
    qemuDriverUnlock(driver);
    return dom;
}

static int
qemuDomainUndefineFlags(virDomainPtr dom,
                        unsigned int flags)
{
    struct qemud_driver *driver = dom->conn->privateData;
    virDomainObjPtr vm;
    virDomainEventPtr event = NULL;
    char *name = NULL;
    int ret = -1;
    int nsnapshots;

    virCheckFlags(VIR_DOMAIN_UNDEFINE_MANAGED_SAVE |
                  VIR_DOMAIN_UNDEFINE_SNAPSHOTS_METADATA, -1);

    qemuDriverLock(driver);
    vm = virDomainFindByUUID(&driver->domains, dom->uuid);

    if (!vm) {
        char uuidstr[VIR_UUID_STRING_BUFLEN];
        virUUIDFormat(dom->uuid, uuidstr);
        qemuReportError(VIR_ERR_NO_DOMAIN,
                        _("no domain with matching uuid '%s'"), uuidstr);
        goto cleanup;
    }

    if (!vm->persistent) {
        qemuReportError(VIR_ERR_OPERATION_INVALID,
                        "%s", _("cannot undefine transient domain"));
        goto cleanup;
    }

    if (!virDomainObjIsActive(vm) &&
        (nsnapshots = virDomainSnapshotObjListNum(&vm->snapshots, 0))) {
        if (!(flags & VIR_DOMAIN_UNDEFINE_SNAPSHOTS_METADATA)) {
            qemuReportError(VIR_ERR_OPERATION_INVALID,
                            _("cannot delete inactive domain with %d "
                              "snapshots"),
                            nsnapshots);
            goto cleanup;
        }
        if (qemuDomainSnapshotDiscardAllMetadata(driver, vm) < 0)
            goto cleanup;
    }

    name = qemuDomainManagedSavePath(driver, vm);
    if (name == NULL)
        goto cleanup;

    if (virFileExists(name)) {
        if (flags & VIR_DOMAIN_UNDEFINE_MANAGED_SAVE) {
            if (unlink(name) < 0) {
                qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                                _("Failed to remove domain managed "
                                  "save image"));
                goto cleanup;
            }
        } else {
            qemuReportError(VIR_ERR_OPERATION_INVALID, "%s",
                            _("Refusing to undefine while domain managed "
                              "save image exists"));
            goto cleanup;
        }
    }

    if (virDomainDeleteConfig(driver->configDir, driver->autostartDir, vm) < 0)
        goto cleanup;

    event = virDomainEventNewFromObj(vm,
                                     VIR_DOMAIN_EVENT_UNDEFINED,
                                     VIR_DOMAIN_EVENT_UNDEFINED_REMOVED);

    VIR_INFO("Undefining domain '%s'", vm->def->name);

    /* If the domain is active, keep it running but set it as transient.
     * domainDestroy and domainShutdown will take care of removing the
     * domain obj from the hash table.
     */
    if (virDomainObjIsActive(vm)) {
        vm->persistent = 0;
    } else {
        qemuDomainRemoveInactive(driver, vm);
        vm = NULL;
    }

    ret = 0;

cleanup:
    VIR_FREE(name);
    if (vm)
        virDomainObjUnlock(vm);
    if (event)
        qemuDomainEventQueue(driver, event);
    qemuDriverUnlock(driver);
    return ret;
}

static int
qemudDomainUndefine(virDomainPtr dom)
{
    return qemuDomainUndefineFlags(dom, 0);
}

static int
qemuDomainAttachDeviceDiskLive(struct qemud_driver *driver,
                               virDomainObjPtr vm,
                               virDomainDeviceDefPtr dev)
{
    virDomainDiskDefPtr disk = dev->data.disk;
    virCgroupPtr cgroup = NULL;
    int ret = -1;

    if (disk->driverName != NULL && !STREQ(disk->driverName, "qemu")) {
        qemuReportError(VIR_ERR_CONFIG_UNSUPPORTED,
                        _("unsupported driver name '%s' for disk '%s'"),
                        disk->driverName, disk->src);
        goto end;
    }

    if (qemuCgroupControllerActive(driver, VIR_CGROUP_CONTROLLER_DEVICES)) {
        if (virCgroupForDomain(driver->cgroup, vm->def->name, &cgroup, 0)) {
            qemuReportError(VIR_ERR_INTERNAL_ERROR,
                            _("Unable to find cgroup for %s"),
                            vm->def->name);
            goto end;
        }
        if (qemuSetupDiskCgroup(driver, vm, cgroup, disk) < 0)
            goto end;
    }
    switch (disk->device) {
    case VIR_DOMAIN_DISK_DEVICE_CDROM:
    case VIR_DOMAIN_DISK_DEVICE_FLOPPY:
        ret = qemuDomainChangeEjectableMedia(driver, vm, disk, false);
        break;
    case VIR_DOMAIN_DISK_DEVICE_DISK:
        if (disk->bus == VIR_DOMAIN_DISK_BUS_USB)
            ret = qemuDomainAttachUsbMassstorageDevice(driver, vm,
                                                       disk);
        else if (disk->bus == VIR_DOMAIN_DISK_BUS_VIRTIO)
            ret = qemuDomainAttachPciDiskDevice(driver, vm, disk);
        else if (disk->bus == VIR_DOMAIN_DISK_BUS_SCSI)
            ret = qemuDomainAttachSCSIDisk(driver, vm, disk);
        else
            qemuReportError(VIR_ERR_CONFIG_UNSUPPORTED,
                            _("disk bus '%s' cannot be hotplugged."),
                            virDomainDiskBusTypeToString(disk->bus));
        break;
    default:
        qemuReportError(VIR_ERR_CONFIG_UNSUPPORTED,
                        _("disk device type '%s' cannot be hotplugged"),
                        virDomainDiskDeviceTypeToString(disk->device));
        break;
    }

    if (ret != 0 && cgroup) {
        if (qemuTeardownDiskCgroup(driver, vm, cgroup, disk) < 0)
            VIR_WARN("Failed to teardown cgroup for disk path %s",
                     NULLSTR(disk->src));
    }
end:
    if (cgroup)
        virCgroupFree(&cgroup);
    return ret;
}

static int
qemuDomainAttachDeviceControllerLive(struct qemud_driver *driver,
                                     virDomainObjPtr vm,
                                     virDomainDeviceDefPtr dev)
{
    virDomainControllerDefPtr cont = dev->data.controller;
    int ret = -1;

    switch (cont->type) {
    case VIR_DOMAIN_CONTROLLER_TYPE_SCSI:
        ret = qemuDomainAttachPciControllerDevice(driver, vm, cont);
        break;
    default:
        qemuReportError(VIR_ERR_CONFIG_UNSUPPORTED,
                        _("disk controller bus '%s' cannot be hotplugged."),
                        virDomainControllerTypeToString(cont->type));
        break;
    }
    return ret;
}

static int
qemuDomainAttachDeviceLive(virDomainObjPtr vm,
                           virDomainDeviceDefPtr dev,
                           virDomainPtr dom)
{
    struct qemud_driver *driver = dom->conn->privateData;
    int ret = -1;

    switch (dev->type) {
    case VIR_DOMAIN_DEVICE_DISK:
        qemuDomainObjCheckDiskTaint(driver, vm, dev->data.disk, -1);
        ret = qemuDomainAttachDeviceDiskLive(driver, vm, dev);
        if (!ret)
            dev->data.disk = NULL;
        break;

    case VIR_DOMAIN_DEVICE_CONTROLLER:
        ret = qemuDomainAttachDeviceControllerLive(driver, vm, dev);
        if (!ret)
            dev->data.controller = NULL;
        break;

    case VIR_DOMAIN_DEVICE_LEASE:
        ret = qemuDomainAttachLease(driver, vm,
                                    dev->data.lease);
        if (ret == 0)
            dev->data.lease = NULL;
        break;

    case VIR_DOMAIN_DEVICE_NET:
        qemuDomainObjCheckNetTaint(driver, vm, dev->data.net, -1);
        ret = qemuDomainAttachNetDevice(dom->conn, driver, vm,
                                        dev->data.net);
        if (!ret)
            dev->data.net = NULL;
        break;

    case VIR_DOMAIN_DEVICE_HOSTDEV:
        ret = qemuDomainAttachHostDevice(driver, vm,
                                         dev->data.hostdev);
        if (!ret)
            dev->data.hostdev = NULL;
        break;

    case VIR_DOMAIN_DEVICE_REDIRDEV:
        ret = qemuDomainAttachRedirdevDevice(driver, vm,
                                             dev->data.redirdev);
        if (!ret)
            dev->data.redirdev = NULL;
        break;

    default:
        qemuReportError(VIR_ERR_CONFIG_UNSUPPORTED,
                        _("device type '%s' cannot be attached"),
                        virDomainDeviceTypeToString(dev->type));
        break;
    }

    return ret;
}

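/*
 * Hot-unplug a disk from a running domain.  Only disk devices on the
 * virtio, SCSI and USB buses are supported; anything else is rejected.
 */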
static int
qemuDomainDetachDeviceDiskLive(struct qemud_driver *driver,
                               virDomainObjPtr vm,
                               virDomainDeviceDefPtr dev)
{
    virDomainDiskDefPtr disk = dev->data.disk;
    int ret = -1;

    switch (disk->device) {
    case VIR_DOMAIN_DISK_DEVICE_DISK:
        if (disk->bus == VIR_DOMAIN_DISK_BUS_VIRTIO)
            ret = qemuDomainDetachPciDiskDevice(driver, vm, dev);
        else if (disk->bus == VIR_DOMAIN_DISK_BUS_SCSI)
            ret = qemuDomainDetachDiskDevice(driver, vm, dev);
        else if (dev->data.disk->bus == VIR_DOMAIN_DISK_BUS_USB)
            ret = qemuDomainDetachDiskDevice(driver, vm, dev);
        else
            qemuReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                            _("This type of disk cannot be hot unplugged"));
        break;
    default:
        qemuReportError(VIR_ERR_CONFIG_UNSUPPORTED,
                        _("device type '%s' cannot be detached"),
                        virDomainDeviceTypeToString(dev->type));
        break;
    }

    return ret;
}

static int
qemuDomainDetachDeviceControllerLive(struct qemud_driver *driver,
                                     virDomainObjPtr vm,
                                     virDomainDeviceDefPtr dev)
{
    virDomainControllerDefPtr cont = dev->data.controller;
    int ret = -1;

    switch (cont->type) {
    case VIR_DOMAIN_CONTROLLER_TYPE_SCSI:
        ret = qemuDomainDetachPciControllerDevice(driver, vm, dev);
        break;
    default:
        qemuReportError(VIR_ERR_CONFIG_UNSUPPORTED,
                        _("disk controller bus '%s' cannot be hotunplugged."),
                        virDomainControllerTypeToString(cont->type));
    }

    return ret;
}

static int
qemuDomainDetachDeviceLive(virDomainObjPtr vm,
                           virDomainDeviceDefPtr dev,
                           virDomainPtr dom)
{
    struct qemud_driver *driver = dom->conn->privateData;
    int ret = -1;

    switch (dev->type) {
    case VIR_DOMAIN_DEVICE_DISK:
        ret = qemuDomainDetachDeviceDiskLive(driver, vm, dev);
        break;
    case VIR_DOMAIN_DEVICE_CONTROLLER:
        ret = qemuDomainDetachDeviceControllerLive(driver, vm, dev);
        break;
    case VIR_DOMAIN_DEVICE_LEASE:
        ret = qemuDomainDetachLease(driver, vm, dev->data.lease);
        break;
    case VIR_DOMAIN_DEVICE_NET:
        ret = qemuDomainDetachNetDevice(driver, vm, dev);
        break;
    case VIR_DOMAIN_DEVICE_HOSTDEV:
        ret = qemuDomainDetachHostDevice(driver, vm, dev);
        break;
    default:
        qemuReportError(VIR_ERR_CONFIG_UNSUPPORTED,
                        "%s", _("This type of device cannot be hot unplugged"));
        break;
    }

    return ret;
}

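/*
 * Swap the media in a CD-ROM or floppy drive of a running domain,
 * setting up cgroup device ACLs for the new media and tearing them
 * down again if the change fails.
 */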
static int
qemuDomainChangeDiskMediaLive(virDomainObjPtr vm,
                              virDomainDeviceDefPtr dev,
                              struct qemud_driver *driver,
                              bool force)
{
    virDomainDiskDefPtr disk = dev->data.disk;
    virCgroupPtr cgroup = NULL;
    int ret = -1;

    if (qemuCgroupControllerActive(driver, VIR_CGROUP_CONTROLLER_DEVICES)) {
        if (virCgroupForDomain(driver->cgroup,
                               vm->def->name, &cgroup, 0) != 0) {
            qemuReportError(VIR_ERR_INTERNAL_ERROR,
                            _("Unable to find cgroup for %s"),
                            vm->def->name);
            goto end;
        }
        if (qemuSetupDiskCgroup(driver, vm, cgroup, disk) < 0)
            goto end;
    }

    switch (disk->device) {
    case VIR_DOMAIN_DISK_DEVICE_CDROM:
    case VIR_DOMAIN_DISK_DEVICE_FLOPPY:
        ret = qemuDomainChangeEjectableMedia(driver, vm, disk, force);
        if (ret == 0)
            dev->data.disk = NULL;
        break;
    default:
        qemuReportError(VIR_ERR_CONFIG_UNSUPPORTED,
                        _("disk bus '%s' cannot be updated."),
                        virDomainDiskBusTypeToString(disk->bus));
        break;
    }

    if (ret != 0 && cgroup) {
        if (qemuTeardownDiskCgroup(driver, vm, cgroup, disk) < 0)
            VIR_WARN("Failed to teardown cgroup for disk path %s",
                     NULLSTR(disk->src));
    }

end:
    if (cgroup)
        virCgroupFree(&cgroup);
    return ret;
}

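/*
 * Live device update dispatcher: ejectable disk media, graphics and
 * network devices are handled; every other device type is rejected.
 */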
static int
qemuDomainUpdateDeviceLive(virDomainObjPtr vm,
                           virDomainDeviceDefPtr dev,
                           virDomainPtr dom,
                           bool force)
{
    struct qemud_driver *driver = dom->conn->privateData;
    int ret = -1;

    switch (dev->type) {
    case VIR_DOMAIN_DEVICE_DISK:
        ret = qemuDomainChangeDiskMediaLive(vm, dev, driver, force);
        break;
    case VIR_DOMAIN_DEVICE_GRAPHICS:
        ret = qemuDomainChangeGraphics(driver, vm, dev->data.graphics);
        break;
    case VIR_DOMAIN_DEVICE_NET:
        ret = qemuDomainChangeNet(driver, vm, dom, dev->data.net);
        break;
    default:
        qemuReportError(VIR_ERR_CONFIG_UNSUPPORTED,
                        _("device type '%s' cannot be updated"),
                        virDomainDeviceTypeToString(dev->type));
        break;
    }

    return ret;
}

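/*
 * Attach a device to the persistent configuration only.  The parsed
 * definition is inserted into the copied vmdef (duplicates by target
 * or MAC address are rejected) and implicit controllers and PCI
 * addresses are reassigned where needed.
 */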
static int
qemuDomainAttachDeviceConfig(virDomainDefPtr vmdef,
                             virDomainDeviceDefPtr dev)
{
    virDomainDiskDefPtr disk;
    virDomainNetDefPtr net;
    virDomainLeaseDefPtr lease;

    switch (dev->type) {
    case VIR_DOMAIN_DEVICE_DISK:
        disk = dev->data.disk;
        if (virDomainDiskIndexByName(vmdef, disk->dst, true) >= 0) {
            qemuReportError(VIR_ERR_INVALID_ARG,
                            _("target %s already exists."), disk->dst);
            return -1;
        }
        if (virDomainDiskInsert(vmdef, disk)) {
            virReportOOMError();
            return -1;
        }
        /* vmdef now owns the pointer; the generic vmdef code does the rest */
        dev->data.disk = NULL;
        if (disk->bus != VIR_DOMAIN_DISK_BUS_VIRTIO)
            if (virDomainDefAddImplicitControllers(vmdef) < 0)
                return -1;
        if (qemuDomainAssignPCIAddresses(vmdef) < 0)
            return -1;
        break;

    case VIR_DOMAIN_DEVICE_NET:
        net = dev->data.net;
        if (virDomainNetIndexByMac(vmdef, net->mac) >= 0) {
            char macbuf[VIR_MAC_STRING_BUFLEN];
            virFormatMacAddr(net->mac, macbuf);
            qemuReportError(VIR_ERR_INVALID_ARG,
                            _("mac %s already exists"), macbuf);
            return -1;
        }
        if (virDomainNetInsert(vmdef, net)) {
            virReportOOMError();
            return -1;
        }
        dev->data.net = NULL;
        if (qemuDomainAssignPCIAddresses(vmdef) < 0)
            return -1;
        break;

    case VIR_DOMAIN_DEVICE_LEASE:
        lease = dev->data.lease;
        if (virDomainLeaseIndex(vmdef, lease) >= 0) {
            qemuReportError(VIR_ERR_INVALID_ARG,
                            _("Lease %s in lockspace %s already exists"),
                            lease->key, NULLSTR(lease->lockspace));
            return -1;
        }
        if (virDomainLeaseInsert(vmdef, lease) < 0)
            return -1;

        /* vmdef now owns the pointer; the generic vmdef code does the rest */
        dev->data.lease = NULL;
        break;

    default:
        qemuReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                        _("persistent attach of device is not supported"));
        return -1;
    }
    return 0;
}

static int
qemuDomainDetachDeviceConfig(virDomainDefPtr vmdef,
                             virDomainDeviceDefPtr dev)
{
    virDomainDiskDefPtr disk;
    virDomainNetDefPtr net;
    virDomainLeaseDefPtr lease;

    switch (dev->type) {
    case VIR_DOMAIN_DEVICE_DISK:
        disk = dev->data.disk;
        if (virDomainDiskRemoveByName(vmdef, disk->dst)) {
            qemuReportError(VIR_ERR_INVALID_ARG,
                            _("no target device %s"), disk->dst);
            return -1;
        }
        break;

    case VIR_DOMAIN_DEVICE_NET:
        net = dev->data.net;
        if (virDomainNetRemoveByMac(vmdef, net->mac)) {
            char macbuf[VIR_MAC_STRING_BUFLEN];

            virFormatMacAddr(net->mac, macbuf);
            qemuReportError(VIR_ERR_INVALID_ARG,
                            _("no nic of mac %s"), macbuf);
            return -1;
        }
        break;

    case VIR_DOMAIN_DEVICE_LEASE:
        lease = dev->data.lease;
        if (virDomainLeaseRemove(vmdef, lease) < 0) {
            qemuReportError(VIR_ERR_INVALID_ARG,
                            _("Lease %s in lockspace %s does not exist"),
                            lease->key, NULLSTR(lease->lockspace));
            return -1;
        }
        break;

    default:
        qemuReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                        _("persistent detach of device is not supported"));
        return -1;
    }
    return 0;
}

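/*
 * Update a device in the persistent configuration.  Only source
 * changes on CDROM/floppy disks and replacing a NIC matched by its
 * MAC address are supported.
 */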
static int
qemuDomainUpdateDeviceConfig(virDomainDefPtr vmdef,
                             virDomainDeviceDefPtr dev)
{
    virDomainDiskDefPtr orig, disk;
    virDomainNetDefPtr net;
    int pos;

    switch (dev->type) {
    case VIR_DOMAIN_DEVICE_DISK:
        disk = dev->data.disk;
        pos = virDomainDiskIndexByName(vmdef, disk->dst, false);
        if (pos < 0) {
            qemuReportError(VIR_ERR_INVALID_ARG,
                            _("target %s doesn't exist."), disk->dst);
            return -1;
        }
        orig = vmdef->disks[pos];
        if (!(orig->device == VIR_DOMAIN_DISK_DEVICE_CDROM) &&
            !(orig->device == VIR_DOMAIN_DISK_DEVICE_FLOPPY)) {
            qemuReportError(VIR_ERR_INVALID_ARG,
                            _("this disk doesn't support update"));
            return -1;
        }
        /*
         * Update 'orig': only src/type/driverName/driverType/cachemode
         * may be changed.
         */
        VIR_FREE(orig->src);
        orig->src = disk->src;
        orig->type = disk->type;
        orig->cachemode = disk->cachemode;
        if (disk->driverName) {
            VIR_FREE(orig->driverName);
            orig->driverName = disk->driverName;
            disk->driverName = NULL;
        }
        if (disk->driverType) {
            VIR_FREE(orig->driverType);
            orig->driverType = disk->driverType;
            disk->driverType = NULL;
        }
        disk->src = NULL;
        break;

    case VIR_DOMAIN_DEVICE_NET:
        net = dev->data.net;
        if ((pos = virDomainNetIndexByMac(vmdef, net->mac)) < 0) {
            char macbuf[VIR_MAC_STRING_BUFLEN];
            virFormatMacAddr(net->mac, macbuf);
            qemuReportError(VIR_ERR_INVALID_ARG,
                            _("mac %s doesn't exist"), macbuf);
            return -1;
        }

        VIR_FREE(vmdef->nets[pos]);

        vmdef->nets[pos] = net;
        dev->data.net = NULL;

        if (qemuDomainAssignPCIAddresses(vmdef) < 0)
            return -1;
        break;

    default:
        qemuReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                        _("persistent update of device is not supported"));
        return -1;
    }
    return 0;
}

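/*
 * The attach/detach/update entry points below all funnel into
 * qemuDomainModifyDeviceFlags(), which validates the flags against the
 * domain state, applies the change to the persistent definition and/or
 * the live domain, and then saves the domain status and configuration.
 */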
/* Actions for qemuDomainModifyDeviceFlags */
enum {
    QEMU_DEVICE_ATTACH,
    QEMU_DEVICE_DETACH,
    QEMU_DEVICE_UPDATE,
};


static int
qemuDomainModifyDeviceFlags(virDomainPtr dom, const char *xml,
                            unsigned int flags, int action)
{
    struct qemud_driver *driver = dom->conn->privateData;
    virDomainObjPtr vm = NULL;
    virDomainDefPtr vmdef = NULL;
    virDomainDeviceDefPtr dev = NULL;
    bool force = (flags & VIR_DOMAIN_DEVICE_MODIFY_FORCE) != 0;
    int ret = -1;
    unsigned int affect;

    virCheckFlags(VIR_DOMAIN_AFFECT_LIVE |
                  VIR_DOMAIN_AFFECT_CONFIG |
                  (action == QEMU_DEVICE_UPDATE ?
                   VIR_DOMAIN_DEVICE_MODIFY_FORCE : 0), -1);

    affect = flags & (VIR_DOMAIN_AFFECT_LIVE | VIR_DOMAIN_AFFECT_CONFIG);

    qemuDriverLock(driver);
    vm = virDomainFindByUUID(&driver->domains, dom->uuid);
    if (!vm) {
        char uuidstr[VIR_UUID_STRING_BUFLEN];
        virUUIDFormat(dom->uuid, uuidstr);
        qemuReportError(VIR_ERR_NO_DOMAIN,
                        _("no domain with matching uuid '%s'"), uuidstr);
        goto cleanup;
    }

    if (qemuDomainObjBeginJobWithDriver(driver, vm, QEMU_JOB_MODIFY) < 0)
        goto cleanup;

    if (virDomainObjIsActive(vm)) {
        if (affect == VIR_DOMAIN_AFFECT_CURRENT)
            flags |= VIR_DOMAIN_AFFECT_LIVE;
    } else {
        if (affect == VIR_DOMAIN_AFFECT_CURRENT)
            flags |= VIR_DOMAIN_AFFECT_CONFIG;
        /* check consistency between flags and the vm state */
        if (flags & VIR_DOMAIN_AFFECT_LIVE) {
            qemuReportError(VIR_ERR_OPERATION_INVALID,
                            "%s",
                            _("cannot do live update a device on "
                              "inactive domain"));
            goto endjob;
        }
    }

    if ((flags & VIR_DOMAIN_AFFECT_CONFIG) && !vm->persistent) {
        qemuReportError(VIR_ERR_OPERATION_INVALID,
                        "%s", _("cannot modify device on transient domain"));
        goto endjob;
    }

    if (flags & VIR_DOMAIN_AFFECT_CONFIG) {
        dev = virDomainDeviceDefParse(driver->caps, vm->def, xml,
                                      VIR_DOMAIN_XML_INACTIVE);
        if (dev == NULL)
            goto endjob;

        /* Make a copy for updated domain. */
        vmdef = virDomainObjCopyPersistentDef(driver->caps, vm);
        if (!vmdef)
            goto endjob;
        switch (action) {
        case QEMU_DEVICE_ATTACH:
            ret = qemuDomainAttachDeviceConfig(vmdef, dev);
            break;
        case QEMU_DEVICE_DETACH:
            ret = qemuDomainDetachDeviceConfig(vmdef, dev);
            break;
        case QEMU_DEVICE_UPDATE:
            ret = qemuDomainUpdateDeviceConfig(vmdef, dev);
            break;
        default:
            qemuReportError(VIR_ERR_INTERNAL_ERROR,
                            _("unknown domain modify action %d"), action);
            break;
        }

        if (ret == -1)
            goto endjob;
    }

    if (flags & VIR_DOMAIN_AFFECT_LIVE) {
        /* If dev exists it was created to modify the domain config. Free it. */
        virDomainDeviceDefFree(dev);
        dev = virDomainDeviceDefParse(driver->caps, vm->def, xml,
                                      VIR_DOMAIN_XML_INACTIVE);
        if (dev == NULL) {
            ret = -1;
            goto endjob;
        }

        switch (action) {
        case QEMU_DEVICE_ATTACH:
            ret = qemuDomainAttachDeviceLive(vm, dev, dom);
            break;
        case QEMU_DEVICE_DETACH:
            ret = qemuDomainDetachDeviceLive(vm, dev, dom);
            break;
        case QEMU_DEVICE_UPDATE:
            ret = qemuDomainUpdateDeviceLive(vm, dev, dom, force);
            break;
        default:
            qemuReportError(VIR_ERR_INTERNAL_ERROR,
                            _("unknown domain modify action %d"), action);
            ret = -1;
            break;
        }

        if (ret == -1)
            goto endjob;
        /*
         * update domain status forcibly because the domain status may be
         * changed even if we failed to attach the device. For example,
         * a new controller may be created.
         */
        if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0) {
            ret = -1;
            goto endjob;
        }
    }

    /* Finally, if no error until here, we can save config. */
    if (flags & VIR_DOMAIN_AFFECT_CONFIG) {
        ret = virDomainSaveConfig(driver->configDir, vmdef);
        if (!ret) {
            virDomainObjAssignDef(vm, vmdef, false);
            vmdef = NULL;
        }
    }

endjob:
    if (qemuDomainObjEndJob(driver, vm) == 0)
        vm = NULL;

cleanup:
    virDomainDefFree(vmdef);
    virDomainDeviceDefFree(dev);
    if (vm)
        virDomainObjUnlock(vm);
    qemuDriverUnlock(driver);
    return ret;
}

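/*
 * Public attach/detach/update entry points.  Illustrative example of
 * the device XML they consume (not taken from this file):
 *
 *   <disk type='file' device='disk'>
 *     <driver name='qemu' type='raw'/>
 *     <source file='/var/lib/libvirt/images/data.img'/>
 *     <target dev='vdb' bus='virtio'/>
 *   </disk>
 */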
static int qemuDomainAttachDeviceFlags(virDomainPtr dom, const char *xml,
                                       unsigned int flags)
{
    return qemuDomainModifyDeviceFlags(dom, xml, flags, QEMU_DEVICE_ATTACH);
}

static int qemuDomainAttachDevice(virDomainPtr dom, const char *xml)
{
    return qemuDomainAttachDeviceFlags(dom, xml,
                                       VIR_DOMAIN_AFFECT_LIVE);
}


static int qemuDomainUpdateDeviceFlags(virDomainPtr dom,
                                       const char *xml,
                                       unsigned int flags)
{
    return qemuDomainModifyDeviceFlags(dom, xml, flags, QEMU_DEVICE_UPDATE);
}

static int qemuDomainDetachDeviceFlags(virDomainPtr dom, const char *xml,
                                       unsigned int flags)
{
    return qemuDomainModifyDeviceFlags(dom, xml, flags, QEMU_DEVICE_DETACH);
}

static int qemuDomainDetachDevice(virDomainPtr dom, const char *xml)
{
    return qemuDomainDetachDeviceFlags(dom, xml,
                                       VIR_DOMAIN_AFFECT_LIVE);
}

static int qemudDomainGetAutostart(virDomainPtr dom,
                                   int *autostart) {
    struct qemud_driver *driver = dom->conn->privateData;
    virDomainObjPtr vm;
    int ret = -1;

    qemuDriverLock(driver);
    vm = virDomainFindByUUID(&driver->domains, dom->uuid);
    qemuDriverUnlock(driver);

    if (!vm) {
        char uuidstr[VIR_UUID_STRING_BUFLEN];
        virUUIDFormat(dom->uuid, uuidstr);
        qemuReportError(VIR_ERR_NO_DOMAIN,
                        _("no domain with matching uuid '%s'"), uuidstr);
        goto cleanup;
    }

    *autostart = vm->autostart;
    ret = 0;

cleanup:
    if (vm)
        virDomainObjUnlock(vm);
    return ret;
}

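/*
 * Enable or disable autostart for a persistent domain by creating or
 * removing the symlink from the autostart directory to the domain's
 * config file.
 */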
static int qemudDomainSetAutostart(virDomainPtr dom,
                                   int autostart) {
    struct qemud_driver *driver = dom->conn->privateData;
    virDomainObjPtr vm;
    char *configFile = NULL, *autostartLink = NULL;
    int ret = -1;

    qemuDriverLock(driver);
    vm = virDomainFindByUUID(&driver->domains, dom->uuid);

    if (!vm) {
        char uuidstr[VIR_UUID_STRING_BUFLEN];
        virUUIDFormat(dom->uuid, uuidstr);
        qemuReportError(VIR_ERR_NO_DOMAIN,
                        _("no domain with matching uuid '%s'"), uuidstr);
        goto cleanup;
    }

    if (!vm->persistent) {
        qemuReportError(VIR_ERR_OPERATION_INVALID,
                        "%s", _("cannot set autostart for transient domain"));
        goto cleanup;
    }

    autostart = (autostart != 0);

    if (vm->autostart != autostart) {
        if ((configFile = virDomainConfigFile(driver->configDir, vm->def->name)) == NULL)
            goto cleanup;
        if ((autostartLink = virDomainConfigFile(driver->autostartDir, vm->def->name)) == NULL)
            goto cleanup;

        if (autostart) {
            if (virFileMakePath(driver->autostartDir) < 0) {
                virReportSystemError(errno,
                                     _("cannot create autostart directory %s"),
                                     driver->autostartDir);
                goto cleanup;
            }

            if (symlink(configFile, autostartLink) < 0) {
                virReportSystemError(errno,
                                     _("Failed to create symlink '%s' to '%s'"),
                                     autostartLink, configFile);
                goto cleanup;
            }
        } else {
            if (unlink(autostartLink) < 0 && errno != ENOENT && errno != ENOTDIR) {
                virReportSystemError(errno,
                                     _("Failed to delete symlink '%s'"),
                                     autostartLink);
                goto cleanup;
            }
        }

        vm->autostart = autostart;
    }
    ret = 0;

cleanup:
    VIR_FREE(configFile);
    VIR_FREE(autostartLink);
    if (vm)
        virDomainObjUnlock(vm);
    qemuDriverUnlock(driver);
    return ret;
}

/*
 * check whether the host supports CFS bandwidth
 *
 * Return 1 when CFS bandwidth is supported, 0 when CFS bandwidth is not
 * supported, -1 on error.
 */
static int qemuGetCpuBWStatus(virCgroupPtr cgroup)
{
    char *cfs_period_path = NULL;
    int ret = -1;

    if (!cgroup)
        return 0;

    if (virCgroupPathOfController(cgroup, VIR_CGROUP_CONTROLLER_CPU,
                                  "cpu.cfs_period_us", &cfs_period_path) < 0) {
        VIR_INFO("cannot get the path of cgroup CPU controller");
        ret = 0;
        goto cleanup;
    }

    if (access(cfs_period_path, F_OK) < 0) {
        ret = 0;
    } else {
        ret = 1;
    }

cleanup:
    VIR_FREE(cfs_period_path);
    return ret;
}

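/*
 * Report the scheduler type ("posix") and, when requested, the number
 * of scheduler parameters available: 1 without CFS bandwidth support,
 * 3 when the host supports it.
 */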
static char *qemuGetSchedulerType(virDomainPtr dom,
                                  int *nparams)
{
    struct qemud_driver *driver = dom->conn->privateData;
    char *ret = NULL;
    int rc;

    qemuDriverLock(driver);
    if (!qemuCgroupControllerActive(driver, VIR_CGROUP_CONTROLLER_CPU)) {
        qemuReportError(VIR_ERR_OPERATION_INVALID,
                        "%s", _("cgroup CPU controller is not mounted"));
        goto cleanup;
    }

    if (nparams) {
        rc = qemuGetCpuBWStatus(driver->cgroup);
        if (rc < 0)
            goto cleanup;
        else if (rc == 0)
            *nparams = 1;
        else
            *nparams = 3;
    }

    ret = strdup("posix");
    if (!ret)
        virReportOOMError();

cleanup:
    qemuDriverUnlock(driver);
    return ret;
}

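/*
 * Set blkio tunables on the live cgroup and/or the persistent
 * definition.  Only the weight is supported and it must lie in the
 * range [100, 1000].
 */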
static int qemuDomainSetBlkioParameters(virDomainPtr dom,
                                        virTypedParameterPtr params,
                                        int nparams,
                                        unsigned int flags)
{
    struct qemud_driver *driver = dom->conn->privateData;
    int i;
    virCgroupPtr group = NULL;
    virDomainObjPtr vm = NULL;
    virDomainDefPtr persistentDef = NULL;
    int ret = -1;
    bool isActive;

    virCheckFlags(VIR_DOMAIN_AFFECT_LIVE |
                  VIR_DOMAIN_AFFECT_CONFIG, -1);
    qemuDriverLock(driver);

    vm = virDomainFindByUUID(&driver->domains, dom->uuid);

    if (vm == NULL) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR,
                        _("No such domain %s"), dom->uuid);
        goto cleanup;
    }

    isActive = virDomainObjIsActive(vm);

    if (flags == VIR_DOMAIN_AFFECT_CURRENT) {
        if (isActive)
            flags = VIR_DOMAIN_AFFECT_LIVE;
        else
            flags = VIR_DOMAIN_AFFECT_CONFIG;
    }

    if (flags & VIR_DOMAIN_AFFECT_LIVE) {
        if (!isActive) {
            qemuReportError(VIR_ERR_OPERATION_INVALID,
                            "%s", _("domain is not running"));
            goto cleanup;
        }

        if (!qemuCgroupControllerActive(driver, VIR_CGROUP_CONTROLLER_BLKIO)) {
            qemuReportError(VIR_ERR_OPERATION_INVALID, _("blkio cgroup isn't mounted"));
            goto cleanup;
        }

        if (virCgroupForDomain(driver->cgroup, vm->def->name, &group, 0) != 0) {
            qemuReportError(VIR_ERR_INTERNAL_ERROR,
                            _("cannot find cgroup for domain %s"), vm->def->name);
            goto cleanup;
        }
    }

    if (flags & VIR_DOMAIN_AFFECT_CONFIG) {
        if (!vm->persistent) {
            qemuReportError(VIR_ERR_OPERATION_INVALID, "%s",
                            _("cannot change persistent config of a transient domain"));
            goto cleanup;
        }
        if (!(persistentDef = virDomainObjGetPersistentDef(driver->caps, vm)))
            goto cleanup;
    }

    ret = 0;
    if (flags & VIR_DOMAIN_AFFECT_LIVE) {
        for (i = 0; i < nparams; i++) {
            virTypedParameterPtr param = &params[i];

            if (STREQ(param->field, VIR_DOMAIN_BLKIO_WEIGHT)) {
                int rc;
                if (param->type != VIR_TYPED_PARAM_UINT) {
                    qemuReportError(VIR_ERR_INVALID_ARG, "%s",
                                    _("invalid type for blkio weight tunable, expected a 'unsigned int'"));
                    ret = -1;
                    continue;
                }

                if (params[i].value.ui > 1000 || params[i].value.ui < 100) {
                    qemuReportError(VIR_ERR_INVALID_ARG, "%s",
                                    _("out of blkio weight range."));
                    ret = -1;
                    continue;
                }

                rc = virCgroupSetBlkioWeight(group, params[i].value.ui);
                if (rc != 0) {
                    virReportSystemError(-rc, "%s",
                                         _("unable to set blkio weight tunable"));
                    ret = -1;
                }
            } else {
                qemuReportError(VIR_ERR_INVALID_ARG,
                                _("Parameter `%s' not supported"), param->field);
                ret = -1;
            }
        }
    } else if (flags & VIR_DOMAIN_AFFECT_CONFIG) {
        /* Clang can't see that if we get here, persistentDef was set. */
        sa_assert(persistentDef);

        for (i = 0; i < nparams; i++) {
            virTypedParameterPtr param = &params[i];

            if (STREQ(param->field, VIR_DOMAIN_BLKIO_WEIGHT)) {
                if (param->type != VIR_TYPED_PARAM_UINT) {
                    qemuReportError(VIR_ERR_INVALID_ARG, "%s",
                                    _("invalid type for blkio weight tunable, expected a 'unsigned int'"));
                    ret = -1;
                    continue;
                }

                if (params[i].value.ui > 1000 || params[i].value.ui < 100) {
                    qemuReportError(VIR_ERR_INVALID_ARG, "%s",
                                    _("out of blkio weight range."));
                    ret = -1;
                    continue;
                }

                persistentDef->blkio.weight = params[i].value.ui;
            } else {
                qemuReportError(VIR_ERR_INVALID_ARG,
                                _("Parameter `%s' not supported"), param->field);
                ret = -1;
            }
        }

        if (virDomainSaveConfig(driver->configDir, persistentDef) < 0)
            ret = -1;
    }

cleanup:
    virCgroupFree(&group);
    if (vm)
        virDomainObjUnlock(vm);
    qemuDriverUnlock(driver);
    return ret;
}

static int qemuDomainGetBlkioParameters(virDomainPtr dom,
                                        virTypedParameterPtr params,
                                        int *nparams,
                                        unsigned int flags)
{
    struct qemud_driver *driver = dom->conn->privateData;
    int i;
    virCgroupPtr group = NULL;
    virDomainObjPtr vm = NULL;
    virDomainDefPtr persistentDef = NULL;
    unsigned int val;
    int ret = -1;
    int rc;
    bool isActive;

    virCheckFlags(VIR_DOMAIN_AFFECT_LIVE |
                  VIR_DOMAIN_AFFECT_CONFIG, -1);
    qemuDriverLock(driver);

    vm = virDomainFindByUUID(&driver->domains, dom->uuid);

    if (vm == NULL) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR,
                        _("No such domain %s"), dom->uuid);
        goto cleanup;
    }

    if ((*nparams) == 0) {
        /* Current number of blkio parameters supported by cgroups */
        *nparams = QEMU_NB_BLKIO_PARAM;
        ret = 0;
        goto cleanup;
    }

    if ((*nparams) != QEMU_NB_BLKIO_PARAM) {
        qemuReportError(VIR_ERR_INVALID_ARG,
                        "%s", _("Invalid parameter count"));
        goto cleanup;
    }

    isActive = virDomainObjIsActive(vm);

    if (flags == VIR_DOMAIN_AFFECT_CURRENT) {
        if (isActive)
            flags = VIR_DOMAIN_AFFECT_LIVE;
        else
            flags = VIR_DOMAIN_AFFECT_CONFIG;
    }

    if (flags & VIR_DOMAIN_AFFECT_LIVE) {
        if (!isActive) {
            qemuReportError(VIR_ERR_OPERATION_INVALID,
                            "%s", _("domain is not running"));
            goto cleanup;
        }

        if (!qemuCgroupControllerActive(driver, VIR_CGROUP_CONTROLLER_BLKIO)) {
            qemuReportError(VIR_ERR_OPERATION_INVALID, _("blkio cgroup isn't mounted"));
            goto cleanup;
        }

        if (virCgroupForDomain(driver->cgroup, vm->def->name, &group, 0) != 0) {
            qemuReportError(VIR_ERR_INTERNAL_ERROR,
                            _("cannot find cgroup for domain %s"), vm->def->name);
            goto cleanup;
        }
    }

    if (flags & VIR_DOMAIN_AFFECT_CONFIG) {
        if (!vm->persistent) {
            qemuReportError(VIR_ERR_OPERATION_INVALID, "%s",
                            _("cannot change persistent config of a transient domain"));
            goto cleanup;
        }
        if (!(persistentDef = virDomainObjGetPersistentDef(driver->caps, vm)))
            goto cleanup;
    }

    if (flags & VIR_DOMAIN_AFFECT_LIVE) {
        for (i = 0; i < *nparams; i++) {
            virTypedParameterPtr param = &params[i];
            val = 0;
            param->value.ui = 0;
            param->type = VIR_TYPED_PARAM_UINT;

            switch (i) {
            case 0: /* fill blkio weight here */
                rc = virCgroupGetBlkioWeight(group, &val);
                if (rc != 0) {
                    virReportSystemError(-rc, "%s",
                                         _("unable to get blkio weight"));
                    goto cleanup;
                }
                if (virStrcpyStatic(param->field, VIR_DOMAIN_BLKIO_WEIGHT) == NULL) {
                    qemuReportError(VIR_ERR_INTERNAL_ERROR,
                                    "%s", _("Field blkio weight too long for destination"));
                    goto cleanup;
                }
                param->value.ui = val;
                break;

            default:
                /* should not hit here */
                break;
            }
        }
    } else if (flags & VIR_DOMAIN_AFFECT_CONFIG) {
        for (i = 0; i < *nparams; i++) {
            virTypedParameterPtr param = &params[i];
            val = 0;
            param->value.ui = 0;
            param->type = VIR_TYPED_PARAM_UINT;

            switch (i) {
            case 0: /* fill blkio weight here */
                if (virStrcpyStatic(param->field, VIR_DOMAIN_BLKIO_WEIGHT) == NULL) {
                    qemuReportError(VIR_ERR_INTERNAL_ERROR,
                                    "%s", _("Field blkio weight too long for destination"));
                    goto cleanup;
                }
                param->value.ui = persistentDef->blkio.weight;
                break;

            default:
                /* should not hit here */
                break;
            }
        }
    }

    ret = 0;

cleanup:
    if (group)
        virCgroupFree(&group);
    if (vm)
        virDomainObjUnlock(vm);
    qemuDriverUnlock(driver);
    return ret;
}

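/*
 * Set memory cgroup tunables (hard limit, soft limit, swap hard limit)
 * on the live cgroup and/or the persistent definition; min_guarantee
 * is recognised but not implemented.
 */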
static int qemuDomainSetMemoryParameters(virDomainPtr dom,
                                         virTypedParameterPtr params,
                                         int nparams,
                                         unsigned int flags)
{
    struct qemud_driver *driver = dom->conn->privateData;
    int i;
    virDomainDefPtr persistentDef = NULL;
    virCgroupPtr group = NULL;
    virDomainObjPtr vm = NULL;
    int ret = -1;
    bool isActive;

    virCheckFlags(VIR_DOMAIN_AFFECT_LIVE |
                  VIR_DOMAIN_AFFECT_CONFIG, -1);

    qemuDriverLock(driver);

    vm = virDomainFindByUUID(&driver->domains, dom->uuid);

    if (vm == NULL) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR,
                        _("No such domain %s"), dom->uuid);
        goto cleanup;
    }

    isActive = virDomainObjIsActive(vm);

    if (flags == VIR_DOMAIN_AFFECT_CURRENT) {
        if (isActive)
            flags = VIR_DOMAIN_AFFECT_LIVE;
        else
            flags = VIR_DOMAIN_AFFECT_CONFIG;
    }

    if (flags & VIR_DOMAIN_AFFECT_LIVE) {
        if (!isActive) {
            qemuReportError(VIR_ERR_OPERATION_INVALID,
                            "%s", _("domain is not running"));
            goto cleanup;
        }

        if (!qemuCgroupControllerActive(driver, VIR_CGROUP_CONTROLLER_MEMORY)) {
            qemuReportError(VIR_ERR_OPERATION_INVALID,
                            "%s", _("cgroup memory controller is not mounted"));
            goto cleanup;
        }

        if (virCgroupForDomain(driver->cgroup, vm->def->name, &group, 0) != 0) {
            qemuReportError(VIR_ERR_INTERNAL_ERROR,
                            _("cannot find cgroup for domain %s"), vm->def->name);
            goto cleanup;
        }
    }

    if (flags & VIR_DOMAIN_AFFECT_CONFIG) {
        if (!vm->persistent) {
            qemuReportError(VIR_ERR_OPERATION_INVALID, "%s",
                            _("cannot change persistent config of a transient domain"));
            goto cleanup;
        }
        if (!(persistentDef = virDomainObjGetPersistentDef(driver->caps, vm)))
            goto cleanup;
    }

    ret = 0;
    for (i = 0; i < nparams; i++) {
        virTypedParameterPtr param = &params[i];

        if (STREQ(param->field, VIR_DOMAIN_MEMORY_HARD_LIMIT)) {
            int rc;
            if (param->type != VIR_TYPED_PARAM_ULLONG) {
                qemuReportError(VIR_ERR_INVALID_ARG, "%s",
                                _("invalid type for memory hard_limit tunable, expected a 'ullong'"));
                ret = -1;
                continue;
            }

            if (flags & VIR_DOMAIN_AFFECT_LIVE) {
                rc = virCgroupSetMemoryHardLimit(group, params[i].value.ul);
                if (rc != 0) {
                    virReportSystemError(-rc, "%s",
                                         _("unable to set memory hard_limit tunable"));
                    ret = -1;
                }
            }

            if (flags & VIR_DOMAIN_AFFECT_CONFIG) {
                persistentDef->mem.hard_limit = params[i].value.ul;
            }
        } else if (STREQ(param->field, VIR_DOMAIN_MEMORY_SOFT_LIMIT)) {
            int rc;
            if (param->type != VIR_TYPED_PARAM_ULLONG) {
                qemuReportError(VIR_ERR_INVALID_ARG, "%s",
                                _("invalid type for memory soft_limit tunable, expected a 'ullong'"));
                ret = -1;
                continue;
            }

            if (flags & VIR_DOMAIN_AFFECT_LIVE) {
                rc = virCgroupSetMemorySoftLimit(group, params[i].value.ul);
                if (rc != 0) {
                    virReportSystemError(-rc, "%s",
                                         _("unable to set memory soft_limit tunable"));
                    ret = -1;
                }
            }

            if (flags & VIR_DOMAIN_AFFECT_CONFIG) {
                persistentDef->mem.soft_limit = params[i].value.ul;
            }
        } else if (STREQ(param->field, VIR_DOMAIN_MEMORY_SWAP_HARD_LIMIT)) {
            int rc;
            if (param->type != VIR_TYPED_PARAM_ULLONG) {
                qemuReportError(VIR_ERR_INVALID_ARG, "%s",
                                _("invalid type for swap_hard_limit tunable, expected a 'ullong'"));
                ret = -1;
                continue;
            }

            if (flags & VIR_DOMAIN_AFFECT_LIVE) {
                rc = virCgroupSetMemSwapHardLimit(group, params[i].value.ul);
                if (rc != 0) {
                    virReportSystemError(-rc, "%s",
                                         _("unable to set swap_hard_limit tunable"));
                    ret = -1;
                }
            }
            if (flags & VIR_DOMAIN_AFFECT_CONFIG) {
                persistentDef->mem.swap_hard_limit = params[i].value.ul;
            }
        } else if (STREQ(param->field, VIR_DOMAIN_MEMORY_MIN_GUARANTEE)) {
            qemuReportError(VIR_ERR_INVALID_ARG,
                            _("Memory tunable `%s' not implemented"), param->field);
            ret = -1;
        } else {
            qemuReportError(VIR_ERR_INVALID_ARG,
                            _("Parameter `%s' not supported"), param->field);
            ret = -1;
        }
    }

    if (flags & VIR_DOMAIN_AFFECT_CONFIG) {
        if (virDomainSaveConfig(driver->configDir, persistentDef) < 0)
            ret = -1;
    }

cleanup:
    virCgroupFree(&group);
    if (vm)
        virDomainObjUnlock(vm);
    qemuDriverUnlock(driver);
    return ret;
}

2010-10-12 15:50:36 +00:00
|
|
|
static int qemuDomainGetMemoryParameters(virDomainPtr dom,
|
2011-05-26 17:39:04 +00:00
|
|
|
virTypedParameterPtr params,
|
2010-10-12 15:50:36 +00:00
|
|
|
int *nparams,
|
2011-05-27 07:41:29 +00:00
|
|
|
unsigned int flags)
|
2010-10-12 15:50:36 +00:00
|
|
|
{
|
|
|
|
struct qemud_driver *driver = dom->conn->privateData;
|
|
|
|
int i;
|
|
|
|
virCgroupPtr group = NULL;
|
|
|
|
virDomainObjPtr vm = NULL;
|
2011-05-27 07:41:29 +00:00
|
|
|
virDomainDefPtr persistentDef = NULL;
|
2011-01-13 09:18:11 +00:00
|
|
|
unsigned long long val;
|
2010-10-12 15:50:36 +00:00
|
|
|
int ret = -1;
|
|
|
|
int rc;
|
2011-05-27 07:41:29 +00:00
|
|
|
bool isActive;
|
2010-10-12 15:50:36 +00:00
|
|
|
|
2011-06-03 16:10:58 +00:00
|
|
|
virCheckFlags(VIR_DOMAIN_AFFECT_LIVE |
|
|
|
|
VIR_DOMAIN_AFFECT_CONFIG, -1);
|
2010-10-12 15:50:36 +00:00
|
|
|
|
2011-05-27 07:41:29 +00:00
|
|
|
qemuDriverLock(driver);
|
2010-10-12 15:50:36 +00:00
|
|
|
|
|
|
|
vm = virDomainFindByUUID(&driver->domains, dom->uuid);
|
|
|
|
|
|
|
|
if (vm == NULL) {
|
|
|
|
qemuReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
_("No such domain %s"), dom->uuid);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2011-05-27 07:41:29 +00:00
|
|
|
isActive = virDomainObjIsActive(vm);
|
|
|
|
|
2011-06-03 16:10:58 +00:00
|
|
|
if (flags == VIR_DOMAIN_AFFECT_CURRENT) {
|
2011-05-27 07:41:29 +00:00
|
|
|
if (isActive)
|
2011-06-03 16:10:58 +00:00
|
|
|
flags = VIR_DOMAIN_AFFECT_LIVE;
|
2011-05-27 07:41:29 +00:00
|
|
|
else
|
2011-06-03 16:10:58 +00:00
|
|
|
flags = VIR_DOMAIN_AFFECT_CONFIG;
|
2011-05-27 07:41:29 +00:00
|
|
|
}
|
|
|
|
|
2011-06-03 16:10:58 +00:00
|
|
|
if (flags & VIR_DOMAIN_AFFECT_LIVE) {
|
2011-05-27 07:41:29 +00:00
|
|
|
if (!isActive) {
|
|
|
|
qemuReportError(VIR_ERR_OPERATION_INVALID,
|
|
|
|
"%s", _("domain is not running"));
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!qemuCgroupControllerActive(driver, VIR_CGROUP_CONTROLLER_MEMORY)) {
|
|
|
|
qemuReportError(VIR_ERR_OPERATION_INVALID,
|
|
|
|
"%s", _("cgroup memory controller is not mounted"));
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (virCgroupForDomain(driver->cgroup, vm->def->name, &group, 0) != 0) {
|
|
|
|
qemuReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
_("cannot find cgroup for domain %s"), vm->def->name);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-06-03 16:10:58 +00:00
|
|
|
if (flags & VIR_DOMAIN_AFFECT_CONFIG) {
|
2011-05-27 07:41:29 +00:00
|
|
|
if (!vm->persistent) {
|
|
|
|
qemuReportError(VIR_ERR_OPERATION_INVALID, "%s",
|
|
|
|
_("cannot change persistent config of a transient domain"));
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
if (!(persistentDef = virDomainObjGetPersistentDef(driver->caps, vm)))
|
|
|
|
goto cleanup;
|
2011-03-18 15:08:19 +00:00
|
|
|
}
|
|
|
|
|
2010-10-12 15:50:36 +00:00
|
|
|
if ((*nparams) == 0) {
|
|
|
|
/* Current number of memory parameters supported by cgroups */
|
|
|
|
*nparams = QEMU_NB_MEM_PARAM;
|
|
|
|
ret = 0;
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2011-05-18 09:33:42 +00:00
|
|
|
if ((*nparams) < QEMU_NB_MEM_PARAM) {
|
2010-10-12 15:50:36 +00:00
|
|
|
qemuReportError(VIR_ERR_INVALID_ARG,
|
|
|
|
"%s", _("Invalid parameter count"));
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2011-06-03 16:10:58 +00:00
|
|
|
if (flags & VIR_DOMAIN_AFFECT_CONFIG) {
|
2011-05-27 07:41:29 +00:00
|
|
|
for (i = 0; i < *nparams; i++) {
|
|
|
|
virMemoryParameterPtr param = ¶ms[i];
|
|
|
|
val = 0;
|
|
|
|
param->value.ul = 0;
|
2011-06-03 16:10:58 +00:00
|
|
|
param->type = VIR_TYPED_PARAM_ULLONG;
|
2011-05-27 07:41:29 +00:00
|
|
|
|
|
|
|
switch (i) {
|
|
|
|
case 0: /* fill memory hard limit here */
|
|
|
|
if (virStrcpyStatic(param->field, VIR_DOMAIN_MEMORY_HARD_LIMIT) == NULL) {
|
|
|
|
qemuReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
"%s", _("Field memory hard limit too long for destination"));
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
param->value.ul = persistentDef->mem.hard_limit;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case 1: /* fill memory soft limit here */
|
|
|
|
if (virStrcpyStatic(param->field, VIR_DOMAIN_MEMORY_SOFT_LIMIT) == NULL) {
|
|
|
|
qemuReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
"%s", _("Field memory soft limit too long for destination"));
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
param->value.ul = persistentDef->mem.soft_limit;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case 2: /* fill swap hard limit here */
|
|
|
|
if (virStrcpyStatic(param->field, VIR_DOMAIN_MEMORY_SWAP_HARD_LIMIT) == NULL) {
|
|
|
|
qemuReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
"%s", _("Field swap hard limit too long for destination"));
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
param->value.ul = persistentDef->mem.swap_hard_limit;
|
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
/* should not hit here */
|
|
|
|
}
|
|
|
|
}
|
|
|
|
goto out;
|
2010-10-12 15:50:36 +00:00
|
|
|
}
|
|
|
|
|
2011-05-18 09:33:42 +00:00
|
|
|
for (i = 0; i < QEMU_NB_MEM_PARAM; i++) {
|
2011-05-26 17:39:04 +00:00
|
|
|
virTypedParameterPtr param = &params[i];
|
2010-10-12 15:50:36 +00:00
|
|
|
val = 0;
|
|
|
|
param->value.ul = 0;
|
2011-05-26 17:39:04 +00:00
|
|
|
param->type = VIR_TYPED_PARAM_ULLONG;
|
2010-10-12 15:50:36 +00:00
|
|
|
|
2011-08-02 22:45:23 +00:00
|
|
|
/* Coverity does not realize that if we get here, group is set. */
|
|
|
|
sa_assert(group);
|
|
|
|
|
2011-04-21 07:25:46 +00:00
|
|
|
switch (i) {
|
2010-10-12 15:50:36 +00:00
|
|
|
case 0: /* fill memory hard limit here */
|
|
|
|
rc = virCgroupGetMemoryHardLimit(group, &val);
|
|
|
|
if (rc != 0) {
|
|
|
|
virReportSystemError(-rc, "%s",
|
|
|
|
_("unable to get memory hard limit"));
|
2010-10-20 12:28:45 +00:00
|
|
|
goto cleanup;
|
2010-10-12 15:50:36 +00:00
|
|
|
}
|
|
|
|
if (virStrcpyStatic(param->field, VIR_DOMAIN_MEMORY_HARD_LIMIT) == NULL) {
|
|
|
|
qemuReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
"%s", _("Field memory hard limit too long for destination"));
|
2010-10-20 12:28:45 +00:00
|
|
|
goto cleanup;
|
2010-10-12 15:50:36 +00:00
|
|
|
}
|
|
|
|
param->value.ul = val;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case 1: /* fill memory soft limit here */
|
|
|
|
rc = virCgroupGetMemorySoftLimit(group, &val);
|
|
|
|
if (rc != 0) {
|
|
|
|
virReportSystemError(-rc, "%s",
|
|
|
|
_("unable to get memory soft limit"));
|
2010-10-20 12:28:45 +00:00
|
|
|
goto cleanup;
|
2010-10-12 15:50:36 +00:00
|
|
|
}
|
|
|
|
if (virStrcpyStatic(param->field, VIR_DOMAIN_MEMORY_SOFT_LIMIT) == NULL) {
|
|
|
|
qemuReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
"%s", _("Field memory soft limit too long for destination"));
|
2010-10-20 12:28:45 +00:00
|
|
|
goto cleanup;
|
2010-10-12 15:50:36 +00:00
|
|
|
}
|
|
|
|
param->value.ul = val;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case 2: /* fill swap hard limit here */
|
2011-03-16 05:07:12 +00:00
|
|
|
rc = virCgroupGetMemSwapHardLimit(group, &val);
|
2010-10-12 15:50:36 +00:00
|
|
|
if (rc != 0) {
|
|
|
|
virReportSystemError(-rc, "%s",
|
|
|
|
_("unable to get swap hard limit"));
|
2010-10-20 12:28:45 +00:00
|
|
|
goto cleanup;
|
2010-10-12 15:50:36 +00:00
|
|
|
}
|
2010-10-20 11:52:48 +00:00
|
|
|
if (virStrcpyStatic(param->field, VIR_DOMAIN_MEMORY_SWAP_HARD_LIMIT) == NULL) {
|
2010-10-12 15:50:36 +00:00
|
|
|
qemuReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
"%s", _("Field swap hard limit too long for destination"));
|
2010-10-20 12:28:45 +00:00
|
|
|
goto cleanup;
|
2010-10-12 15:50:36 +00:00
|
|
|
}
|
|
|
|
param->value.ul = val;
|
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
/* should not hit here */
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-05-27 07:41:29 +00:00
|
|
|
out:
|
2011-05-18 09:33:42 +00:00
|
|
|
*nparams = QEMU_NB_MEM_PARAM;
|
2010-10-20 12:28:45 +00:00
|
|
|
ret = 0;
|
|
|
|
|
2010-10-12 15:50:36 +00:00
|
|
|
cleanup:
|
|
|
|
if (group)
|
|
|
|
virCgroupFree(&group);
|
|
|
|
if (vm)
|
|
|
|
virDomainObjUnlock(vm);
|
|
|
|
qemuDriverUnlock(driver);
|
|
|
|
return ret;
|
|
|
|
}
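/* Hedged, standalone client-side sketch (not part of this driver) of the
 * two-step query pattern the memory-parameter handler above implements: a
 * first call with *nparams == 0 only reports how many tunables exist, a
 * second call fetches them.  The connection URI and the domain name "demo"
 * are assumptions for illustration, not values taken from this file. */
#include <libvirt/libvirt.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    virConnectPtr conn = NULL;
    virDomainPtr dom = NULL;
    virTypedParameterPtr params = NULL;
    int nparams = 0;
    int i;

    if (!(conn = virConnectOpen("qemu:///system")))
        return 1;
    if (!(dom = virDomainLookupByName(conn, "demo")))
        goto out;

    /* Step 1: learn how many memory tunables the driver reports. */
    if (virDomainGetMemoryParameters(dom, NULL, &nparams, 0) < 0 || nparams == 0)
        goto out;

    /* Step 2: fetch them; the handler above always uses ULLONG values. */
    if ((params = calloc(nparams, sizeof(*params))) &&
        virDomainGetMemoryParameters(dom, params, &nparams, 0) == 0) {
        for (i = 0; i < nparams; i++)
            printf("%s = %llu\n", params[i].field, params[i].value.ul);
    }

out:
    free(params);
    if (dom)
        virDomainFree(dom);
    virConnectClose(conn);
    return 0;
}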
|
|
|
|
|
2011-07-21 08:04:25 +00:00
|
|
|
static int
|
|
|
|
qemuSetVcpusBWLive(virDomainObjPtr vm, virCgroupPtr cgroup,
|
|
|
|
unsigned long long period, long long quota)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
|
|
|
virCgroupPtr cgroup_vcpu = NULL;
|
|
|
|
int rc;
|
2011-07-25 05:37:14 +00:00
|
|
|
long long vm_quota = 0;
|
|
|
|
long long old_quota = 0;
|
|
|
|
unsigned long long old_period = 0;
|
2011-07-21 08:04:25 +00:00
|
|
|
|
|
|
|
if (period == 0 && quota == 0)
|
|
|
|
return 0;
|
|
|
|
|
2011-07-25 05:37:14 +00:00
|
|
|
/* Ensure that we can multiply by vcpus without overflowing. */
|
|
|
|
if (quota > LLONG_MAX / vm->def->vcpus) {
|
|
|
|
virReportSystemError(EINVAL, "%s",
                     _("Unable to set cpu bandwidth quota"));
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (quota > 0)
|
|
|
|
vm_quota = quota * vm->def->vcpus;
|
|
|
|
else
|
|
|
|
vm_quota = quota;
|
|
|
|
|
|
|
|
rc = virCgroupGetCpuCfsQuota(cgroup, &old_quota);
|
|
|
|
if (rc < 0) {
|
|
|
|
virReportSystemError(-rc, "%s",
|
|
|
|
_("unable to get cpu bandwidth tunable"));
|
|
|
|
goto cleanup;
|
|
|
|
}
|
2011-07-21 08:04:25 +00:00
|
|
|
|
2011-07-25 05:37:14 +00:00
|
|
|
rc = virCgroupGetCpuCfsPeriod(cgroup, &old_period);
|
|
|
|
if (rc < 0) {
|
|
|
|
virReportSystemError(-rc, "%s",
|
|
|
|
_("unable to get cpu bandwidth period tunable"));
|
|
|
|
goto cleanup;
|
2011-07-21 08:04:25 +00:00
|
|
|
}
|
|
|
|
|
2011-07-25 05:37:14 +00:00
|
|
|
/*
|
|
|
|
* If the quota is being lowered, we should modify each vcpu's quota
|
|
|
|
* first. Otherwise, we should modify the vm's quota first.
|
|
|
|
*
|
|
|
|
* If the period is being lowered, we should modify the vm's period
|
|
|
|
* first. Otherwise, we should modify each vcpu's period first.
|
|
|
|
*
|
|
|
|
* If both quota and period are being raised, or both are being lowered, we
|
|
|
|
* cannot modify them in a single step; change one, then the other.
|
|
|
|
*/
|
|
|
|
if ((quota != 0) && (period != 0)) {
|
|
|
|
if (((quota > old_quota) && (period > old_period)) ||
|
|
|
|
((quota < old_quota) && (period < old_period))) {
|
|
|
|
/* modify period */
|
|
|
|
if (qemuSetVcpusBWLive(vm, cgroup, period, 0) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
/* modify quota */
|
|
|
|
if (qemuSetVcpusBWLive(vm, cgroup, 0, quota) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
return 0;
|
2011-07-21 08:04:25 +00:00
|
|
|
}
|
2011-07-25 05:37:14 +00:00
|
|
|
}
|
2011-07-21 08:04:25 +00:00
|
|
|
|
2011-07-25 05:37:14 +00:00
|
|
|
if (((vm_quota != 0) && (vm_quota > old_quota)) ||
|
|
|
|
((period != 0) && (period < old_period)))
|
|
|
|
/* Set cpu bandwidth for the vm */
|
|
|
|
if (qemuSetupCgroupVcpuBW(cgroup, period, vm_quota) < 0)
|
2011-07-21 08:04:25 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
2011-07-25 05:37:14 +00:00
|
|
|
/* If we do not know the VCPU<->PID mapping, or all vcpus run in the same
|
|
|
|
* thread, we cannot control each vcpu. So we only modify cpu bandwidth
|
|
|
|
* when each vcpu has a separate thread.
|
|
|
|
*/
|
|
|
|
if (priv->nvcpupids != 0 && priv->vcpupids[0] != vm->pid) {
|
|
|
|
for (i = 0; i < priv->nvcpupids; i++) {
|
|
|
|
rc = virCgroupForVcpu(cgroup, i, &cgroup_vcpu, 0);
|
|
|
|
if (rc < 0) {
|
|
|
|
virReportSystemError(-rc,
|
|
|
|
_("Unable to find vcpu cgroup for %s(vcpu:"
|
|
|
|
" %d)"),
|
|
|
|
vm->def->name, i);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (qemuSetupCgroupVcpuBW(cgroup_vcpu, period, quota) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
virCgroupFree(&cgroup_vcpu);
|
|
|
|
}
|
2011-07-21 08:04:25 +00:00
|
|
|
}
|
|
|
|
|
2011-07-25 05:37:14 +00:00
|
|
|
if (((vm_quota != 0) && (vm_quota <= old_quota)) ||
|
|
|
|
((period != 0) && (period >= old_period)))
|
|
|
|
/* Set cpu bandwidth for the vm */
|
|
|
|
if (qemuSetupCgroupVcpuBW(cgroup, period, vm_quota) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
2011-07-21 08:04:25 +00:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
cleanup:
|
|
|
|
virCgroupFree(&cgroup_vcpu);
|
|
|
|
return -1;
|
|
|
|
}
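/* Standalone, hedged sketch (not libvirt code) of the update-ordering rule
 * that qemuSetVcpusBWLive() above implements for a quota-only change: when
 * the whole-VM quota is being lowered, the per-vcpu (child) cgroups are
 * updated before the VM (parent) cgroup, and the other way round when it is
 * being raised.  The helper below only restates that decision rule. */
#include <stdbool.h>
#include <stdio.h>

/* True when the per-vcpu cgroups should be updated before the whole-VM
 * cgroup for a change from old_vm_quota to new_vm_quota. */
static bool
vcpusBeforeVm(long long new_vm_quota, long long old_vm_quota)
{
    return new_vm_quota != 0 && new_vm_quota <= old_vm_quota;
}

int main(void)
{
    printf("lower 200000 -> 100000: vcpus first? %d\n",
           (int) vcpusBeforeVm(100000, 200000));
    printf("raise 100000 -> 200000: vcpus first? %d\n",
           (int) vcpusBeforeVm(200000, 100000));
    return 0;
}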
|
|
|
|
|
2011-05-17 06:20:01 +00:00
|
|
|
static int qemuSetSchedulerParametersFlags(virDomainPtr dom,
|
2011-05-26 17:39:04 +00:00
|
|
|
virTypedParameterPtr params,
|
2011-05-17 06:20:01 +00:00
|
|
|
int nparams,
|
|
|
|
unsigned int flags)
|
2009-07-09 13:11:21 +00:00
|
|
|
{
|
|
|
|
struct qemud_driver *driver = dom->conn->privateData;
|
|
|
|
int i;
|
|
|
|
virCgroupPtr group = NULL;
|
|
|
|
virDomainObjPtr vm = NULL;
|
2011-07-21 08:04:25 +00:00
|
|
|
virDomainDefPtr vmdef = NULL;
|
2009-07-09 13:11:21 +00:00
|
|
|
int ret = -1;
|
2011-05-17 06:20:01 +00:00
|
|
|
bool isActive;
|
2011-07-21 08:04:25 +00:00
|
|
|
int rc;
|
2011-05-17 06:20:01 +00:00
|
|
|
|
2011-06-03 16:10:58 +00:00
|
|
|
virCheckFlags(VIR_DOMAIN_AFFECT_LIVE |
|
|
|
|
VIR_DOMAIN_AFFECT_CONFIG, -1);
|
2009-07-09 13:11:21 +00:00
|
|
|
|
2009-09-02 13:02:06 +00:00
|
|
|
qemuDriverLock(driver);
|
2009-07-09 13:11:21 +00:00
|
|
|
|
|
|
|
vm = virDomainFindByUUID(&driver->domains, dom->uuid);
|
|
|
|
|
|
|
|
if (vm == NULL) {
|
2010-02-09 18:15:41 +00:00
|
|
|
char uuidstr[VIR_UUID_STRING_BUFLEN];
virUUIDFormat(dom->uuid, uuidstr);
qemuReportError(VIR_ERR_NO_DOMAIN,
                _("no domain with matching uuid '%s'"), uuidstr);
|
2009-07-09 13:11:21 +00:00
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2011-05-17 06:20:01 +00:00
|
|
|
isActive = virDomainObjIsActive(vm);
|
|
|
|
|
2011-06-03 16:10:58 +00:00
|
|
|
if (flags == VIR_DOMAIN_AFFECT_CURRENT) {
|
2011-05-17 06:20:01 +00:00
|
|
|
if (isActive)
|
2011-06-03 16:10:58 +00:00
|
|
|
flags = VIR_DOMAIN_AFFECT_LIVE;
|
2011-05-17 06:20:01 +00:00
|
|
|
else
|
2011-06-03 16:10:58 +00:00
|
|
|
flags = VIR_DOMAIN_AFFECT_CONFIG;
|
2011-03-18 15:08:19 +00:00
|
|
|
}
|
|
|
|
|
2011-07-21 08:04:25 +00:00
|
|
|
if (flags & VIR_DOMAIN_AFFECT_CONFIG) {
|
|
|
|
if (!vm->persistent) {
|
|
|
|
qemuReportError(VIR_ERR_OPERATION_INVALID, "%s",
|
|
|
|
_("cannot change persistent config of a transient domain"));
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Make a copy for updated domain. */
|
|
|
|
vmdef = virDomainObjCopyPersistentDef(driver->caps, vm);
|
|
|
|
if (!vmdef)
|
|
|
|
goto cleanup;
|
2009-07-09 13:11:21 +00:00
|
|
|
}
|
|
|
|
|
2011-06-03 16:10:58 +00:00
|
|
|
if (flags & VIR_DOMAIN_AFFECT_LIVE) {
|
2011-05-17 06:20:01 +00:00
|
|
|
if (!isActive) {
|
|
|
|
qemuReportError(VIR_ERR_OPERATION_INVALID,
|
|
|
|
"%s", _("domain is not running"));
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!qemuCgroupControllerActive(driver, VIR_CGROUP_CONTROLLER_CPU)) {
|
|
|
|
qemuReportError(VIR_ERR_OPERATION_INVALID,
|
|
|
|
"%s", _("cgroup CPU controller is not mounted"));
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
if (virCgroupForDomain(driver->cgroup, vm->def->name, &group, 0) != 0) {
|
|
|
|
qemuReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
_("cannot find cgroup for domain %s"),
|
|
|
|
vm->def->name);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-07-09 13:11:21 +00:00
|
|
|
for (i = 0; i < nparams; i++) {
|
2011-05-26 17:39:04 +00:00
|
|
|
virTypedParameterPtr param = &params[i];
|
2009-07-09 13:11:21 +00:00
|
|
|
|
|
|
|
if (STREQ(param->field, "cpu_shares")) {
|
2011-05-26 17:39:04 +00:00
|
|
|
if (param->type != VIR_TYPED_PARAM_ULLONG) {
|
2010-02-09 18:15:41 +00:00
|
|
|
qemuReportError(VIR_ERR_INVALID_ARG, "%s",
|
|
|
|
_("invalid type for cpu_shares tunable, expected a 'ullong'"));
|
2009-07-09 13:11:21 +00:00
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2011-06-03 16:10:58 +00:00
|
|
|
if (flags & VIR_DOMAIN_AFFECT_LIVE) {
|
2011-05-17 06:20:01 +00:00
|
|
|
rc = virCgroupSetCpuShares(group, params[i].value.ul);
|
|
|
|
if (rc != 0) {
|
|
|
|
virReportSystemError(-rc, "%s",
|
|
|
|
_("unable to set cpu shares tunable"));
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
vm->def->cputune.shares = params[i].value.ul;
|
2009-07-09 13:11:21 +00:00
|
|
|
}
|
2011-03-29 13:41:25 +00:00
|
|
|
|
2011-06-03 16:10:58 +00:00
|
|
|
if (flags & VIR_DOMAIN_AFFECT_CONFIG) {
|
2011-07-21 08:04:25 +00:00
|
|
|
vmdef->cputune.shares = params[i].value.ul;
|
|
|
|
}
|
2011-07-25 05:38:51 +00:00
|
|
|
} else if (STREQ(param->field, "vcpu_period")) {
|
2011-07-21 08:04:25 +00:00
|
|
|
if (param->type != VIR_TYPED_PARAM_ULLONG) {
|
|
|
|
qemuReportError(VIR_ERR_INVALID_ARG, "%s",
|
2011-07-25 05:38:51 +00:00
|
|
|
_("invalid type for vcpu_period tunable,"
|
2011-07-21 08:04:25 +00:00
|
|
|
" expected a 'ullong'"));
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (flags & VIR_DOMAIN_AFFECT_LIVE) {
|
|
|
|
rc = qemuSetVcpusBWLive(vm, group, params[i].value.ul, 0);
|
|
|
|
if (rc != 0)
|
2011-05-17 06:20:01 +00:00
|
|
|
goto cleanup;
|
2011-07-21 08:04:25 +00:00
|
|
|
|
|
|
|
if (params[i].value.ul)
|
|
|
|
vm->def->cputune.period = params[i].value.ul;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (flags & VIR_DOMAIN_AFFECT_CONFIG) {
|
|
|
|
vmdef->cputune.period = params[i].value.ul;
|
|
|
|
}
|
2011-07-25 05:38:51 +00:00
|
|
|
} else if (STREQ(param->field, "vcpu_quota")) {
|
2011-07-21 08:04:25 +00:00
|
|
|
if (param->type != VIR_TYPED_PARAM_LLONG) {
|
|
|
|
qemuReportError(VIR_ERR_INVALID_ARG, "%s",
|
2011-07-25 05:38:51 +00:00
|
|
|
_("invalid type for vcpu_quota tunable,"
|
2011-07-21 08:04:25 +00:00
|
|
|
" expected a 'llong'"));
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (flags & VIR_DOMAIN_AFFECT_LIVE) {
|
|
|
|
rc = qemuSetVcpusBWLive(vm, group, 0, params[i].value.l);
|
|
|
|
if (rc != 0)
|
2011-05-17 06:20:01 +00:00
|
|
|
goto cleanup;
|
2011-07-21 08:04:25 +00:00
|
|
|
|
|
|
|
if (params[i].value.l)
|
|
|
|
vm->def->cputune.quota = params[i].value.l;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (flags & VIR_DOMAIN_AFFECT_CONFIG) {
|
|
|
|
vmdef->cputune.quota = params[i].value.l;
|
2011-05-17 06:20:01 +00:00
|
|
|
}
|
2009-07-09 13:11:21 +00:00
|
|
|
} else {
|
2010-02-09 18:15:41 +00:00
|
|
|
qemuReportError(VIR_ERR_INVALID_ARG,
|
|
|
|
_("Invalid parameter `%s'"), param->field);
|
2009-07-09 13:11:21 +00:00
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
}
|
2011-05-17 06:20:01 +00:00
|
|
|
|
2011-07-21 08:04:25 +00:00
|
|
|
if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
|
|
|
|
if (flags & VIR_DOMAIN_AFFECT_CONFIG) {
|
|
|
|
rc = virDomainSaveConfig(driver->configDir, vmdef);
|
|
|
|
if (rc < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
virDomainObjAssignDef(vm, vmdef, false);
|
|
|
|
vmdef = NULL;
|
|
|
|
}
|
|
|
|
|
2009-07-09 13:11:21 +00:00
|
|
|
ret = 0;
|
|
|
|
|
|
|
|
cleanup:
|
2011-07-21 08:04:25 +00:00
|
|
|
virDomainDefFree(vmdef);
|
2009-07-09 13:11:21 +00:00
|
|
|
virCgroupFree(&group);
|
|
|
|
if (vm)
|
|
|
|
virDomainObjUnlock(vm);
|
2009-09-02 13:02:06 +00:00
|
|
|
qemuDriverUnlock(driver);
|
2009-07-09 13:11:21 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2011-05-17 06:20:01 +00:00
|
|
|
static int qemuSetSchedulerParameters(virDomainPtr dom,
|
2011-05-26 17:39:04 +00:00
|
|
|
virTypedParameterPtr params,
|
2011-05-17 06:20:01 +00:00
|
|
|
int nparams)
|
|
|
|
{
|
|
|
|
return qemuSetSchedulerParametersFlags(dom,
|
|
|
|
params,
|
|
|
|
nparams,
|
2011-06-03 16:10:58 +00:00
|
|
|
VIR_DOMAIN_AFFECT_LIVE);
|
2011-05-17 06:20:01 +00:00
|
|
|
}
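/* Hedged, standalone example of how a management client might drive the
 * scheduler-tunable handlers above through the public API.  The domain name
 * "demo" and the chosen values are assumptions for illustration only. */
#include <libvirt/libvirt.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
    virConnectPtr conn = NULL;
    virDomainPtr dom = NULL;
    virTypedParameter params[2];
    int ret = 1;

    memset(params, 0, sizeof(params));

    if (!(conn = virConnectOpen("qemu:///system")))
        return 1;
    if (!(dom = virDomainLookupByName(conn, "demo")))
        goto out;

    strncpy(params[0].field, "cpu_shares", VIR_TYPED_PARAM_FIELD_LENGTH - 1);
    params[0].type = VIR_TYPED_PARAM_ULLONG;
    params[0].value.ul = 2048;

    strncpy(params[1].field, "vcpu_quota", VIR_TYPED_PARAM_FIELD_LENGTH - 1);
    params[1].type = VIR_TYPED_PARAM_LLONG;
    params[1].value.l = 50000;   /* microseconds of CPU per period, per vcpu */

    if (virDomainSetSchedulerParametersFlags(dom, params, 2,
                                             VIR_DOMAIN_AFFECT_LIVE) < 0)
        fprintf(stderr, "failed to set scheduler parameters\n");
    else
        ret = 0;

out:
    if (dom)
        virDomainFree(dom);
    virConnectClose(conn);
    return ret;
}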
|
|
|
|
|
2011-07-21 08:04:25 +00:00
|
|
|
static int
|
|
|
|
qemuGetVcpuBWLive(virCgroupPtr cgroup, unsigned long long *period,
|
|
|
|
long long *quota)
|
|
|
|
{
|
|
|
|
int rc;
|
|
|
|
|
|
|
|
rc = virCgroupGetCpuCfsPeriod(cgroup, period);
|
|
|
|
if (rc < 0) {
|
|
|
|
virReportSystemError(-rc, "%s",
|
|
|
|
_("unable to get cpu bandwidth period tunable"));
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
rc = virCgroupGetCpuCfsQuota(cgroup, quota);
|
|
|
|
if (rc < 0) {
|
|
|
|
virReportSystemError(-rc, "%s",
|
|
|
|
_("unable to get cpu bandwidth tunable"));
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
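/* Hedged, standalone sketch of what virCgroupGetCpuCfsPeriod() and
 * virCgroupGetCpuCfsQuota(), used by qemuGetVcpuBWLive() above, boil down
 * to: reading the cgroup-v1 CFS bandwidth files under the cpu controller.
 * The directory below is purely an example; libvirt derives the real path
 * from the cgroup mount point and the per-domain group name. */
#include <stdio.h>

static int read_ll(const char *path, long long *val)
{
    FILE *fp = fopen(path, "r");

    if (!fp)
        return -1;
    if (fscanf(fp, "%lld", val) != 1) {
        fclose(fp);
        return -1;
    }
    fclose(fp);
    return 0;
}

int main(void)
{
    const char *dir = "/sys/fs/cgroup/cpu/libvirt/qemu/demo"; /* example only */
    char path[256];
    long long period = 0, quota = 0;

    snprintf(path, sizeof(path), "%s/cpu.cfs_period_us", dir);
    if (read_ll(path, &period) < 0)
        return 1;
    snprintf(path, sizeof(path), "%s/cpu.cfs_quota_us", dir);
    if (read_ll(path, &quota) < 0)
        return 1;

    printf("period=%lld us, quota=%lld us (-1 means unlimited)\n",
           period, quota);
    return 0;
}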
|
|
|
|
|
|
|
|
static int
|
|
|
|
qemuGetVcpusBWLive(virDomainObjPtr vm, virCgroupPtr cgroup,
|
|
|
|
unsigned long long *period, long long *quota)
|
|
|
|
{
|
|
|
|
virCgroupPtr cgroup_vcpu = NULL;
|
|
|
|
qemuDomainObjPrivatePtr priv = NULL;
|
|
|
|
int rc;
|
|
|
|
int ret = -1;
|
|
|
|
|
|
|
|
priv = vm->privateData;
|
|
|
|
if (priv->nvcpupids == 0 || priv->vcpupids[0] == vm->pid) {
|
|
|
|
/* We do not create sub dir for each vcpu */
|
|
|
|
rc = qemuGetVcpuBWLive(cgroup, period, quota);
|
|
|
|
if (rc < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
if (*quota > 0)
|
|
|
|
*quota /= vm->def->vcpus;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* get period and quota for vcpu0 */
|
|
|
|
rc = virCgroupForVcpu(cgroup, 0, &cgroup_vcpu, 0);
|
|
|
|
if (!cgroup_vcpu) {
|
|
|
|
virReportSystemError(-rc,
|
|
|
|
_("Unable to find vcpu cgroup for %s(vcpu: 0)"),
|
|
|
|
vm->def->name);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
rc = qemuGetVcpuBWLive(cgroup_vcpu, period, quota);
|
|
|
|
if (rc < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
out:
|
|
|
|
ret = 0;
|
|
|
|
|
|
|
|
cleanup:
|
|
|
|
virCgroupFree(&cgroup_vcpu);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2011-05-17 21:35:26 +00:00
|
|
|
static int
|
|
|
|
qemuGetSchedulerParametersFlags(virDomainPtr dom,
|
|
|
|
virTypedParameterPtr params,
|
|
|
|
int *nparams,
|
|
|
|
unsigned int flags)
|
2009-07-09 13:11:21 +00:00
|
|
|
{
|
|
|
|
struct qemud_driver *driver = dom->conn->privateData;
|
|
|
|
virCgroupPtr group = NULL;
|
|
|
|
virDomainObjPtr vm = NULL;
|
2011-07-21 08:04:25 +00:00
|
|
|
unsigned long long shares;
|
|
|
|
unsigned long long period;
|
|
|
|
long long quota;
|
2009-07-09 13:11:21 +00:00
|
|
|
int ret = -1;
|
|
|
|
int rc;
|
2011-05-17 21:35:26 +00:00
|
|
|
bool isActive;
|
2011-07-21 12:06:33 +00:00
|
|
|
bool cpu_bw_status = false;
|
2011-07-21 08:04:25 +00:00
|
|
|
int saved_nparams = 0;
|
2009-07-09 13:11:21 +00:00
|
|
|
|
2011-06-03 16:10:58 +00:00
|
|
|
virCheckFlags(VIR_DOMAIN_AFFECT_LIVE |
|
|
|
|
VIR_DOMAIN_AFFECT_CONFIG, -1);
|
2011-05-17 21:35:26 +00:00
|
|
|
|
2011-06-28 07:58:44 +00:00
|
|
|
qemuDriverLock(driver);
|
|
|
|
|
2011-06-03 16:10:58 +00:00
|
|
|
if ((flags & (VIR_DOMAIN_AFFECT_LIVE | VIR_DOMAIN_AFFECT_CONFIG)) ==
|
|
|
|
(VIR_DOMAIN_AFFECT_LIVE | VIR_DOMAIN_AFFECT_CONFIG)) {
|
2011-05-17 21:35:26 +00:00
|
|
|
qemuReportError(VIR_ERR_INVALID_ARG, "%s",
|
|
|
|
_("cannot query live and config together"));
|
2009-09-02 13:02:06 +00:00
|
|
|
goto cleanup;
|
2009-07-09 13:11:21 +00:00
|
|
|
}
|
|
|
|
|
2011-05-18 08:52:57 +00:00
|
|
|
if (*nparams < 1) {
|
2010-02-09 18:15:41 +00:00
|
|
|
qemuReportError(VIR_ERR_INVALID_ARG,
|
|
|
|
"%s", _("Invalid parameter count"));
|
2009-09-02 13:02:06 +00:00
|
|
|
goto cleanup;
|
2009-07-09 13:11:21 +00:00
|
|
|
}
|
|
|
|
|
2011-07-21 08:04:25 +00:00
|
|
|
if (*nparams > 1) {
|
|
|
|
rc = qemuGetCpuBWStatus(driver->cgroup);
|
|
|
|
if (rc < 0)
|
|
|
|
goto cleanup;
|
|
|
|
cpu_bw_status = !!rc;
|
|
|
|
}
|
|
|
|
|
2009-07-09 13:11:21 +00:00
|
|
|
vm = virDomainFindByUUID(&driver->domains, dom->uuid);
|
|
|
|
|
|
|
|
if (vm == NULL) {
|
2010-02-09 18:15:41 +00:00
|
|
|
char uuidstr[VIR_UUID_STRING_BUFLEN];
virUUIDFormat(dom->uuid, uuidstr);
qemuReportError(VIR_ERR_NO_DOMAIN,
                _("no domain with matching uuid '%s'"), uuidstr);
|
2009-07-09 13:11:21 +00:00
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2011-05-17 21:35:26 +00:00
|
|
|
isActive = virDomainObjIsActive(vm);
|
|
|
|
|
2011-06-03 16:10:58 +00:00
|
|
|
if (flags == VIR_DOMAIN_AFFECT_CURRENT) {
|
2011-05-17 21:35:26 +00:00
|
|
|
if (isActive)
|
2011-06-03 16:10:58 +00:00
|
|
|
flags = VIR_DOMAIN_AFFECT_LIVE;
|
2011-05-17 21:35:26 +00:00
|
|
|
else
|
2011-06-03 16:10:58 +00:00
|
|
|
flags = VIR_DOMAIN_AFFECT_CONFIG;
|
2011-05-17 21:35:26 +00:00
|
|
|
}
|
|
|
|
|
2011-06-03 16:10:58 +00:00
|
|
|
if (flags & VIR_DOMAIN_AFFECT_CONFIG) {
|
2011-05-17 21:35:26 +00:00
|
|
|
if (!vm->persistent) {
|
|
|
|
qemuReportError(VIR_ERR_OPERATION_INVALID, "%s",
|
|
|
|
_("cannot query persistent config of a transient domain"));
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (isActive) {
|
|
|
|
virDomainDefPtr persistentDef;
|
|
|
|
|
|
|
|
persistentDef = virDomainObjGetPersistentDef(driver->caps, vm);
|
|
|
|
if (!persistentDef) {
|
|
|
|
qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
|
|
|
|
_("can't get persistentDef"));
|
|
|
|
goto cleanup;
|
|
|
|
}
|
2011-07-21 08:04:25 +00:00
|
|
|
shares = persistentDef->cputune.shares;
|
|
|
|
if (*nparams > 1 && cpu_bw_status) {
|
|
|
|
period = persistentDef->cputune.period;
|
|
|
|
quota = persistentDef->cputune.quota;
|
|
|
|
}
|
2011-05-17 21:35:26 +00:00
|
|
|
} else {
|
2011-07-21 08:04:25 +00:00
|
|
|
shares = vm->def->cputune.shares;
|
|
|
|
if (*nparams > 1 && cpu_bw_status) {
|
|
|
|
period = vm->def->cputune.period;
|
|
|
|
quota = vm->def->cputune.quota;
|
|
|
|
}
|
2011-05-17 21:35:26 +00:00
|
|
|
}
|
2011-05-17 06:20:01 +00:00
|
|
|
goto out;
|
2011-03-18 15:08:19 +00:00
|
|
|
}
|
|
|
|
|
2011-05-17 21:35:26 +00:00
|
|
|
if (!isActive) {
|
|
|
|
qemuReportError(VIR_ERR_OPERATION_INVALID, "%s",
|
|
|
|
_("domain is not running"));
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!qemuCgroupControllerActive(driver, VIR_CGROUP_CONTROLLER_CPU)) {
|
|
|
|
qemuReportError(VIR_ERR_OPERATION_INVALID,
|
|
|
|
"%s", _("cgroup CPU controller is not mounted"));
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2009-07-09 13:11:21 +00:00
|
|
|
if (virCgroupForDomain(driver->cgroup, vm->def->name, &group, 0) != 0) {
|
2010-02-09 18:15:41 +00:00
|
|
|
qemuReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
_("cannot find cgroup for domain %s"), vm->def->name);
|
2009-07-09 13:11:21 +00:00
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2011-07-21 08:04:25 +00:00
|
|
|
rc = virCgroupGetCpuShares(group, &shares);
|
2009-07-09 13:11:21 +00:00
|
|
|
if (rc != 0) {
|
2010-02-04 20:02:58 +00:00
|
|
|
virReportSystemError(-rc, "%s",
|
2009-07-09 13:11:21 +00:00
|
|
|
_("unable to get cpu shares tunable"));
|
|
|
|
goto cleanup;
|
|
|
|
}
|
2011-07-21 08:04:25 +00:00
|
|
|
|
|
|
|
if (*nparams > 1 && cpu_bw_status) {
|
|
|
|
rc = qemuGetVcpusBWLive(vm, group, &period, "a);
|
|
|
|
if (rc != 0)
|
|
|
|
goto cleanup;
|
|
|
|
}
|
2011-05-17 06:20:01 +00:00
|
|
|
out:
|
2011-07-21 08:04:25 +00:00
|
|
|
params[0].value.ul = shares;
|
2011-05-26 17:39:04 +00:00
|
|
|
params[0].type = VIR_TYPED_PARAM_ULLONG;
|
2009-08-03 12:37:44 +00:00
|
|
|
if (virStrcpyStatic(params[0].field, "cpu_shares") == NULL) {
|
2010-02-09 18:15:41 +00:00
|
|
|
qemuReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
"%s", _("Field cpu_shares too long for destination"));
|
2009-08-03 12:37:44 +00:00
|
|
|
goto cleanup;
|
|
|
|
}
|
2009-07-09 13:11:21 +00:00
|
|
|
|
2011-07-21 08:04:25 +00:00
|
|
|
saved_nparams++;
|
|
|
|
|
|
|
|
if (cpu_bw_status) {
|
|
|
|
if (*nparams > saved_nparams) {
|
|
|
|
params[1].value.ul = period;
|
|
|
|
params[1].type = VIR_TYPED_PARAM_ULLONG;
|
2011-07-25 05:38:51 +00:00
|
|
|
if (virStrcpyStatic(params[1].field, "vcpu_period") == NULL) {
|
2011-07-21 08:04:25 +00:00
|
|
|
qemuReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
"%s",
|
2011-07-25 05:38:51 +00:00
|
|
|
_("Field vcpu_period too long for destination"));
|
2011-07-21 08:04:25 +00:00
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
saved_nparams++;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (*nparams > saved_nparams) {
|
|
|
|
params[2].value.ul = quota;
|
|
|
|
params[2].type = VIR_TYPED_PARAM_LLONG;
|
2011-07-25 05:38:51 +00:00
|
|
|
if (virStrcpyStatic(params[2].field, "vcpu_quota") == NULL) {
|
2011-07-21 08:04:25 +00:00
|
|
|
qemuReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
"%s",
|
2011-07-25 05:38:51 +00:00
|
|
|
_("Field vcpu_quota too long for destination"));
|
2011-07-21 08:04:25 +00:00
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
saved_nparams++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
*nparams = saved_nparams;
|
|
|
|
|
2009-07-09 13:11:21 +00:00
|
|
|
ret = 0;
|
|
|
|
|
|
|
|
cleanup:
|
|
|
|
virCgroupFree(&group);
|
|
|
|
if (vm)
|
|
|
|
virDomainObjUnlock(vm);
|
2009-09-02 13:02:06 +00:00
|
|
|
qemuDriverUnlock(driver);
|
2009-07-09 13:11:21 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2011-05-17 21:35:26 +00:00
|
|
|
static int
|
|
|
|
qemuGetSchedulerParameters(virDomainPtr dom,
|
|
|
|
virTypedParameterPtr params,
|
|
|
|
int *nparams)
|
|
|
|
{
|
|
|
|
return qemuGetSchedulerParametersFlags(dom, params, nparams,
|
2011-06-03 16:10:58 +00:00
|
|
|
VIR_DOMAIN_AFFECT_CURRENT);
|
2011-05-17 21:35:26 +00:00
|
|
|
}
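/* Hedged client-side counterpart to the getter above: query the scheduler
 * type to learn the parameter count, then fetch the typed parameters.
 * The domain name "demo" is an assumption for illustration. */
#include <libvirt/libvirt.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    virConnectPtr conn = NULL;
    virDomainPtr dom = NULL;
    char *type = NULL;
    virTypedParameterPtr params = NULL;
    int nparams = 0;
    int i;

    if (!(conn = virConnectOpen("qemu:///system")))
        return 1;
    if (!(dom = virDomainLookupByName(conn, "demo")))
        goto out;

    if (!(type = virDomainGetSchedulerType(dom, &nparams)) || nparams <= 0)
        goto out;
    printf("scheduler: %s, up to %d parameters\n", type, nparams);

    if ((params = calloc(nparams, sizeof(*params))) &&
        virDomainGetSchedulerParametersFlags(dom, params, &nparams,
                                             VIR_DOMAIN_AFFECT_CURRENT) == 0) {
        for (i = 0; i < nparams; i++)
            printf("  %s (type %d)\n", params[i].field, params[i].type);
    }

out:
    free(params);
    free(type);
    if (dom)
        virDomainFree(dom);
    virConnectClose(conn);
    return 0;
}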
|
2009-07-09 13:11:21 +00:00
|
|
|
|
2008-02-26 18:41:43 +00:00
|
|
|
/* This uses the 'info blockstats' monitor command which was
|
|
|
|
* integrated into both qemu & kvm in late 2007. If the command is
|
|
|
|
* not supported we detect this and return the appropriate error.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
qemudDomainBlockStats (virDomainPtr dom,
|
|
|
|
const char *path,
|
|
|
|
struct _virDomainBlockStats *stats)
|
|
|
|
{
|
2008-12-04 21:04:30 +00:00
|
|
|
struct qemud_driver *driver = dom->conn->privateData;
|
2008-10-17 09:29:29 +00:00
|
|
|
int i, ret = -1;
|
2008-12-04 21:04:30 +00:00
|
|
|
virDomainObjPtr vm;
|
2008-10-17 09:29:29 +00:00
|
|
|
virDomainDiskDefPtr disk = NULL;
|
build: detect potentential uninitialized variables
Even with -Wuninitialized (which is part of autobuild.sh
--enable-compile-warnings=error), gcc does NOT catch this
use of an uninitialized variable:
{
if (cond)
goto error;
int a = 1;
error:
printf("%d", a);
}
which prints 0 (supposing the stack started life wiped) if
cond was true. Clang will catch it, but we don't use clang
as often. Using gcc -Wjump-misses-init catches it, but also
gives false positives:
{
if (cond)
goto error;
int a = 1;
return a;
error:
return 0;
}
Here, a was never used in the scope of the error block, so
declaring it after goto is technically fine (and clang agrees).
However, given that our HACKING already documents a preference
to C89 decl-before-statement, the false positive warning is
enough of a prod to comply with HACKING.
[Personally, I'd _really_ rather use C99 decl-after-statement
to minimize scope, but until gcc can efficiently and reliably
catch scoping and uninitialized usage bugs, I'll settle with
the compromise of enforcing a coding standard that happens to
reject false positives if it can also detect real bugs.]
* acinclude.m4 (LIBVIRT_COMPILE_WARNINGS): Add -Wjump-misses-init.
* src/util/util.c (__virExec): Adjust offenders.
* src/conf/domain_conf.c (virDomainTimerDefParseXML): Likewise.
* src/remote/remote_driver.c (doRemoteOpen): Likewise.
* src/phyp/phyp_driver.c (phypGetLparNAME, phypGetLparProfile)
(phypGetVIOSFreeSCSIAdapter, phypVolumeGetKey)
(phypGetStoragePoolDevice)
(phypVolumeGetPhysicalVolumeByStoragePool)
(phypVolumeGetPath): Likewise.
* src/vbox/vbox_tmpl.c (vboxNetworkUndefineDestroy)
(vboxNetworkCreate, vboxNetworkDumpXML)
(vboxNetworkDefineCreateXML): Likewise.
* src/xenapi/xenapi_driver.c (getCapsObject)
(xenapiDomainDumpXML): Likewise.
* src/xenapi/xenapi_utils.c (createVMRecordFromXml): Likewise.
* src/security/security_selinux.c (SELinuxGenNewContext):
Likewise.
* src/qemu/qemu_command.c (qemuBuildCommandLine): Likewise.
* src/qemu/qemu_hotplug.c (qemuDomainChangeEjectableMedia):
Likewise.
* src/qemu/qemu_process.c (qemuProcessWaitForMonitor): Likewise.
* src/qemu/qemu_monitor_text.c (qemuMonitorTextGetPtyPaths):
Likewise.
* src/qemu/qemu_driver.c (qemudDomainShutdown)
(qemudDomainBlockStats, qemudDomainMemoryPeek): Likewise.
* src/storage/storage_backend_iscsi.c
(virStorageBackendCreateIfaceIQN): Likewise.
* src/node_device/node_device_udev.c (udevProcessPCI): Likewise.
2011-04-01 15:41:45 +00:00
|
|
|
qemuDomainObjPrivatePtr priv;
|
2008-02-26 18:41:43 +00:00
|
|
|
|
2008-12-04 21:06:41 +00:00
|
|
|
qemuDriverLock(driver);
|
2009-04-19 15:30:50 +00:00
|
|
|
vm = virDomainFindByUUID(&driver->domains, dom->uuid);
|
2008-12-04 21:06:41 +00:00
|
|
|
qemuDriverUnlock(driver);
|
2008-02-26 18:41:43 +00:00
|
|
|
if (!vm) {
|
2009-04-19 15:30:50 +00:00
|
|
|
char uuidstr[VIR_UUID_STRING_BUFLEN];
|
|
|
|
virUUIDFormat(dom->uuid, uuidstr);
|
2010-02-09 18:15:41 +00:00
|
|
|
qemuReportError(VIR_ERR_NO_DOMAIN,
|
|
|
|
_("no domain with matching uuid '%s'"), uuidstr);
|
2008-12-04 21:04:30 +00:00
|
|
|
goto cleanup;
|
2008-02-26 18:41:43 +00:00
|
|
|
}
|
2009-11-03 18:26:32 +00:00
|
|
|
|
2011-06-02 13:55:56 +00:00
|
|
|
if (!virDomainObjIsActive(vm)) {
|
|
|
|
qemuReportError(VIR_ERR_OPERATION_INVALID,
|
|
|
|
"%s", _("domain is not running"));
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2008-10-17 09:29:29 +00:00
|
|
|
for (i = 0 ; i < vm->def->ndisks ; i++) {
|
|
|
|
if (STREQ(path, vm->def->disks[i]->dst)) {
|
|
|
|
disk = vm->def->disks[i];
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!disk) {
|
2010-02-09 18:15:41 +00:00
|
|
|
qemuReportError(VIR_ERR_INVALID_ARG,
|
|
|
|
_("invalid path: %s"), path);
|
2011-05-13 10:11:47 +00:00
|
|
|
goto cleanup;
|
2008-02-26 18:41:43 +00:00
|
|
|
}
|
|
|
|
|
2010-01-06 12:11:26 +00:00
|
|
|
if (!disk->info.alias) {
|
2010-02-09 18:15:41 +00:00
|
|
|
qemuReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
_("missing disk device alias name for %s"), disk->dst);
|
2011-05-13 10:11:47 +00:00
|
|
|
goto cleanup;
|
2010-01-06 12:11:26 +00:00
|
|
|
}
|
2008-02-26 18:41:43 +00:00
|
|
|
|
2011-04-01 15:41:45 +00:00
|
|
|
priv = vm->privateData;
|
2011-07-19 00:27:35 +00:00
|
|
|
if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
|
|
|
|
goto cleanup;
|
2011-05-13 10:11:47 +00:00
|
|
|
|
2011-07-19 00:27:35 +00:00
|
|
|
if (!virDomainObjIsActive(vm)) {
|
|
|
|
qemuReportError(VIR_ERR_OPERATION_INVALID,
|
|
|
|
"%s", _("domain is not running"));
|
|
|
|
goto endjob;
|
|
|
|
}
|
2011-05-13 10:11:47 +00:00
|
|
|
|
qemu: fix crash when mixing sync and async monitor jobs
Currently, we attempt to run sync job and async job at the same time. It
means that the monitor commands for two jobs can be run in any order.
In the function qemuDomainObjEnterMonitorInternal():
if (priv->job.active == QEMU_JOB_NONE && priv->job.asyncJob) {
if (qemuDomainObjBeginNestedJob(driver, obj) < 0)
We check whether the caller is an async job by priv->job.active and
priv->job.asynJob. But when an async job is running, and a sync job is
also running at the time of the check, then priv->job.active is not
QEMU_JOB_NONE. So we cannot check whether the caller is an async job
in the function qemuDomainObjEnterMonitorInternal(), and must instead
put the burden on the caller to tell us when an async command wants
to do a nested job.
Once the burden is on the caller, then only async monitor enters need
to worry about whether the VM is still running; for sync monitor enter,
the internal return is always 0, so lots of ignore_value can be dropped.
* src/qemu/THREADS.txt: Reflect new rules.
* src/qemu/qemu_domain.h (qemuDomainObjEnterMonitorAsync): New
prototype.
* src/qemu/qemu_process.h (qemuProcessStartCPUs)
(qemuProcessStopCPUs): Add parameter.
* src/qemu/qemu_migration.h (qemuMigrationToFile): Likewise.
(qemuMigrationWaitForCompletion): Make static.
* src/qemu/qemu_domain.c (qemuDomainObjEnterMonitorInternal): Add
parameter.
(qemuDomainObjEnterMonitorAsync): New function.
(qemuDomainObjEnterMonitor, qemuDomainObjEnterMonitorWithDriver):
Update callers.
* src/qemu/qemu_driver.c (qemuDomainSaveInternal)
(qemudDomainCoreDump, doCoreDump, processWatchdogEvent)
(qemudDomainSuspend, qemudDomainResume, qemuDomainSaveImageStartVM)
(qemuDomainSnapshotCreateActive, qemuDomainRevertToSnapshot):
Likewise.
* src/qemu/qemu_process.c (qemuProcessStopCPUs)
(qemuProcessFakeReboot, qemuProcessRecoverMigration)
(qemuProcessRecoverJob, qemuProcessStart): Likewise.
* src/qemu/qemu_migration.c (qemuMigrationToFile)
(qemuMigrationWaitForCompletion, qemuMigrationUpdateJobStatus)
(qemuMigrationJobStart, qemuDomainMigrateGraphicsRelocate)
(doNativeMigrate, doTunnelMigrate, qemuMigrationPerformJob)
(qemuMigrationPerformPhase, qemuMigrationFinish)
(qemuMigrationConfirm): Likewise.
* src/qemu/qemu_hotplug.c: Drop unneeded ignore_value.
2011-07-28 23:18:24 +00:00
|
|
|
qemuDomainObjEnterMonitor(driver, vm);
|
2011-07-19 00:27:35 +00:00
|
|
|
ret = qemuMonitorGetBlockStatsInfo(priv->mon,
|
|
|
|
disk->info.alias,
|
|
|
|
&stats->rd_req,
|
|
|
|
&stats->rd_bytes,
|
2011-09-05 08:22:17 +00:00
|
|
|
NULL,
|
2011-07-19 00:27:35 +00:00
|
|
|
&stats->wr_req,
|
|
|
|
&stats->wr_bytes,
|
2011-09-05 08:22:17 +00:00
|
|
|
NULL,
|
|
|
|
NULL,
|
|
|
|
NULL,
|
2011-07-19 00:27:35 +00:00
|
|
|
&stats->errs);
|
|
|
|
qemuDomainObjExitMonitor(driver, vm);
|
2008-02-26 18:41:43 +00:00
|
|
|
|
2009-11-03 18:26:32 +00:00
|
|
|
endjob:
|
2011-07-19 00:27:35 +00:00
|
|
|
if (qemuDomainObjEndJob(driver, vm) == 0)
|
|
|
|
vm = NULL;
|
2009-11-03 18:26:32 +00:00
|
|
|
|
2009-09-23 12:51:05 +00:00
|
|
|
cleanup:
|
2008-12-04 21:06:41 +00:00
|
|
|
if (vm)
|
|
|
|
virDomainObjUnlock(vm);
|
2008-10-17 09:29:29 +00:00
|
|
|
return ret;
|
2008-02-26 18:41:43 +00:00
|
|
|
}
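/* Hedged sketch of the job/monitor bracketing convention the query paths
 * above follow; the function name is invented and this is not a real
 * driver entry point, it only shows the shape of the locking protocol
 * using the helpers visible in this file. */
static int
exampleQueryViaMonitor(struct qemud_driver *driver, virDomainObjPtr vm)
{
    int ret = -1;

    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
        return -1;                        /* no job acquired, nothing to undo */

    if (!virDomainObjIsActive(vm))        /* re-check once the job is held */
        goto endjob;

    qemuDomainObjEnterMonitor(driver, vm);
    /* ... issue a query command against the domain's monitor here ... */
    qemuDomainObjExitMonitor(driver, vm);
    ret = 0;

endjob:
    if (qemuDomainObjEndJob(driver, vm) == 0)
        vm = NULL;                        /* the domain object was released */

    return ret;
}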
|
|
|
|
|
2011-09-05 08:22:58 +00:00
|
|
|
static int
|
|
|
|
qemudDomainBlockStatsFlags (virDomainPtr dom,
|
|
|
|
const char *path,
|
|
|
|
virTypedParameterPtr params,
|
|
|
|
int *nparams,
|
|
|
|
unsigned int flags)
|
|
|
|
{
|
|
|
|
struct qemud_driver *driver = dom->conn->privateData;
|
|
|
|
int i, tmp, ret = -1;
|
|
|
|
virDomainObjPtr vm;
|
|
|
|
virDomainDiskDefPtr disk = NULL;
|
|
|
|
qemuDomainObjPrivatePtr priv;
|
|
|
|
long long rd_req, rd_bytes, wr_req, wr_bytes, rd_total_times;
|
|
|
|
long long wr_total_times, flush_req, flush_total_times, errs;
|
|
|
|
|
|
|
|
virCheckFlags(0, -1);
|
|
|
|
|
|
|
|
qemuDriverLock(driver);
|
|
|
|
vm = virDomainFindByUUID(&driver->domains, dom->uuid);
|
|
|
|
qemuDriverUnlock(driver);
|
|
|
|
if (!vm) {
|
|
|
|
char uuidstr[VIR_UUID_STRING_BUFLEN];
|
|
|
|
virUUIDFormat(dom->uuid, uuidstr);
|
|
|
|
qemuReportError(VIR_ERR_NO_DOMAIN,
|
|
|
|
_("no domain with matching uuid '%s'"), uuidstr);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!virDomainObjIsActive(vm)) {
|
|
|
|
qemuReportError(VIR_ERR_OPERATION_INVALID,
|
|
|
|
"%s", _("domain is not running"));
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (*nparams != 0) {
|
|
|
|
for (i = 0 ; i < vm->def->ndisks ; i++) {
|
|
|
|
if (STREQ(path, vm->def->disks[i]->dst)) {
|
|
|
|
disk = vm->def->disks[i];
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!disk) {
|
|
|
|
qemuReportError(VIR_ERR_INVALID_ARG,
|
|
|
|
_("invalid path: %s"), path);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!disk->info.alias) {
|
|
|
|
qemuReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
_("missing disk device alias name for %s"), disk->dst);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
priv = vm->privateData;
|
|
|
|
VIR_DEBUG("priv=%p, params=%p, flags=%x", priv, params, flags);
|
|
|
|
|
|
|
|
if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
qemuDomainObjEnterMonitor(driver, vm);
|
|
|
|
tmp = *nparams;
|
|
|
|
ret = qemuMonitorGetBlockStatsParamsNumber(priv->mon, nparams);
|
|
|
|
|
|
|
|
if (tmp == 0) {
|
|
|
|
qemuDomainObjExitMonitor(driver, vm);
|
|
|
|
goto endjob;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = qemuMonitorGetBlockStatsInfo(priv->mon,
|
|
|
|
disk->info.alias,
|
|
|
|
&rd_req,
|
|
|
|
&rd_bytes,
|
|
|
|
&rd_total_times,
|
|
|
|
&wr_req,
|
|
|
|
&wr_bytes,
|
|
|
|
&wr_total_times,
|
|
|
|
&flush_req,
|
|
|
|
&flush_total_times,
|
|
|
|
&errs);
|
|
|
|
|
|
|
|
qemuDomainObjExitMonitor(driver, vm);
|
|
|
|
|
|
|
|
if (ret < 0)
|
|
|
|
goto endjob;
|
|
|
|
|
|
|
|
/* Field 'errs' is meaningless for QEMU, won't set it. */
|
|
|
|
for (i = 0; i < *nparams; i++) {
|
|
|
|
virTypedParameterPtr param = &params[i];
|
|
|
|
|
|
|
|
switch (i) {
|
|
|
|
case 0: /* fill write_bytes here */
|
|
|
|
if (virStrcpyStatic(param->field, VIR_DOMAIN_BLOCK_STATS_WRITE_BYTES) == NULL) {
|
|
|
|
qemuReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
"%s", _("Field write bytes too long for destination"));
|
|
|
|
ret = -1;
goto endjob;
|
|
|
|
}
|
|
|
|
param->type = VIR_TYPED_PARAM_LLONG;
|
|
|
|
param->value.l = wr_bytes;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case 1: /* fill wr_operations here */
|
|
|
|
if (virStrcpyStatic(param->field, VIR_DOMAIN_BLOCK_STATS_WRITE_REQ) == NULL) {
|
|
|
|
qemuReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
"%s", _("Field write requests too long for destination"));
|
|
|
|
ret = -1;
goto endjob;
|
|
|
|
}
|
|
|
|
param->type = VIR_TYPED_PARAM_LLONG;
|
|
|
|
param->value.l = wr_req;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case 2: /* fill read_bytes here */
|
|
|
|
if (virStrcpyStatic(param->field, VIR_DOMAIN_BLOCK_STATS_READ_BYTES) == NULL) {
|
|
|
|
qemuReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
"%s", _("Field read bytes too long for destination"));
|
|
|
|
ret = -1;
goto endjob;
|
|
|
|
}
|
|
|
|
param->type = VIR_TYPED_PARAM_LLONG;
|
|
|
|
param->value.l = rd_bytes;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case 3: /* fill rd_operations here */
|
|
|
|
if (virStrcpyStatic(param->field, VIR_DOMAIN_BLOCK_STATS_READ_REQ) == NULL) {
|
|
|
|
qemuReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
"%s", _("Field read requests too long for destination"));
|
|
|
|
ret = -1;
goto endjob;
|
|
|
|
}
|
|
|
|
param->type = VIR_TYPED_PARAM_LLONG;
|
|
|
|
param->value.l = rd_req;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case 4: /* fill flush_operations here */
|
|
|
|
if (virStrcpyStatic(param->field, VIR_DOMAIN_BLOCK_STATS_FLUSH_REQ) == NULL) {
|
|
|
|
qemuReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
"%s", _("Field flush requests too long for destination"));
|
|
|
|
ret = -1;
goto endjob;
|
|
|
|
}
|
|
|
|
param->type = VIR_TYPED_PARAM_LLONG;
|
|
|
|
param->value.l = flush_req;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case 5: /* fill wr_total_times_ns here */
|
|
|
|
if (virStrcpyStatic(param->field, VIR_DOMAIN_BLOCK_STATS_WRITE_TOTAL_TIMES) == NULL) {
|
|
|
|
qemuReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
"%s", _("Field write total times too long for destination"));
|
|
|
|
ret = -1;
goto endjob;
|
|
|
|
}
|
|
|
|
param->type = VIR_TYPED_PARAM_LLONG;
|
|
|
|
param->value.l = wr_total_times;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case 6: /* fill rd_total_times_ns here */
|
|
|
|
if (virStrcpyStatic(param->field, VIR_DOMAIN_BLOCK_STATS_READ_TOTAL_TIMES) == NULL) {
|
|
|
|
qemuReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
"%s", _("Field read total times too long for destination"));
|
|
|
|
ret = -1;
goto endjob;
|
|
|
|
}
|
|
|
|
param->type = VIR_TYPED_PARAM_LLONG;
|
|
|
|
param->value.l = rd_total_times;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case 7: /* fill flush_total_times_ns here */
|
|
|
|
if (virStrcpyStatic(param->field, VIR_DOMAIN_BLOCK_STATS_FLUSH_TOTAL_TIMES) == NULL) {
|
|
|
|
qemuReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
"%s", _("Field flush total times too long for destination"));
|
|
|
|
ret = -1;
goto endjob;
|
|
|
|
}
|
|
|
|
param->type = VIR_TYPED_PARAM_LLONG;
|
|
|
|
param->value.l = flush_total_times;
|
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
/* should not hit here */
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
endjob:
|
|
|
|
if (qemuDomainObjEndJob(driver, vm) == 0)
|
|
|
|
vm = NULL;
|
|
|
|
|
|
|
|
cleanup:
|
|
|
|
if (vm)
|
|
|
|
virDomainObjUnlock(vm);
|
|
|
|
return ret;
|
|
|
|
}
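/* Hedged client-side sketch of the probe-then-fetch pattern the handler
 * above supports: a first call with *nparams == 0 just reports how many
 * typed block-stats fields are available.  The domain name "demo" and the
 * disk target "vda" are illustrative assumptions. */
#include <libvirt/libvirt.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    virConnectPtr conn = NULL;
    virDomainPtr dom = NULL;
    virTypedParameterPtr params = NULL;
    int nparams = 0;
    int i;

    if (!(conn = virConnectOpen("qemu:///system")))
        return 1;
    if (!(dom = virDomainLookupByName(conn, "demo")))
        goto out;

    if (virDomainBlockStatsFlags(dom, "vda", NULL, &nparams, 0) < 0 ||
        nparams == 0)
        goto out;

    if ((params = calloc(nparams, sizeof(*params))) &&
        virDomainBlockStatsFlags(dom, "vda", params, &nparams, 0) == 0) {
        for (i = 0; i < nparams; i++)
            printf("%s = %lld\n", params[i].field, params[i].value.l);
    }

out:
    free(params);
    if (dom)
        virDomainFree(dom);
    virConnectClose(conn);
    return 0;
}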
|
|
|
|
|
2008-12-04 21:04:30 +00:00
|
|
|
#ifdef __linux__
|
2007-11-15 17:45:44 +00:00
|
|
|
static int
|
|
|
|
qemudDomainInterfaceStats (virDomainPtr dom,
|
|
|
|
const char *path,
|
|
|
|
struct _virDomainInterfaceStats *stats)
|
|
|
|
{
|
2008-12-04 21:04:30 +00:00
|
|
|
struct qemud_driver *driver = dom->conn->privateData;
|
|
|
|
virDomainObjPtr vm;
|
2008-10-10 16:08:01 +00:00
|
|
|
int i;
|
2008-12-04 21:04:30 +00:00
|
|
|
int ret = -1;
|
2007-11-15 17:45:44 +00:00
|
|
|
|
2008-12-04 21:06:41 +00:00
|
|
|
qemuDriverLock(driver);
|
2009-04-19 15:30:50 +00:00
|
|
|
vm = virDomainFindByUUID(&driver->domains, dom->uuid);
|
2008-12-04 21:06:41 +00:00
|
|
|
qemuDriverUnlock(driver);
|
|
|
|
|
2007-11-15 17:45:44 +00:00
|
|
|
if (!vm) {
|
2009-04-19 15:30:50 +00:00
|
|
|
char uuidstr[VIR_UUID_STRING_BUFLEN];
|
|
|
|
virUUIDFormat(dom->uuid, uuidstr);
|
2010-02-09 18:15:41 +00:00
|
|
|
qemuReportError(VIR_ERR_NO_DOMAIN,
|
|
|
|
_("no domain with matching uuid '%s'"), uuidstr);
|
2008-12-04 21:04:30 +00:00
|
|
|
goto cleanup;
|
2007-11-15 17:45:44 +00:00
|
|
|
}
|
|
|
|
|
Rename internal APis
Rename virDomainIsActive to virDomainObjIsActive, and
virInterfaceIsActive to virInterfaceObjIsActive and finally
virNetworkIsActive to virNetworkObjIsActive.
* src/conf/domain_conf.c, src/conf/domain_conf.h,
src/conf/interface_conf.h, src/conf/network_conf.c,
src/conf/network_conf.h, src/lxc/lxc_driver.c,
src/network/bridge_driver.c, src/opennebula/one_driver.c,
src/openvz/openvz_driver.c, src/qemu/qemu_driver.c,
src/test/test_driver.c, src/uml/uml_driver.c: Update for
renamed APIs.
2009-10-20 14:51:03 +00:00
|
|
|
if (!virDomainObjIsActive(vm)) {
|
2010-02-09 18:15:41 +00:00
|
|
|
qemuReportError(VIR_ERR_OPERATION_INVALID,
|
|
|
|
"%s", _("domain is not running"));
|
2008-12-04 21:04:30 +00:00
|
|
|
goto cleanup;
|
2007-11-15 17:45:44 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Check the path is one of the domain's network interfaces. */
|
2008-10-10 16:08:01 +00:00
|
|
|
for (i = 0 ; i < vm->def->nnets ; i++) {
|
|
|
|
if (vm->def->nets[i]->ifname &&
|
2008-12-04 21:04:30 +00:00
|
|
|
STREQ (vm->def->nets[i]->ifname, path)) {
|
|
|
|
ret = 0;
|
|
|
|
break;
|
|
|
|
}
|
2007-11-15 17:45:44 +00:00
|
|
|
}
|
|
|
|
|
2008-12-04 21:04:30 +00:00
|
|
|
if (ret == 0)
|
2010-02-04 23:02:10 +00:00
|
|
|
ret = linuxDomainInterfaceStats(path, stats);
|
2008-12-04 21:04:30 +00:00
|
|
|
else
|
2010-02-09 18:15:41 +00:00
|
|
|
qemuReportError(VIR_ERR_INVALID_ARG,
|
|
|
|
_("invalid path, '%s' is not a known interface"), path);
|
2007-11-15 17:45:44 +00:00
|
|
|
|
2008-12-04 21:04:30 +00:00
|
|
|
cleanup:
|
2008-12-04 21:06:41 +00:00
|
|
|
if (vm)
|
|
|
|
virDomainObjUnlock(vm);
|
2008-12-04 21:04:30 +00:00
|
|
|
return ret;
|
|
|
|
}
|
2007-11-15 17:45:44 +00:00
|
|
|
#else
|
2008-12-04 21:04:30 +00:00
|
|
|
static int
|
|
|
|
qemudDomainInterfaceStats (virDomainPtr dom,
|
|
|
|
const char *path ATTRIBUTE_UNUSED,
|
|
|
|
struct _virDomainInterfaceStats *stats ATTRIBUTE_UNUSED)
|
2011-03-25 15:42:32 +00:00
|
|
|
{
|
2011-08-23 08:23:10 +00:00
|
|
|
qemuReportError(VIR_ERR_OPERATION_INVALID, "%s",
|
|
|
|
_("interface stats not implemented on this platform"));
|
2007-11-15 17:45:44 +00:00
|
|
|
return -1;
|
|
|
|
}
|
2008-12-04 21:04:30 +00:00
|
|
|
#endif
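/* Hedged, standalone sketch of the matching public call; the interface name
 * must be the host-side tap device of the guest NIC, and "vnet0" plus the
 * domain name "demo" are only illustrative assumptions. */
#include <libvirt/libvirt.h>
#include <stdio.h>

int main(void)
{
    virConnectPtr conn = virConnectOpen("qemu:///system");
    virDomainPtr dom = conn ? virDomainLookupByName(conn, "demo") : NULL;
    virDomainInterfaceStatsStruct st;

    if (!dom) {
        if (conn)
            virConnectClose(conn);
        return 1;
    }

    if (virDomainInterfaceStats(dom, "vnet0", &st, sizeof(st)) == 0)
        printf("rx_bytes=%lld tx_bytes=%lld\n", st.rx_bytes, st.tx_bytes);

    virDomainFree(dom);
    virConnectClose(conn);
    return 0;
}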
|
2007-11-15 17:45:44 +00:00
|
|
|
|
2009-12-20 12:36:28 +00:00
|
|
|
static int
|
|
|
|
qemudDomainMemoryStats (virDomainPtr dom,
|
|
|
|
struct _virDomainMemoryStat *stats,
|
libvirt: do not mix internal flags into public API
There were two API in driver.c that were silently masking flags
bits prior to calling out to the drivers, and several others
that were explicitly masking flags bits. This is not
forward-compatible - if we ever have that many flags in the
future, then talking to an old server that masks out the
flags would be indistinguishable from talking to a new server
that can honor the flag. In general, libvirt.c should forward
_all_ flags on to drivers, and only the drivers should reject
unknown flags.
In the case of virDrvSecretGetValue, the solution is to separate
the internal driver callback function to have two parameters
instead of one, with only one parameter affected by the public
API. In the case of virDomainGetXMLDesc, it turns out that
no one was ever mixing VIR_DOMAIN_XML_INTERNAL_STATUS with
the dumpxml path in the first place; that internal flag was
only used in saving and restoring state files, which happened
to be in functions internal to a single file, so there is no
mixing of the internal flag with a public flags argument.
Additionally, virDomainMemoryStats passed a flags argument
over RPC, but not to the driver.
* src/driver.h (VIR_DOMAIN_XML_FLAGS_MASK)
(VIR_SECRET_GET_VALUE_FLAGS_MASK): Delete.
(virDrvSecretGetValue): Separate out internal flags.
(virDrvDomainMemoryStats): Provide missing flags argument.
* src/driver.c (verify): Drop unused check.
* src/conf/domain_conf.h (virDomainObjParseFile): Delete
declaration.
(virDomainXMLInternalFlags): Move...
* src/conf/domain_conf.c: ...here. Delete redundant include.
(virDomainObjParseFile): Make static.
* src/libvirt.c (virDomainGetXMLDesc, virSecretGetValue): Update
clients.
(virDomainMemoryPeek, virInterfaceGetXMLDesc)
(virDomainMemoryStats, virDomainBlockPeek, virNetworkGetXMLDesc)
(virStoragePoolGetXMLDesc, virStorageVolGetXMLDesc)
(virNodeNumOfDevices, virNodeListDevices, virNWFilterGetXMLDesc):
Don't mask unknown flags.
* src/interface/netcf_driver.c (interfaceGetXMLDesc): Reject
unknown flags.
* src/secret/secret_driver.c (secretGetValue): Update clients.
* src/remote/remote_driver.c (remoteSecretGetValue)
(remoteDomainMemoryStats): Likewise.
* src/qemu/qemu_process.c (qemuProcessGetVolumeQcowPassphrase):
Likewise.
* src/qemu/qemu_driver.c (qemudDomainMemoryStats): Likewise.
* daemon/remote.c (remoteDispatchDomainMemoryStats): Likewise.
2011-07-13 21:31:56 +00:00
|
|
|
unsigned int nr_stats,
|
|
|
|
unsigned int flags)
|
2009-12-20 12:36:28 +00:00
|
|
|
{
|
|
|
|
struct qemud_driver *driver = dom->conn->privateData;
|
|
|
|
virDomainObjPtr vm;
|
|
|
|
int ret = -1;
|
|
|
|
|
2011-07-13 21:31:56 +00:00
|
|
|
virCheckFlags(0, -1);
|
|
|
|
|
2009-12-20 12:36:28 +00:00
|
|
|
qemuDriverLock(driver);
|
|
|
|
vm = virDomainFindByUUID(&driver->domains, dom->uuid);
|
|
|
|
qemuDriverUnlock(driver);
|
|
|
|
|
|
|
|
if (!vm) {
|
|
|
|
char uuidstr[VIR_UUID_STRING_BUFLEN];
|
|
|
|
virUUIDFormat(dom->uuid, uuidstr);
|
2010-02-09 18:15:41 +00:00
|
|
|
qemuReportError(VIR_ERR_NO_DOMAIN,
|
|
|
|
_("no domain with matching uuid '%s'"), uuidstr);
|
2009-12-20 12:36:28 +00:00
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2011-06-06 08:28:38 +00:00
|
|
|
if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
|
2010-03-08 14:15:44 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
2009-12-20 12:36:28 +00:00
|
|
|
if (virDomainObjIsActive(vm)) {
|
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
2011-07-28 23:18:24 +00:00
|
|
|
qemuDomainObjEnterMonitor(driver, vm);
|
2010-04-12 11:31:15 +00:00
|
|
|
ret = qemuMonitorGetMemoryStats(priv->mon, stats, nr_stats);
|
2011-06-06 08:28:38 +00:00
|
|
|
qemuDomainObjExitMonitor(driver, vm);
|
2009-12-20 12:36:28 +00:00
|
|
|
} else {
|
2010-02-09 18:15:41 +00:00
|
|
|
qemuReportError(VIR_ERR_OPERATION_INVALID,
|
|
|
|
"%s", _("domain is not running"));
|
2009-12-20 12:36:28 +00:00
|
|
|
}
|
|
|
|
|
2011-06-06 08:28:38 +00:00
|
|
|
if (qemuDomainObjEndJob(driver, vm) == 0)
|
2010-03-08 14:15:44 +00:00
|
|
|
vm = NULL;
|
|
|
|
|
2009-12-20 12:36:28 +00:00
|
|
|
cleanup:
|
|
|
|
if (vm)
|
|
|
|
virDomainObjUnlock(vm);
|
|
|
|
return ret;
|
|
|
|
}
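/* Hedged client sketch of the corresponding public call; the stats array is
 * bounded by VIR_DOMAIN_MEMORY_STAT_NR and the domain name "demo" is an
 * assumption for illustration. */
#include <libvirt/libvirt.h>
#include <stdio.h>

int main(void)
{
    virConnectPtr conn = virConnectOpen("qemu:///system");
    virDomainPtr dom = conn ? virDomainLookupByName(conn, "demo") : NULL;
    virDomainMemoryStatStruct stats[VIR_DOMAIN_MEMORY_STAT_NR];
    int i, n;

    if (!dom) {
        if (conn)
            virConnectClose(conn);
        return 1;
    }

    n = virDomainMemoryStats(dom, stats, VIR_DOMAIN_MEMORY_STAT_NR, 0);
    for (i = 0; i < n; i++)
        printf("tag %d = %llu\n", stats[i].tag, stats[i].val);

    virDomainFree(dom);
    virConnectClose(conn);
    return 0;
}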
|
|
|
|
|
2008-06-05 21:12:26 +00:00
|
|
|
static int
|
|
|
|
qemudDomainBlockPeek (virDomainPtr dom,
|
|
|
|
const char *path,
|
|
|
|
unsigned long long offset, size_t size,
|
|
|
|
void *buffer,
|
2011-07-06 22:42:06 +00:00
|
|
|
unsigned int flags)
|
2008-06-05 21:12:26 +00:00
|
|
|
{
|
2008-12-04 21:04:30 +00:00
|
|
|
struct qemud_driver *driver = dom->conn->privateData;
|
|
|
|
virDomainObjPtr vm;
|
snapshot: also support disks by path
I got confused when 'virsh domblkinfo dom disk' required the
path to a disk (which can be ambiguous, since a single file
can back multiple disks), rather than the unambiguous target
device name that I was using in disk snapshots. So, in true
developer fashion, I went for the best of both worlds - all
interfaces that operate on a disk (aka block) now accept
either the target name or the unambiguous path to the backing
file used by the disk.
* src/conf/domain_conf.h (virDomainDiskIndexByName): Add
parameter.
(virDomainDiskPathByName): New prototype.
* src/libvirt_private.syms (domain_conf.h): Export it.
* src/conf/domain_conf.c (virDomainDiskIndexByName): Also allow
searching by path, and decide whether ambiguity is okay.
(virDomainDiskPathByName): New function.
(virDomainDiskRemoveByName, virDomainSnapshotAlignDisks): Update
callers.
* src/qemu/qemu_driver.c (qemudDomainBlockPeek)
(qemuDomainAttachDeviceConfig, qemuDomainUpdateDeviceConfig)
(qemuDomainGetBlockInfo, qemuDiskPathToAlias): Likewise.
* src/qemu/qemu_process.c (qemuProcessFindDomainDiskByPath):
Likewise.
* src/libxl/libxl_driver.c (libxlDomainAttachDeviceDiskLive)
(libxlDomainDetachDeviceDiskLive, libxlDomainAttachDeviceConfig)
(libxlDomainUpdateDeviceConfig): Likewise.
* src/uml/uml_driver.c (umlDomainBlockPeek): Likewise.
* src/xen/xend_internal.c (xenDaemonDomainBlockPeek): Likewise.
* docs/formatsnapshot.html.in: Update documentation.
* tools/virsh.pod (domblkstat, domblkinfo): Likewise.
* docs/schemas/domaincommon.rng (diskTarget): Tighten pattern on
disk targets.
* docs/schemas/domainsnapshot.rng (disksnapshot): Update to match.
* tests/domainsnapshotxml2xmlin/disk_snapshot.xml: Update test.
2011-08-20 02:38:36 +00:00
|
|
|
int fd = -1, ret = -1;
|
|
|
|
const char *actual;
|
2008-06-05 21:12:26 +00:00
|
|
|
|
2011-07-06 22:42:06 +00:00
|
|
|
virCheckFlags(0, -1);
|
|
|
|
|
2008-12-04 21:06:41 +00:00
|
|
|
qemuDriverLock(driver);
|
2008-12-04 21:04:30 +00:00
|
|
|
vm = virDomainFindByUUID(&driver->domains, dom->uuid);
|
2008-12-04 21:06:41 +00:00
|
|
|
qemuDriverUnlock(driver);
|
|
|
|
|
2008-06-05 21:12:26 +00:00
|
|
|
if (!vm) {
|
2009-04-19 15:30:50 +00:00
|
|
|
char uuidstr[VIR_UUID_STRING_BUFLEN];
|
|
|
|
virUUIDFormat(dom->uuid, uuidstr);
|
2010-02-09 18:15:41 +00:00
|
|
|
qemuReportError(VIR_ERR_NO_DOMAIN,
|
|
|
|
_("no domain with matching uuid '%s'"), uuidstr);
|
2008-12-04 21:04:30 +00:00
|
|
|
goto cleanup;
|
2008-06-05 21:12:26 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (!path || path[0] == '\0') {
|
2010-02-09 18:15:41 +00:00
|
|
|
qemuReportError(VIR_ERR_INVALID_ARG,
|
|
|
|
"%s", _("NULL or empty path"));
|
2008-12-04 21:04:30 +00:00
|
|
|
goto cleanup;
|
2008-06-05 21:12:26 +00:00
|
|
|
}
|
|
|
|
|
2011-08-20 02:38:36 +00:00
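A minimal sketch (not from the original tree) of how a caller can resolve a user-supplied disk identifier with the helpers named in the log above; the wrapper name is hypothetical and error reporting is omitted. The real call forms appear just below (virDomainDiskPathByName) and in qemuDomainGetBlockInfo (virDomainDiskIndexByName):

static virDomainDiskDefPtr
exampleResolveDisk(virDomainDefPtr def, const char *name_or_path)
{
    /* Accepts either a target like "vda" or the backing file path;
     * the final 'false' mirrors the qemuDomainGetBlockInfo call and
     * is assumed to reject ambiguous path matches. */
    int idx = virDomainDiskIndexByName(def, name_or_path, false);

    if (idx < 0)
        return NULL;                /* neither a known target nor a path */
    return def->disks[idx];
}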
|
|
|
/* Check the path belongs to this domain. */
|
|
|
|
if (!(actual = virDomainDiskPathByName(vm->def, path))) {
|
|
|
|
qemuReportError(VIR_ERR_INVALID_ARG,
|
|
|
|
_("invalid path '%s'"), path);
|
|
|
|
goto cleanup;
|
2008-06-05 21:12:26 +00:00
|
|
|
}
|
2011-08-20 02:38:36 +00:00
|
|
|
path = actual;
|
2008-06-05 21:12:26 +00:00
|
|
|
|
2011-08-20 02:38:36 +00:00
|
|
|
/* The path is correct, now try to open it and get its size. */
|
|
|
|
fd = open(path, O_RDONLY);
|
|
|
|
if (fd == -1) {
|
|
|
|
virReportSystemError(errno,
|
|
|
|
_("%s: failed to open"), path);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
2008-12-04 21:04:30 +00:00
|
|
|
|
2011-08-20 02:38:36 +00:00
|
|
|
/* Seek and read. */
|
|
|
|
/* NB. Because we configure with AC_SYS_LARGEFILE, off_t should
|
|
|
|
* be 64 bits on all platforms.
|
|
|
|
*/
|
|
|
|
if (lseek(fd, offset, SEEK_SET) == (off_t) -1 ||
|
|
|
|
saferead(fd, buffer, size) == (ssize_t) -1) {
|
|
|
|
virReportSystemError(errno,
|
|
|
|
_("%s: failed to seek or read"), path);
|
|
|
|
goto cleanup;
|
2008-06-05 21:12:26 +00:00
|
|
|
}
|
|
|
|
|
2011-08-20 02:38:36 +00:00
|
|
|
ret = 0;
|
|
|
|
|
2008-12-04 21:04:30 +00:00
|
|
|
cleanup:
|
2010-11-09 20:48:48 +00:00
|
|
|
VIR_FORCE_CLOSE(fd);
|
2008-12-04 21:06:41 +00:00
|
|
|
if (vm)
|
|
|
|
virDomainObjUnlock(vm);
|
2008-06-05 21:12:26 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2008-06-10 10:43:28 +00:00
|
|
|
static int
|
|
|
|
qemudDomainMemoryPeek (virDomainPtr dom,
|
|
|
|
unsigned long long offset, size_t size,
|
|
|
|
void *buffer,
|
|
|
|
unsigned int flags)
|
|
|
|
{
|
2008-12-04 21:04:30 +00:00
|
|
|
struct qemud_driver *driver = dom->conn->privateData;
|
|
|
|
virDomainObjPtr vm;
|
2009-08-25 15:49:09 +00:00
|
|
|
char *tmp = NULL;
|
2008-06-10 10:43:28 +00:00
|
|
|
int fd = -1, ret = -1;
|
build: detect potential uninitialized variables
Even with -Wuninitialized (which is part of autobuild.sh
--enable-compile-warnings=error), gcc does NOT catch this
use of an uninitialized variable:
{
if (cond)
goto error;
int a = 1;
error:
printf("%d", a);
}
which prints 0 (supposing the stack started life wiped) if
cond was true. Clang will catch it, but we don't use clang
as often. Using gcc -Wjump-misses-init catches it, but also
gives false positives:
{
if (cond)
goto error;
int a = 1;
return a;
error:
return 0;
}
Here, a was never used in the scope of the error block, so
declaring it after goto is technically fine (and clang agrees).
However, given that our HACKING already documents a preference
to C89 decl-before-statement, the false positive warning is
enough of a prod to comply with HACKING.
[Personally, I'd _really_ rather use C99 decl-after-statement
to minimize scope, but until gcc can efficiently and reliably
catch scoping and uninitialized usage bugs, I'll settle with
the compromise of enforcing a coding standard that happens to
reject false positives if it can also detect real bugs.]
* acinclude.m4 (LIBVIRT_COMPILE_WARNINGS): Add -Wjump-misses-init.
* src/util/util.c (__virExec): Adjust offenders.
* src/conf/domain_conf.c (virDomainTimerDefParseXML): Likewise.
* src/remote/remote_driver.c (doRemoteOpen): Likewise.
* src/phyp/phyp_driver.c (phypGetLparNAME, phypGetLparProfile)
(phypGetVIOSFreeSCSIAdapter, phypVolumeGetKey)
(phypGetStoragePoolDevice)
(phypVolumeGetPhysicalVolumeByStoragePool)
(phypVolumeGetPath): Likewise.
* src/vbox/vbox_tmpl.c (vboxNetworkUndefineDestroy)
(vboxNetworkCreate, vboxNetworkDumpXML)
(vboxNetworkDefineCreateXML): Likewise.
* src/xenapi/xenapi_driver.c (getCapsObject)
(xenapiDomainDumpXML): Likewise.
* src/xenapi/xenapi_utils.c (createVMRecordFromXml): Likewise.
* src/security/security_selinux.c (SELinuxGenNewContext):
Likewise.
* src/qemu/qemu_command.c (qemuBuildCommandLine): Likewise.
* src/qemu/qemu_hotplug.c (qemuDomainChangeEjectableMedia):
Likewise.
* src/qemu/qemu_process.c (qemuProcessWaitForMonitor): Likewise.
* src/qemu/qemu_monitor_text.c (qemuMonitorTextGetPtyPaths):
Likewise.
* src/qemu/qemu_driver.c (qemudDomainShutdown)
(qemudDomainBlockStats, qemudDomainMemoryPeek): Likewise.
* src/storage/storage_backend_iscsi.c
(virStorageBackendCreateIfaceIQN): Likewise.
* src/node_device/node_device_udev.c (udevProcessPCI): Likewise.
2011-04-01 15:41:45 +00:00
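For reference, a short sketch (mine, not from the tree) of the C89 decl-before-statement form that -Wjump-misses-init pushes the second example above toward:

static int
example(int cond)
{
    int a = 1;          /* declared before any goto can jump over it */

    if (cond)
        goto error;
    return a;

 error:
    return 0;
}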
|
|
|
qemuDomainObjPrivatePtr priv;
|
2008-06-10 10:43:28 +00:00
|
|
|
|
2011-07-13 22:24:38 +00:00
|
|
|
virCheckFlags(VIR_MEMORY_VIRTUAL | VIR_MEMORY_PHYSICAL, -1);
|
|
|
|
|
2008-12-04 21:06:41 +00:00
|
|
|
qemuDriverLock(driver);
|
2009-04-19 15:30:50 +00:00
|
|
|
vm = virDomainFindByUUID(&driver->domains, dom->uuid);
|
2008-12-04 21:06:41 +00:00
|
|
|
qemuDriverUnlock(driver);
|
2008-06-10 10:43:28 +00:00
|
|
|
|
|
|
|
if (!vm) {
|
2009-04-19 15:30:50 +00:00
|
|
|
char uuidstr[VIR_UUID_STRING_BUFLEN];
|
|
|
|
virUUIDFormat(dom->uuid, uuidstr);
|
2010-02-09 18:15:41 +00:00
|
|
|
qemuReportError(VIR_ERR_NO_DOMAIN,
|
|
|
|
_("no domain with matching uuid '%s'"), uuidstr);
|
2008-12-04 21:04:30 +00:00
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2009-07-22 14:27:09 +00:00
|
|
|
if (flags != VIR_MEMORY_VIRTUAL && flags != VIR_MEMORY_PHYSICAL) {
|
2010-02-09 18:15:41 +00:00
|
|
|
qemuReportError(VIR_ERR_INVALID_ARG,
|
|
|
|
"%s", _("flags parameter must be VIR_MEMORY_VIRTUAL or VIR_MEMORY_PHYSICAL"));
|
2008-12-04 21:04:30 +00:00
|
|
|
goto cleanup;
|
2008-06-10 10:43:28 +00:00
|
|
|
}
|
|
|
|
|
2011-06-06 08:28:38 +00:00
|
|
|
if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
|
2009-11-03 18:26:32 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
Rename internal APIs
Rename virDomainIsActive to virDomainObjIsActive, and
virInterfaceIsActive to virInterfaceObjIsActive and finally
virNetworkIsActive to virNetworkObjIsActive.
* src/conf/domain_conf.c, src/conf/domain_conf.h,
src/conf/interface_conf.h, src/conf/network_conf.c,
src/conf/network_conf.h, src/lxc/lxc_driver.c,
src/network/bridge_driver.c, src/opennebula/one_driver.c,
src/openvz/openvz_driver.c, src/qemu/qemu_driver.c,
src/test/test_driver.c, src/uml/uml_driver.c: Update for
renamed APIs.
2009-10-20 14:51:03 +00:00
|
|
|
if (!virDomainObjIsActive(vm)) {
|
2010-02-09 18:15:41 +00:00
|
|
|
qemuReportError(VIR_ERR_OPERATION_INVALID,
|
|
|
|
"%s", _("domain is not running"));
|
2009-11-03 18:26:32 +00:00
|
|
|
goto endjob;
|
2008-06-10 10:43:28 +00:00
|
|
|
}
|
|
|
|
|
2010-05-14 10:08:59 +00:00
|
|
|
if (virAsprintf(&tmp, "%s/qemu.mem.XXXXXX", driver->cacheDir) < 0) {
|
2010-02-04 18:19:08 +00:00
|
|
|
virReportOOMError();
|
2009-11-03 18:26:32 +00:00
|
|
|
goto endjob;
|
2009-08-25 15:49:09 +00:00
|
|
|
}
|
|
|
|
|
2008-06-10 10:43:28 +00:00
|
|
|
/* Create a temporary filename. */
|
|
|
|
if ((fd = mkstemp (tmp)) == -1) {
|
2010-02-04 20:02:58 +00:00
|
|
|
virReportSystemError(errno,
|
|
|
|
_("mkstemp(\"%s\") failed"), tmp);
|
2009-11-03 18:26:32 +00:00
|
|
|
goto endjob;
|
2008-06-10 10:43:28 +00:00
|
|
|
}
|
|
|
|
|
2011-05-20 12:56:46 +00:00
|
|
|
virSecurityManagerSetSavedStateLabel(qemu_driver->securityManager, vm, tmp);
|
|
|
|
|
2011-04-01 15:41:45 +00:00
|
|
|
priv = vm->privateData;
|
qemu: fix crash when mixing sync and async monitor jobs
Currently, we attempt to run sync job and async job at the same time. It
means that the monitor commands for two jobs can be run in any order.
In the function qemuDomainObjEnterMonitorInternal():
if (priv->job.active == QEMU_JOB_NONE && priv->job.asyncJob) {
if (qemuDomainObjBeginNestedJob(driver, obj) < 0)
We check whether the caller is an async job by priv->job.active and
priv->job.asyncJob. But when an async job is running, and a sync job is
also running at the time of the check, then priv->job.active is not
QEMU_JOB_NONE. So we cannot check whether the caller is an async job
in the function qemuDomainObjEnterMonitorInternal(), and must instead
put the burden on the caller to tell us when an async command wants
to do a nested job.
Once the burden is on the caller, then only async monitor enters need
to worry about whether the VM is still running; for sync monitor enter,
the internal return is always 0, so lots of ignore_value can be dropped.
* src/qemu/THREADS.txt: Reflect new rules.
* src/qemu/qemu_domain.h (qemuDomainObjEnterMonitorAsync): New
prototype.
* src/qemu/qemu_process.h (qemuProcessStartCPUs)
(qemuProcessStopCPUs): Add parameter.
* src/qemu/qemu_migration.h (qemuMigrationToFile): Likewise.
(qemuMigrationWaitForCompletion): Make static.
* src/qemu/qemu_domain.c (qemuDomainObjEnterMonitorInternal): Add
parameter.
(qemuDomainObjEnterMonitorAsync): New function.
(qemuDomainObjEnterMonitor, qemuDomainObjEnterMonitorWithDriver):
Update callers.
* src/qemu/qemu_driver.c (qemuDomainSaveInternal)
(qemudDomainCoreDump, doCoreDump, processWatchdogEvent)
(qemudDomainSuspend, qemudDomainResume, qemuDomainSaveImageStartVM)
(qemuDomainSnapshotCreateActive, qemuDomainRevertToSnapshot):
Likewise.
* src/qemu/qemu_process.c (qemuProcessStopCPUs)
(qemuProcessFakeReboot, qemuProcessRecoverMigration)
(qemuProcessRecoverJob, qemuProcessStart): Likewise.
* src/qemu/qemu_migration.c (qemuMigrationToFile)
(qemuMigrationWaitForCompletion, qemuMigrationUpdateJobStatus)
(qemuMigrationJobStart, qemuDomainMigrateGraphicsRelocate)
(doNativeMigrate, doTunnelMigrate, qemuMigrationPerformJob)
(qemuMigrationPerformPhase, qemuMigrationFinish)
(qemuMigrationConfirm): Likewise.
* src/qemu/qemu_hotplug.c: Drop unneeded ignore_value.
2011-07-28 23:18:24 +00:00
|
|
|
qemuDomainObjEnterMonitor(driver, vm);
|
2009-09-23 12:33:45 +00:00
|
|
|
if (flags == VIR_MEMORY_VIRTUAL) {
|
2009-10-13 14:27:58 +00:00
|
|
|
if (qemuMonitorSaveVirtualMemory(priv->mon, offset, size, tmp) < 0) {
|
2011-06-06 08:28:38 +00:00
|
|
|
qemuDomainObjExitMonitor(driver, vm);
|
2009-11-03 18:26:32 +00:00
|
|
|
goto endjob;
|
2009-10-13 14:27:58 +00:00
|
|
|
}
|
2009-09-23 12:33:45 +00:00
|
|
|
} else {
|
2009-10-13 14:27:58 +00:00
|
|
|
if (qemuMonitorSavePhysicalMemory(priv->mon, offset, size, tmp) < 0) {
|
2011-06-06 08:28:38 +00:00
|
|
|
qemuDomainObjExitMonitor(driver, vm);
|
2009-11-03 18:26:32 +00:00
|
|
|
goto endjob;
|
2009-10-13 14:27:58 +00:00
|
|
|
}
|
2008-06-10 10:43:28 +00:00
|
|
|
}
|
2011-06-06 08:28:38 +00:00
|
|
|
qemuDomainObjExitMonitor(driver, vm);
|
2008-06-10 10:43:28 +00:00
|
|
|
|
|
|
|
/* Read the memory file into buffer. */
|
2011-08-20 02:38:36 +00:00
|
|
|
if (saferead(fd, buffer, size) == (ssize_t) -1) {
|
2010-02-04 20:02:58 +00:00
|
|
|
virReportSystemError(errno,
|
|
|
|
_("failed to read temporary file "
|
|
|
|
"created with template %s"), tmp);
|
2009-11-03 18:26:32 +00:00
|
|
|
goto endjob;
|
2008-06-10 10:43:28 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
ret = 0;
|
2008-12-04 21:04:30 +00:00
|
|
|
|
2009-11-03 18:26:32 +00:00
|
|
|
endjob:
|
2011-06-06 08:28:38 +00:00
|
|
|
if (qemuDomainObjEndJob(driver, vm) == 0)
|
2009-12-08 14:42:43 +00:00
|
|
|
vm = NULL;
|
2009-11-03 18:26:32 +00:00
|
|
|
|
2008-12-04 21:04:30 +00:00
|
|
|
cleanup:
|
2010-11-09 20:48:48 +00:00
|
|
|
VIR_FORCE_CLOSE(fd);
|
2011-05-03 15:36:12 +00:00
|
|
|
if (tmp)
|
|
|
|
unlink(tmp);
|
2011-03-31 03:28:21 +00:00
|
|
|
VIR_FREE(tmp);
|
2008-12-04 21:06:41 +00:00
|
|
|
if (vm)
|
|
|
|
virDomainObjUnlock(vm);
|
2008-06-10 10:43:28 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2007-02-23 09:03:25 +00:00
|
|
|
|
2010-04-27 19:31:09 +00:00
|
|
|
static int qemuDomainGetBlockInfo(virDomainPtr dom,
|
|
|
|
const char *path,
|
|
|
|
virDomainBlockInfoPtr info,
|
|
|
|
unsigned int flags) {
|
|
|
|
struct qemud_driver *driver = dom->conn->privateData;
|
|
|
|
virDomainObjPtr vm;
|
|
|
|
int ret = -1;
|
|
|
|
int fd = -1;
|
|
|
|
off_t end;
|
2011-07-28 13:42:57 +00:00
|
|
|
virStorageFileMetadata *meta = NULL;
|
2010-05-14 13:10:01 +00:00
|
|
|
virDomainDiskDefPtr disk = NULL;
|
2010-04-27 19:31:09 +00:00
|
|
|
struct stat sb;
|
2011-09-08 09:10:14 +00:00
|
|
|
int i;
|
2010-06-15 15:15:51 +00:00
|
|
|
int format;
|
2010-04-27 19:31:09 +00:00
|
|
|
|
|
|
|
virCheckFlags(0, -1);
|
|
|
|
|
|
|
|
qemuDriverLock(driver);
|
|
|
|
vm = virDomainFindByUUID(&driver->domains, dom->uuid);
|
|
|
|
qemuDriverUnlock(driver);
|
|
|
|
if (!vm) {
|
|
|
|
char uuidstr[VIR_UUID_STRING_BUFLEN];
|
|
|
|
virUUIDFormat(dom->uuid, uuidstr);
|
|
|
|
qemuReportError(VIR_ERR_NO_DOMAIN,
|
|
|
|
_("no domain with matching uuid '%s'"), uuidstr);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!path || path[0] == '\0') {
|
|
|
|
qemuReportError(VIR_ERR_INVALID_ARG,
|
|
|
|
"%s", _("NULL or empty path"));
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Check the path belongs to this domain. */
|
2011-09-08 09:10:14 +00:00
|
|
|
if ((i = virDomainDiskIndexByName(vm->def, path, false)) < 0) {
|
2010-04-27 19:31:09 +00:00
|
|
|
qemuReportError(VIR_ERR_INVALID_ARG,
|
|
|
|
_("invalid path %s not assigned to domain"), path);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
2011-09-08 09:10:14 +00:00
|
|
|
disk = vm->def->disks[i];
|
|
|
|
if (!disk->src) {
|
|
|
|
qemuReportError(VIR_ERR_INVALID_ARG,
|
|
|
|
_("disk %s does not currently have a source assigned"),
|
|
|
|
path);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
path = disk->src;
|
2010-04-27 19:31:09 +00:00
|
|
|
|
|
|
|
/* The path is correct, now try to open it and get its size. */
|
2011-08-20 02:38:36 +00:00
|
|
|
fd = open(path, O_RDONLY);
|
2010-04-27 19:31:09 +00:00
|
|
|
if (fd == -1) {
|
|
|
|
virReportSystemError(errno,
|
|
|
|
_("failed to open path '%s'"), path);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Probe for magic formats */
|
2010-06-15 15:15:51 +00:00
|
|
|
if (disk->driverType) {
|
|
|
|
if ((format = virStorageFileFormatTypeFromString(disk->driverType)) < 0) {
|
|
|
|
qemuReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
_("unknown disk format %s for %s"),
|
|
|
|
disk->driverType, disk->src);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
} else {
|
2010-06-15 16:58:58 +00:00
|
|
|
if (driver->allowDiskFormatProbing) {
|
|
|
|
if ((format = virStorageFileProbeFormat(disk->src)) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
} else {
|
|
|
|
qemuReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
_("no disk format for %s and probing is disabled"),
|
|
|
|
disk->src);
|
2010-06-15 15:15:51 +00:00
|
|
|
goto cleanup;
|
2010-06-15 16:58:58 +00:00
|
|
|
}
|
2010-06-15 15:15:51 +00:00
|
|
|
}
|
|
|
|
|
2011-07-28 13:42:57 +00:00
|
|
|
if (VIR_ALLOC(meta) < 0) {
|
|
|
|
virReportOOMError();
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2010-06-15 15:15:51 +00:00
|
|
|
if (virStorageFileGetMetadataFromFD(path, fd,
|
|
|
|
format,
|
2011-07-28 13:42:57 +00:00
|
|
|
meta) < 0)
|
2010-04-27 19:31:09 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
/* Get info for normal formats */
|
|
|
|
if (fstat(fd, &sb) < 0) {
|
|
|
|
virReportSystemError(errno,
|
|
|
|
_("cannot stat file '%s'"), path);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (S_ISREG(sb.st_mode)) {
|
2010-05-03 20:44:12 +00:00
|
|
|
#ifndef WIN32
|
2010-04-27 19:31:09 +00:00
|
|
|
info->physical = (unsigned long long)sb.st_blocks *
|
|
|
|
(unsigned long long)DEV_BSIZE;
|
|
|
|
#else
|
|
|
|
info->physical = sb.st_size;
|
|
|
|
#endif
|
|
|
|
/* Regular files may be sparse, so logical size (capacity) is not same
|
|
|
|
* as actual physical above
|
|
|
|
*/
|
|
|
|
info->capacity = sb.st_size;
|
|
|
|
} else {
|
|
|
|
/* NB. Because we configure with AC_SYS_LARGEFILE, off_t should
|
|
|
|
* be 64 bits on all platforms.
|
|
|
|
*/
|
2011-08-20 02:38:36 +00:00
|
|
|
end = lseek(fd, 0, SEEK_END);
|
2010-04-27 19:31:09 +00:00
|
|
|
if (end == (off_t)-1) {
|
|
|
|
virReportSystemError(errno,
|
|
|
|
_("failed to seek to end of %s"), path);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
info->physical = end;
|
|
|
|
info->capacity = end;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* If the file we probed has a capacity set, then override
|
|
|
|
* what we calculated from file/block extents */
|
2011-07-28 13:42:57 +00:00
|
|
|
if (meta->capacity)
|
|
|
|
info->capacity = meta->capacity;
|
2010-04-27 19:31:09 +00:00
|
|
|
|
2010-05-14 13:10:01 +00:00
|
|
|
/* Set default value .. */
|
2010-04-27 19:31:09 +00:00
|
|
|
info->allocation = info->physical;
|
|
|
|
|
2010-05-14 13:10:01 +00:00
|
|
|
/* ..but if guest is running & not using raw
|
|
|
|
disk format and on a block device, then query
|
|
|
|
highest allocated extent from QEMU */
|
2010-10-26 15:31:19 +00:00
|
|
|
if (disk->type == VIR_DOMAIN_DISK_TYPE_BLOCK &&
|
2010-06-15 15:15:51 +00:00
|
|
|
format != VIR_STORAGE_FILE_RAW &&
|
2011-06-02 13:55:56 +00:00
|
|
|
S_ISBLK(sb.st_mode) &&
|
|
|
|
virDomainObjIsActive(vm)) {
|
2010-05-14 13:10:01 +00:00
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
2011-05-13 10:11:47 +00:00
|
|
|
|
2011-07-19 00:27:34 +00:00
|
|
|
if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
|
|
|
|
goto cleanup;
|
2011-05-13 10:11:47 +00:00
|
|
|
|
2011-07-19 00:27:34 +00:00
|
|
|
if (virDomainObjIsActive(vm)) {
|
2011-07-28 23:18:24 +00:00
|
|
|
qemuDomainObjEnterMonitor(driver, vm);
|
2011-07-19 00:27:34 +00:00
|
|
|
ret = qemuMonitorGetBlockExtent(priv->mon,
|
|
|
|
disk->info.alias,
|
|
|
|
&info->allocation);
|
|
|
|
qemuDomainObjExitMonitor(driver, vm);
|
2011-05-13 10:11:47 +00:00
|
|
|
} else {
|
2011-07-19 00:27:34 +00:00
|
|
|
ret = 0;
|
2011-05-13 10:11:47 +00:00
|
|
|
}
|
2011-07-19 00:27:34 +00:00
|
|
|
|
|
|
|
if (qemuDomainObjEndJob(driver, vm) == 0)
|
|
|
|
vm = NULL;
|
2010-05-14 13:10:01 +00:00
|
|
|
} else {
|
|
|
|
ret = 0;
|
|
|
|
}
|
2010-04-27 19:31:09 +00:00
|
|
|
|
|
|
|
cleanup:
|
2011-07-28 13:42:57 +00:00
|
|
|
virStorageFileFreeMetadata(meta);
|
2010-11-09 20:48:48 +00:00
|
|
|
VIR_FORCE_CLOSE(fd);
|
2010-04-27 19:31:09 +00:00
|
|
|
if (vm)
|
|
|
|
virDomainObjUnlock(vm);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2008-10-23 13:18:18 +00:00
|
|
|
static int
|
2010-03-18 14:47:07 +00:00
|
|
|
qemuDomainEventRegister(virConnectPtr conn,
|
|
|
|
virConnectDomainEventCallback callback,
|
|
|
|
void *opaque,
|
|
|
|
virFreeCallback freecb)
|
2008-10-23 13:18:18 +00:00
|
|
|
{
|
2008-12-04 21:04:30 +00:00
|
|
|
struct qemud_driver *driver = conn->privateData;
|
|
|
|
int ret;
|
|
|
|
|
2008-12-04 21:06:41 +00:00
|
|
|
qemuDriverLock(driver);
|
2011-05-12 12:54:07 +00:00
|
|
|
ret = virDomainEventCallbackListAdd(conn,
|
|
|
|
driver->domainEventState->callbacks,
|
2008-12-04 21:04:30 +00:00
|
|
|
callback, opaque, freecb);
|
2008-12-04 21:06:41 +00:00
|
|
|
qemuDriverUnlock(driver);
|
2008-10-23 13:18:18 +00:00
|
|
|
|
2008-12-04 21:04:30 +00:00
|
|
|
return ret;
|
2008-10-23 13:18:18 +00:00
|
|
|
}
|
|
|
|
|
2010-03-18 14:47:07 +00:00
|
|
|
|
2008-10-23 13:18:18 +00:00
|
|
|
static int
|
2010-03-18 14:47:07 +00:00
|
|
|
qemuDomainEventDeregister(virConnectPtr conn,
|
|
|
|
virConnectDomainEventCallback callback)
|
2008-10-23 13:18:18 +00:00
|
|
|
{
|
2008-12-04 21:04:30 +00:00
|
|
|
struct qemud_driver *driver = conn->privateData;
|
|
|
|
int ret;
|
|
|
|
|
2008-12-04 21:06:41 +00:00
|
|
|
qemuDriverLock(driver);
|
2011-05-12 12:54:07 +00:00
|
|
|
ret = virDomainEventStateDeregister(conn,
|
|
|
|
driver->domainEventState,
|
|
|
|
callback);
|
2008-12-04 21:06:41 +00:00
|
|
|
qemuDriverUnlock(driver);
|
2008-10-23 13:18:18 +00:00
|
|
|
|
2008-12-04 21:04:30 +00:00
|
|
|
return ret;
|
2008-10-23 13:18:18 +00:00
|
|
|
}
|
|
|
|
|
2010-03-18 14:47:07 +00:00
|
|
|
|
|
|
|
static int
|
|
|
|
qemuDomainEventRegisterAny(virConnectPtr conn,
|
|
|
|
virDomainPtr dom,
|
|
|
|
int eventID,
|
|
|
|
virConnectDomainEventGenericCallback callback,
|
|
|
|
void *opaque,
|
|
|
|
virFreeCallback freecb)
|
|
|
|
{
|
|
|
|
struct qemud_driver *driver = conn->privateData;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
qemuDriverLock(driver);
|
|
|
|
ret = virDomainEventCallbackListAddID(conn,
|
2011-05-12 12:54:07 +00:00
|
|
|
driver->domainEventState->callbacks,
|
2010-03-18 14:47:07 +00:00
|
|
|
dom, eventID,
|
|
|
|
callback, opaque, freecb);
|
|
|
|
qemuDriverUnlock(driver);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
qemuDomainEventDeregisterAny(virConnectPtr conn,
|
|
|
|
int callbackID)
|
|
|
|
{
|
|
|
|
struct qemud_driver *driver = conn->privateData;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
qemuDriverLock(driver);
|
2011-05-12 12:54:07 +00:00
|
|
|
ret = virDomainEventStateDeregisterAny(conn,
|
|
|
|
driver->domainEventState,
|
|
|
|
callbackID);
|
2010-03-18 14:47:07 +00:00
|
|
|
qemuDriverUnlock(driver);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-02-03 11:09:28 +00:00
|
|
|
/*******************************************************************
|
|
|
|
* Migration Protocol Version 2
|
|
|
|
*******************************************************************/
|
2008-11-14 08:42:47 +00:00
|
|
|
|
2009-09-30 10:51:54 +00:00
|
|
|
/* Prepare is the first step, and it runs on the destination host.
|
|
|
|
*
|
|
|
|
* This version starts an empty VM listening on a localhost TCP port, and
|
|
|
|
* sets up the corresponding virStream to handle the incoming data.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
qemudDomainMigratePrepareTunnel(virConnectPtr dconn,
|
|
|
|
virStreamPtr st,
|
|
|
|
unsigned long flags,
|
|
|
|
const char *dname,
|
|
|
|
unsigned long resource ATTRIBUTE_UNUSED,
|
|
|
|
const char *dom_xml)
|
|
|
|
{
|
|
|
|
struct qemud_driver *driver = dconn->privateData;
|
|
|
|
int ret = -1;
|
2010-06-24 18:15:27 +00:00
|
|
|
|
2011-07-14 21:46:49 +00:00
|
|
|
virCheckFlags(QEMU_MIGRATION_FLAGS, -1);
|
2011-02-03 11:09:28 +00:00
|
|
|
|
2011-05-18 16:34:21 +00:00
|
|
|
qemuDriverLock(driver);
|
|
|
|
|
2009-09-30 10:51:54 +00:00
|
|
|
if (!dom_xml) {
|
2010-02-09 18:15:41 +00:00
|
|
|
qemuReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
"%s", _("no domain XML passed"));
|
2009-09-30 10:51:54 +00:00
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
if (!(flags & VIR_MIGRATE_TUNNELLED)) {
|
2010-02-09 18:15:41 +00:00
|
|
|
qemuReportError(VIR_ERR_INTERNAL_ERROR,
|
2009-09-30 10:51:54 +00:00
|
|
|
"%s", _("PrepareTunnel called but no TUNNELLED flag set"));
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
if (st == NULL) {
|
2010-02-09 18:15:41 +00:00
|
|
|
qemuReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
"%s", _("tunnelled migration requested but NULL stream passed"));
|
2009-09-30 10:51:54 +00:00
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2011-05-18 16:34:21 +00:00
|
|
|
if (virLockManagerPluginUsesState(driver->lockManager)) {
|
|
|
|
qemuReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
_("Cannot use migrate v2 protocol with lock manager %s"),
|
|
|
|
virLockManagerPluginGetName(driver->lockManager));
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2011-01-24 18:06:16 +00:00
|
|
|
ret = qemuMigrationPrepareTunnel(driver, dconn,
|
|
|
|
NULL, 0, NULL, NULL, /* No cookies in v2 */
|
|
|
|
st, dname, dom_xml);
|
2010-06-24 18:15:27 +00:00
|
|
|
|
2009-09-30 10:51:54 +00:00
|
|
|
cleanup:
|
2011-05-18 16:34:21 +00:00
|
|
|
qemuDriverUnlock(driver);
|
2009-09-30 10:51:54 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2008-11-14 08:42:47 +00:00
|
|
|
/* Prepare is the first step, and it runs on the destination host.
|
|
|
|
*
|
|
|
|
* This starts an empty VM listening on a TCP port.
|
|
|
|
*/
|
2010-01-05 15:32:11 +00:00
|
|
|
static int ATTRIBUTE_NONNULL (5)
|
2008-11-14 08:42:47 +00:00
|
|
|
qemudDomainMigratePrepare2 (virConnectPtr dconn,
|
2011-05-18 16:34:08 +00:00
|
|
|
char **cookie ATTRIBUTE_UNUSED,
|
|
|
|
int *cookielen ATTRIBUTE_UNUSED,
|
2008-11-14 08:42:47 +00:00
|
|
|
const char *uri_in,
|
|
|
|
char **uri_out,
|
2009-09-30 10:51:54 +00:00
|
|
|
unsigned long flags,
|
2008-11-14 08:42:47 +00:00
|
|
|
const char *dname,
|
|
|
|
unsigned long resource ATTRIBUTE_UNUSED,
|
|
|
|
const char *dom_xml)
|
|
|
|
{
|
2008-12-04 21:04:30 +00:00
|
|
|
struct qemud_driver *driver = dconn->privateData;
|
2009-07-31 06:56:17 +00:00
|
|
|
int ret = -1;
|
2008-12-04 21:04:30 +00:00
|
|
|
|
2011-07-14 21:46:49 +00:00
|
|
|
virCheckFlags(QEMU_MIGRATION_FLAGS, -1);
|
2010-05-20 19:25:41 +00:00
|
|
|
|
2008-12-04 21:04:30 +00:00
|
|
|
*uri_out = NULL;
|
2008-11-14 08:42:47 +00:00
|
|
|
|
2008-12-04 21:06:41 +00:00
|
|
|
qemuDriverLock(driver);
|
2011-05-18 16:34:21 +00:00
|
|
|
|
|
|
|
if (virLockManagerPluginUsesState(driver->lockManager)) {
|
|
|
|
qemuReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
_("Cannot use migrate v2 protocol with lock manager %s"),
|
|
|
|
virLockManagerPluginGetName(driver->lockManager));
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2009-09-30 10:51:54 +00:00
|
|
|
if (flags & VIR_MIGRATE_TUNNELLED) {
|
|
|
|
/* this is a logical error; we never should have gotten here with
|
|
|
|
* VIR_MIGRATE_TUNNELLED set
|
|
|
|
*/
|
2010-02-09 18:15:41 +00:00
|
|
|
qemuReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
"%s", _("Tunnelled migration requested but invalid RPC method called"));
|
2009-09-30 10:51:54 +00:00
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2008-11-14 08:42:47 +00:00
|
|
|
if (!dom_xml) {
|
2010-02-09 18:15:41 +00:00
|
|
|
qemuReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
"%s", _("no domain XML passed"));
|
2008-12-04 21:04:30 +00:00
|
|
|
goto cleanup;
|
2008-11-14 08:42:47 +00:00
|
|
|
}
|
|
|
|
|
2011-05-18 16:34:08 +00:00
|
|
|
/* Do not use cookies in v2 protocol, since the cookie
|
|
|
|
* length was not sufficiently large, causing failures
|
|
|
|
* migrating between old & new libvirtd
|
|
|
|
*/
|
2011-01-31 10:47:03 +00:00
|
|
|
ret = qemuMigrationPrepareDirect(driver, dconn,
|
2011-05-18 16:34:08 +00:00
|
|
|
NULL, 0, NULL, NULL, /* No cookies */
|
2011-01-31 10:47:03 +00:00
|
|
|
uri_in, uri_out,
|
|
|
|
dname, dom_xml);
|
2008-11-14 08:42:47 +00:00
|
|
|
|
2011-01-31 10:47:03 +00:00
|
|
|
cleanup:
|
|
|
|
qemuDriverUnlock(driver);
|
|
|
|
return ret;
|
|
|
|
}
|
2010-05-20 17:16:30 +00:00
|
|
|
|
2008-11-14 08:42:47 +00:00
|
|
|
|
2011-01-31 10:47:03 +00:00
|
|
|
/* Perform is the second step, and it runs on the source host. */
|
|
|
|
static int
|
|
|
|
qemudDomainMigratePerform (virDomainPtr dom,
|
2011-05-18 16:34:08 +00:00
|
|
|
const char *cookie,
|
|
|
|
int cookielen,
|
2011-01-31 10:47:03 +00:00
|
|
|
const char *uri,
|
|
|
|
unsigned long flags,
|
|
|
|
const char *dname,
|
|
|
|
unsigned long resource)
|
|
|
|
{
|
|
|
|
struct qemud_driver *driver = dom->conn->privateData;
|
|
|
|
virDomainObjPtr vm;
|
|
|
|
int ret = -1;
|
Add a second URI parameter to virDomainMigratePerform3 method
The virDomainMigratePerform3 currently has a single URI parameter
whose meaning varies. It is either
- A QEMU migration URI (normal migration)
- A libvirtd connection URI (peer2peer migration)
Unfortunately when using peer2peer migration, without also
using tunnelled migration, it is possible that both URIs are
required.
This adds a second URI parameter to the virDomainMigratePerform3
method, to cope with this scenario. Each parameter now has a fixed
meaning.
NB, there is no way to actually take advantage of this yet,
since virDomainMigrate/virDomainMigrateToURI do not have any
way to provide the 2 separate URIs
* daemon/remote.c, src/remote/remote_driver.c,
src/remote/remote_protocol.x, src/remote_protocol-structs: Add
the second URI parameter to perform3 message
* src/driver.h, src/libvirt.c, src/libvirt_internal.h: Add
the second URI parameter to Perform3 method
* src/libvirt_internal.h, src/qemu/qemu_migration.c,
src/qemu/qemu_migration.h: Update to handle URIs correctly
2011-05-18 13:18:53 +00:00
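Purely as an illustration (the URI values below are invented, not taken from the source), the two parameters now have fixed, distinct roles in a peer-to-peer migration:

/* dconnuri names the destination libvirtd; uri, when supplied,
 * names the QEMU migration transport on that host. */
const char *dconnuri = "qemu+tcp://dest.example.com/system";
const char *miguri   = "tcp://dest.example.com:49152";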
|
|
|
const char *dconnuri = NULL;
|
2009-10-26 20:08:23 +00:00
|
|
|
|
2011-07-14 21:46:49 +00:00
|
|
|
virCheckFlags(QEMU_MIGRATION_FLAGS, -1);
|
2010-05-20 19:25:41 +00:00
|
|
|
|
2008-12-04 21:06:41 +00:00
|
|
|
qemuDriverLock(driver);
|
2011-05-18 16:34:21 +00:00
|
|
|
if (virLockManagerPluginUsesState(driver->lockManager)) {
|
|
|
|
qemuReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
_("Cannot use migrate v2 protocol with lock manager %s"),
|
|
|
|
virLockManagerPluginGetName(driver->lockManager));
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2009-04-19 15:30:50 +00:00
|
|
|
vm = virDomainFindByUUID(&driver->domains, dom->uuid);
|
2008-11-14 08:42:47 +00:00
|
|
|
if (!vm) {
|
2009-04-19 15:30:50 +00:00
|
|
|
char uuidstr[VIR_UUID_STRING_BUFLEN];
|
|
|
|
virUUIDFormat(dom->uuid, uuidstr);
|
2010-02-09 18:15:41 +00:00
|
|
|
qemuReportError(VIR_ERR_NO_DOMAIN,
|
|
|
|
_("no domain with matching uuid '%s'"), uuidstr);
|
2008-12-04 21:04:30 +00:00
|
|
|
goto cleanup;
|
2008-11-14 08:42:47 +00:00
|
|
|
}
|
|
|
|
|
2011-05-18 13:18:53 +00:00
|
|
|
if (flags & VIR_MIGRATE_PEER2PEER) {
|
|
|
|
dconnuri = uri;
|
|
|
|
uri = NULL;
|
|
|
|
}
|
|
|
|
|
2011-05-18 16:34:08 +00:00
|
|
|
/* Do not output cookies in v2 protocol, since the cookie
|
|
|
|
* length was not sufficiently large, causing failures
|
|
|
|
* migrating between old & new libvirtd.
|
|
|
|
*
|
|
|
|
* Consume any cookie we were able to decode though
|
|
|
|
*/
|
2011-01-31 10:47:03 +00:00
|
|
|
ret = qemuMigrationPerform(driver, dom->conn, vm,
|
2011-05-18 13:18:53 +00:00
|
|
|
NULL, dconnuri, uri, cookie, cookielen,
|
2011-01-24 18:06:16 +00:00
|
|
|
NULL, NULL, /* No output cookies in v2 */
|
2011-05-23 12:50:11 +00:00
|
|
|
flags, dname, resource, false);
|
2009-02-27 16:18:50 +00:00
|
|
|
|
2009-11-03 18:26:32 +00:00
|
|
|
cleanup:
|
2008-12-04 21:06:41 +00:00
|
|
|
qemuDriverUnlock(driver);
|
2008-12-04 21:04:30 +00:00
|
|
|
return ret;
|
2008-11-14 08:42:47 +00:00
|
|
|
}
|
|
|
|
|
2010-11-30 17:50:54 +00:00
|
|
|
|
2008-11-14 08:42:47 +00:00
|
|
|
/* Finish is the third and final step, and it runs on the destination host. */
|
|
|
|
static virDomainPtr
|
|
|
|
qemudDomainMigrateFinish2 (virConnectPtr dconn,
|
|
|
|
const char *dname,
|
|
|
|
const char *cookie ATTRIBUTE_UNUSED,
|
|
|
|
int cookielen ATTRIBUTE_UNUSED,
|
|
|
|
const char *uri ATTRIBUTE_UNUSED,
|
2009-07-31 10:10:22 +00:00
|
|
|
unsigned long flags,
|
2008-11-14 08:42:47 +00:00
|
|
|
int retcode)
|
|
|
|
{
|
2008-12-04 21:04:30 +00:00
|
|
|
struct qemud_driver *driver = dconn->privateData;
|
|
|
|
virDomainObjPtr vm;
|
|
|
|
virDomainPtr dom = NULL;
|
2008-11-14 08:42:47 +00:00
|
|
|
|
2011-07-14 21:46:49 +00:00
|
|
|
virCheckFlags(QEMU_MIGRATION_FLAGS, NULL);
|
2010-05-20 19:25:41 +00:00
|
|
|
|
2008-12-04 21:06:41 +00:00
|
|
|
qemuDriverLock(driver);
|
2008-12-04 21:04:30 +00:00
|
|
|
vm = virDomainFindByName(&driver->domains, dname);
|
2008-11-14 08:42:47 +00:00
|
|
|
if (!vm) {
|
2010-02-09 18:15:41 +00:00
|
|
|
qemuReportError(VIR_ERR_NO_DOMAIN,
|
|
|
|
_("no domain with matching name '%s'"), dname);
|
2008-12-04 21:04:30 +00:00
|
|
|
goto cleanup;
|
2008-11-14 08:42:47 +00:00
|
|
|
}
|
|
|
|
|
2011-05-18 16:34:08 +00:00
|
|
|
/* Do not use cookies in v2 protocol, since the cookie
|
|
|
|
* length was not sufficiently large, causing failures
|
|
|
|
* migrating between old & new libvirtd
|
|
|
|
*/
|
2011-01-24 18:06:16 +00:00
|
|
|
dom = qemuMigrationFinish(driver, dconn, vm,
|
2011-05-18 16:34:08 +00:00
|
|
|
NULL, 0, NULL, NULL, /* No cookies */
|
2011-05-23 12:50:11 +00:00
|
|
|
flags, retcode, false);
|
2009-11-03 18:26:32 +00:00
|
|
|
|
2008-12-04 21:04:30 +00:00
|
|
|
cleanup:
|
2008-12-04 21:06:41 +00:00
|
|
|
qemuDriverUnlock(driver);
|
2008-12-04 21:04:30 +00:00
|
|
|
return dom;
|
2008-11-14 08:42:47 +00:00
|
|
|
}
|
|
|
|
|
2011-01-31 10:47:03 +00:00
|
|
|
|
2011-02-03 11:09:28 +00:00
|
|
|
/*******************************************************************
|
|
|
|
* Migration Protocol Version 3
|
|
|
|
*******************************************************************/
|
|
|
|
|
|
|
|
static char *
|
|
|
|
qemuDomainMigrateBegin3(virDomainPtr domain,
|
2011-05-18 09:26:30 +00:00
|
|
|
const char *xmlin,
|
2011-02-03 11:09:28 +00:00
|
|
|
char **cookieout,
|
|
|
|
int *cookieoutlen,
|
|
|
|
unsigned long flags,
|
|
|
|
const char *dname ATTRIBUTE_UNUSED,
|
|
|
|
unsigned long resource ATTRIBUTE_UNUSED)
|
|
|
|
{
|
|
|
|
struct qemud_driver *driver = domain->conn->privateData;
|
|
|
|
virDomainObjPtr vm;
|
|
|
|
char *xml = NULL;
|
|
|
|
|
2011-07-14 21:46:49 +00:00
|
|
|
virCheckFlags(QEMU_MIGRATION_FLAGS, NULL);
|
2011-02-03 11:09:28 +00:00
|
|
|
|
|
|
|
qemuDriverLock(driver);
|
|
|
|
vm = virDomainFindByUUID(&driver->domains, domain->uuid);
|
|
|
|
if (!vm) {
|
|
|
|
char uuidstr[VIR_UUID_STRING_BUFLEN];
|
|
|
|
virUUIDFormat(domain->uuid, uuidstr);
|
|
|
|
qemuReportError(VIR_ERR_NO_DOMAIN,
|
|
|
|
_("no domain with matching uuid '%s'"), uuidstr);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2011-07-19 00:27:32 +00:00
|
|
|
if ((flags & VIR_MIGRATE_CHANGE_PROTECTION)) {
|
|
|
|
if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
} else {
|
|
|
|
if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!virDomainObjIsActive(vm)) {
|
|
|
|
qemuReportError(VIR_ERR_OPERATION_INVALID,
|
|
|
|
"%s", _("domain is not running"));
|
|
|
|
goto endjob;
|
|
|
|
}
|
|
|
|
|
2011-09-13 13:49:50 +00:00
|
|
|
/* Check if there is any ejected media.
|
|
|
|
* We don't want to require them on the destination.
|
|
|
|
*/
|
|
|
|
|
|
|
|
if (qemuDomainCheckEjectableMedia(driver, vm) < 0)
|
|
|
|
goto endjob;
|
|
|
|
|
2011-07-19 00:27:32 +00:00
|
|
|
if (!(xml = qemuMigrationBegin(driver, vm, xmlin,
|
|
|
|
cookieout, cookieoutlen)))
|
|
|
|
goto endjob;
|
|
|
|
|
|
|
|
if ((flags & VIR_MIGRATE_CHANGE_PROTECTION)) {
|
|
|
|
/* We keep the job active across API calls until the confirm() call.
|
|
|
|
* This prevents any other APIs being invoked while migration is taking
|
|
|
|
* place.
|
|
|
|
*/
|
|
|
|
if (qemuMigrationJobContinue(vm) == 0) {
|
|
|
|
vm = NULL;
|
|
|
|
qemuReportError(VIR_ERR_OPERATION_FAILED,
|
|
|
|
"%s", _("domain disappeared"));
|
|
|
|
VIR_FREE(xml);
|
|
|
|
if (cookieout)
|
|
|
|
VIR_FREE(*cookieout);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
goto endjob;
|
|
|
|
}
|
2011-02-03 11:09:28 +00:00
|
|
|
|
|
|
|
cleanup:
|
2011-07-19 00:27:32 +00:00
|
|
|
if (vm)
|
|
|
|
virDomainObjUnlock(vm);
|
2011-02-03 11:09:28 +00:00
|
|
|
qemuDriverUnlock(driver);
|
|
|
|
return xml;
|
2011-07-19 00:27:32 +00:00
|
|
|
|
|
|
|
endjob:
|
|
|
|
if ((flags & VIR_MIGRATE_CHANGE_PROTECTION)) {
|
|
|
|
if (qemuMigrationJobFinish(driver, vm) == 0)
|
|
|
|
vm = NULL;
|
|
|
|
} else {
|
|
|
|
if (qemuDomainObjEndJob(driver, vm) == 0)
|
|
|
|
vm = NULL;
|
|
|
|
}
|
|
|
|
goto cleanup;
|
2011-02-03 11:09:28 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
qemuDomainMigratePrepare3(virConnectPtr dconn,
|
|
|
|
const char *cookiein,
|
|
|
|
int cookieinlen,
|
|
|
|
char **cookieout,
|
|
|
|
int *cookieoutlen,
|
|
|
|
const char *uri_in,
|
|
|
|
char **uri_out,
|
|
|
|
unsigned long flags,
|
|
|
|
const char *dname,
|
|
|
|
unsigned long resource ATTRIBUTE_UNUSED,
|
|
|
|
const char *dom_xml)
|
|
|
|
{
|
|
|
|
struct qemud_driver *driver = dconn->privateData;
|
|
|
|
int ret = -1;
|
|
|
|
|
2011-07-14 21:46:49 +00:00
|
|
|
virCheckFlags(QEMU_MIGRATION_FLAGS, -1);
|
2011-02-03 11:09:28 +00:00
|
|
|
|
|
|
|
*uri_out = NULL;
|
|
|
|
|
|
|
|
qemuDriverLock(driver);
|
|
|
|
if (flags & VIR_MIGRATE_TUNNELLED) {
|
|
|
|
/* this is a logical error; we never should have gotten here with
|
|
|
|
* VIR_MIGRATE_TUNNELLED set
|
|
|
|
*/
|
|
|
|
qemuReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
"%s", _("Tunnelled migration requested but invalid RPC method called"));
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!dom_xml) {
|
|
|
|
qemuReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
"%s", _("no domain XML passed"));
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = qemuMigrationPrepareDirect(driver, dconn,
|
|
|
|
cookiein, cookieinlen,
|
|
|
|
cookieout, cookieoutlen,
|
|
|
|
uri_in, uri_out,
|
|
|
|
dname, dom_xml);
|
|
|
|
|
|
|
|
cleanup:
|
|
|
|
qemuDriverUnlock(driver);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
qemuDomainMigratePrepareTunnel3(virConnectPtr dconn,
|
|
|
|
virStreamPtr st,
|
|
|
|
const char *cookiein,
|
|
|
|
int cookieinlen,
|
|
|
|
char **cookieout,
|
|
|
|
int *cookieoutlen,
|
|
|
|
unsigned long flags,
|
|
|
|
const char *dname,
|
|
|
|
unsigned long resource ATTRIBUTE_UNUSED,
|
|
|
|
const char *dom_xml)
|
|
|
|
{
|
|
|
|
struct qemud_driver *driver = dconn->privateData;
|
|
|
|
int ret = -1;
|
|
|
|
|
2011-07-14 21:46:49 +00:00
|
|
|
virCheckFlags(QEMU_MIGRATION_FLAGS, -1);
|
2011-02-03 11:09:28 +00:00
|
|
|
|
|
|
|
if (!dom_xml) {
|
|
|
|
qemuReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
"%s", _("no domain XML passed"));
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
if (!(flags & VIR_MIGRATE_TUNNELLED)) {
|
|
|
|
qemuReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
"%s", _("PrepareTunnel called but no TUNNELLED flag set"));
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
if (st == NULL) {
|
|
|
|
qemuReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
"%s", _("tunnelled migration requested but NULL stream passed"));
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
qemuDriverLock(driver);
|
|
|
|
ret = qemuMigrationPrepareTunnel(driver, dconn,
|
|
|
|
cookiein, cookieinlen,
|
|
|
|
cookieout, cookieoutlen,
|
|
|
|
st, dname, dom_xml);
|
|
|
|
qemuDriverUnlock(driver);
|
|
|
|
|
|
|
|
cleanup:
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
qemuDomainMigratePerform3(virDomainPtr dom,
|
2011-05-18 09:26:30 +00:00
|
|
|
const char *xmlin,
|
2011-02-03 11:09:28 +00:00
|
|
|
const char *cookiein,
|
|
|
|
int cookieinlen,
|
|
|
|
char **cookieout,
|
|
|
|
int *cookieoutlen,
|
2011-05-18 13:18:53 +00:00
|
|
|
const char *dconnuri,
|
2011-02-03 11:09:28 +00:00
|
|
|
const char *uri,
|
|
|
|
unsigned long flags,
|
|
|
|
const char *dname,
|
|
|
|
unsigned long resource)
|
|
|
|
{
|
|
|
|
struct qemud_driver *driver = dom->conn->privateData;
|
|
|
|
virDomainObjPtr vm;
|
|
|
|
int ret = -1;
|
|
|
|
|
2011-07-14 21:46:49 +00:00
|
|
|
virCheckFlags(QEMU_MIGRATION_FLAGS, -1);
|
2011-02-03 11:09:28 +00:00
|
|
|
|
|
|
|
qemuDriverLock(driver);
|
|
|
|
vm = virDomainFindByUUID(&driver->domains, dom->uuid);
|
|
|
|
if (!vm) {
|
|
|
|
char uuidstr[VIR_UUID_STRING_BUFLEN];
|
|
|
|
virUUIDFormat(dom->uuid, uuidstr);
|
|
|
|
qemuReportError(VIR_ERR_NO_DOMAIN,
|
|
|
|
_("no domain with matching uuid '%s'"), uuidstr);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2011-05-18 09:26:30 +00:00
|
|
|
ret = qemuMigrationPerform(driver, dom->conn, vm, xmlin,
|
Add a second URI parameter to virDomainMigratePerform3 method
The virDomainMigratePerform3 currently has a single URI parameter
whose meaning varies. It is either
- A QEMU migration URI (normal migration)
- A libvirtd connection URI (peer2peer migration)
Unfortunately when using peer2peer migration, without also
using tunnelled migration, it is possible that both URIs are
required.
This adds a second URI parameter to the virDomainMigratePerform3
method, to cope with this scenario. Each parameter how has a fixed
meaning.
NB, there is no way to actually take advantage of this yet,
since virDomainMigrate/virDomainMigrateToURI do not have any
way to provide the 2 separate URIs
* daemon/remote.c, src/remote/remote_driver.c,
src/remote/remote_protocol.x, src/remote_protocol-structs: Add
the second URI parameter to perform3 message
* src/driver.h, src/libvirt.c, src/libvirt_internal.h: Add
the second URI parameter to Perform3 method
* src/libvirt_internal.h, src/qemu/qemu_migration.c,
src/qemu/qemu_migration.h: Update to handle URIs correctly
2011-05-18 13:18:53 +00:00
|
|
|
dconnuri, uri, cookiein, cookieinlen,
|
2011-02-03 11:09:28 +00:00
|
|
|
cookieout, cookieoutlen,
|
2011-05-23 12:50:11 +00:00
|
|
|
flags, dname, resource, true);
|
2011-02-03 11:09:28 +00:00
|
|
|
|
|
|
|
cleanup:
|
|
|
|
qemuDriverUnlock(driver);
|
|
|
|
return ret;
|
|
|
|
}
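/* Finish step of the v3 migration protocol, run on the destination once
 * the source has completed Perform; returns the new domain object (or
 * NULL on failure) so that error details can propagate to the client. */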
static virDomainPtr
qemuDomainMigrateFinish3(virConnectPtr dconn,
                         const char *dname,
                         const char *cookiein,
                         int cookieinlen,
                         char **cookieout,
                         int *cookieoutlen,
                         const char *dconnuri ATTRIBUTE_UNUSED,
                         const char *uri ATTRIBUTE_UNUSED,
                         unsigned long flags,
                         int cancelled)
{
    struct qemud_driver *driver = dconn->privateData;
    virDomainObjPtr vm;
    virDomainPtr dom = NULL;

    virCheckFlags(QEMU_MIGRATION_FLAGS, NULL);

    qemuDriverLock(driver);
    vm = virDomainFindByName(&driver->domains, dname);
    if (!vm) {
        qemuReportError(VIR_ERR_NO_DOMAIN,
                        _("no domain with matching name '%s'"), dname);
        goto cleanup;
    }

    dom = qemuMigrationFinish(driver, dconn, vm,
                              cookiein, cookieinlen,
                              cookieout, cookieoutlen,
                              flags, cancelled, true);

cleanup:
    qemuDriverUnlock(driver);
    return dom;
}


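/* Confirm step of the v3 migration protocol, run on the source after the
 * destination reports the outcome; finishes the outgoing migration job
 * and removes the source domain when it is no longer needed. */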
static int
qemuDomainMigrateConfirm3(virDomainPtr domain,
                          const char *cookiein,
                          int cookieinlen,
                          unsigned long flags,
                          int cancelled)
{
    struct qemud_driver *driver = domain->conn->privateData;
    virDomainObjPtr vm;
    int ret = -1;
    enum qemuMigrationJobPhase phase;

    virCheckFlags(QEMU_MIGRATION_FLAGS, -1);

    qemuDriverLock(driver);
    vm = virDomainFindByUUID(&driver->domains, domain->uuid);
    if (!vm) {
        char uuidstr[VIR_UUID_STRING_BUFLEN];
        virUUIDFormat(domain->uuid, uuidstr);
        qemuReportError(VIR_ERR_NO_DOMAIN,
                        _("no domain with matching uuid '%s'"), uuidstr);
        goto cleanup;
    }

    if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_OUT))
        goto cleanup;

    if (cancelled)
        phase = QEMU_MIGRATION_PHASE_CONFIRM3_CANCELLED;
    else
        phase = QEMU_MIGRATION_PHASE_CONFIRM3;

    qemuMigrationJobStartPhase(driver, vm, phase);

    ret = qemuMigrationConfirm(driver, domain->conn, vm,
                               cookiein, cookieinlen,
                               flags, cancelled);

    if (qemuMigrationJobFinish(driver, vm) == 0) {
        vm = NULL;
    } else if (!virDomainObjIsActive(vm) &&
               (!vm->persistent || (flags & VIR_MIGRATE_UNDEFINE_SOURCE))) {
        if (flags & VIR_MIGRATE_UNDEFINE_SOURCE)
            virDomainDeleteConfig(driver->configDir, driver->autostartDir, vm);
        qemuDomainRemoveInactive(driver, vm);
        vm = NULL;
    }

cleanup:
    if (vm)
        virDomainObjUnlock(vm);
    qemuDriverUnlock(driver);
    return ret;
}


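/* Helper: look up the PCI address (domain/bus/slot/function) of a node
 * device by parsing its XML description; fails if the device has no PCI
 * capability. */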
static int
qemudNodeDeviceGetPciInfo (virNodeDevicePtr dev,
                           unsigned *domain,
                           unsigned *bus,
                           unsigned *slot,
                           unsigned *function)
{
    virNodeDeviceDefPtr def = NULL;
    virNodeDevCapsDefPtr cap;
    char *xml = NULL;
    int ret = -1;

    xml = virNodeDeviceGetXMLDesc(dev, 0);
    if (!xml)
        goto out;

    def = virNodeDeviceDefParseString(xml, EXISTING_DEVICE);
    if (!def)
        goto out;

    cap = def->caps;
    while (cap) {
        if (cap->type == VIR_NODE_DEV_CAP_PCI_DEV) {
            *domain   = cap->data.pci_dev.domain;
            *bus      = cap->data.pci_dev.bus;
            *slot     = cap->data.pci_dev.slot;
            *function = cap->data.pci_dev.function;
            break;
        }

        cap = cap->next;
    }

    if (!cap) {
        qemuReportError(VIR_ERR_INVALID_ARG,
                        _("device %s is not a PCI device"), dev->name);
        goto out;
    }

    ret = 0;
out:
    virNodeDeviceDefFree(def);
    VIR_FREE(xml);
    return ret;
}


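/* The node device detach/reattach/reset helpers below all resolve the
 * device to a PCI address first, then operate on it while holding the
 * driver lock, consulting the list of PCI host devices currently
 * assigned to guests (driver->activePciHostdevs). */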
static int
qemudNodeDeviceDettach (virNodeDevicePtr dev)
{
    struct qemud_driver *driver = dev->conn->privateData;
    pciDevice *pci;
    unsigned domain, bus, slot, function;
    int ret = -1;

    if (qemudNodeDeviceGetPciInfo(dev, &domain, &bus, &slot, &function) < 0)
        return -1;

    pci = pciGetDevice(domain, bus, slot, function);
    if (!pci)
        return -1;

    qemuDriverLock(driver);
    if (pciDettachDevice(pci, driver->activePciHostdevs) < 0)
        goto out;

    ret = 0;
out:
    qemuDriverUnlock(driver);
    pciFreeDevice(pci);
    return ret;
}


static int
qemudNodeDeviceReAttach (virNodeDevicePtr dev)
{
    struct qemud_driver *driver = dev->conn->privateData;
    pciDevice *pci;
    unsigned domain, bus, slot, function;
    int ret = -1;

    if (qemudNodeDeviceGetPciInfo(dev, &domain, &bus, &slot, &function) < 0)
        return -1;

    pci = pciGetDevice(domain, bus, slot, function);
    if (!pci)
        return -1;

    pciDeviceReAttachInit(pci);

    qemuDriverLock(driver);
    if (pciReAttachDevice(pci, driver->activePciHostdevs) < 0)
        goto out;

    ret = 0;
out:
    qemuDriverUnlock(driver);
    pciFreeDevice(pci);
    return ret;
}


static int
qemudNodeDeviceReset (virNodeDevicePtr dev)
{
    struct qemud_driver *driver = dev->conn->privateData;
    pciDevice *pci;
    unsigned domain, bus, slot, function;
    int ret = -1;

    if (qemudNodeDeviceGetPciInfo(dev, &domain, &bus, &slot, &function) < 0)
        return -1;

    pci = pciGetDevice(domain, bus, slot, function);
    if (!pci)
        return -1;

    qemuDriverLock(driver);

    if (pciResetDevice(pci, driver->activePciHostdevs, NULL) < 0)
        goto out;

    ret = 0;
out:
    qemuDriverUnlock(driver);
    pciFreeDevice(pci);
    return ret;
}


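/* Compare a CPU description in XML against the host CPU recorded in the
 * driver capabilities. */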
static int
qemuCPUCompare(virConnectPtr conn,
               const char *xmlDesc,
               unsigned int flags)
{
    struct qemud_driver *driver = conn->privateData;
    int ret = VIR_CPU_COMPARE_ERROR;

    virCheckFlags(0, VIR_CPU_COMPARE_ERROR);

    qemuDriverLock(driver);

    if (!driver->caps || !driver->caps->host.cpu) {
        qemuReportError(VIR_ERR_OPERATION_INVALID,
                        "%s", _("cannot get host CPU capabilities"));
    } else {
        ret = cpuCompareXML(driver->caps->host.cpu, xmlDesc);
    }

    qemuDriverUnlock(driver);

    return ret;
}


static char *
qemuCPUBaseline(virConnectPtr conn ATTRIBUTE_UNUSED,
                const char **xmlCPUs,
                unsigned int ncpus,
                unsigned int flags)
{
    char *cpu;

    virCheckFlags(0, NULL);

    cpu = cpuBaselineXML(xmlCPUs, ncpus, NULL, 0);

    return cpu;
}


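/* Report progress of the current asynchronous job (e.g. migration) for a
 * running domain; returns an empty VIR_DOMAIN_JOB_NONE record when no
 * async job is active. */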
static int qemuDomainGetJobInfo(virDomainPtr dom,
                                virDomainJobInfoPtr info) {
    struct qemud_driver *driver = dom->conn->privateData;
    virDomainObjPtr vm;
    int ret = -1;
    qemuDomainObjPrivatePtr priv;

    qemuDriverLock(driver);
    vm = virDomainFindByUUID(&driver->domains, dom->uuid);
    qemuDriverUnlock(driver);
    if (!vm) {
        char uuidstr[VIR_UUID_STRING_BUFLEN];
        virUUIDFormat(dom->uuid, uuidstr);
        qemuReportError(VIR_ERR_NO_DOMAIN,
                        _("no domain with matching uuid '%s'"), uuidstr);
        goto cleanup;
    }

    priv = vm->privateData;

    if (virDomainObjIsActive(vm)) {
        if (priv->job.asyncJob) {
            memcpy(info, &priv->job.info, sizeof(*info));

            /* Refresh elapsed time again just to ensure it
             * is fully updated. This is primarily for benefit
             * of incoming migration which we don't currently
             * monitor actively in the background thread
             */
            if (virTimeMs(&info->timeElapsed) < 0)
                goto cleanup;
            info->timeElapsed -= priv->job.start;
        } else {
            memset(info, 0, sizeof(*info));
            info->type = VIR_DOMAIN_JOB_NONE;
        }
    } else {
        qemuReportError(VIR_ERR_OPERATION_INVALID,
                        "%s", _("domain is not running"));
        goto cleanup;
    }

    ret = 0;

cleanup:
    if (vm)
        virDomainObjUnlock(vm);
    return ret;
}


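/* Abort the currently running asynchronous job by issuing a migrate
 * cancel on the monitor; incoming migration cannot be aborted this way. */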
static int qemuDomainAbortJob(virDomainPtr dom) {
    struct qemud_driver *driver = dom->conn->privateData;
    virDomainObjPtr vm;
    int ret = -1;
    qemuDomainObjPrivatePtr priv;

    qemuDriverLock(driver);
    vm = virDomainFindByUUID(&driver->domains, dom->uuid);
    qemuDriverUnlock(driver);
    if (!vm) {
        char uuidstr[VIR_UUID_STRING_BUFLEN];
        virUUIDFormat(dom->uuid, uuidstr);
        qemuReportError(VIR_ERR_NO_DOMAIN,
                        _("no domain with matching uuid '%s'"), uuidstr);
        goto cleanup;
    }

    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_ABORT) < 0)
        goto cleanup;

    if (!virDomainObjIsActive(vm)) {
        qemuReportError(VIR_ERR_OPERATION_INVALID,
                        "%s", _("domain is not running"));
        goto endjob;
    }

    priv = vm->privateData;

    if (!priv->job.asyncJob) {
        qemuReportError(VIR_ERR_OPERATION_INVALID,
                        "%s", _("no job is active on the domain"));
        goto endjob;
    } else if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_IN) {
        qemuReportError(VIR_ERR_OPERATION_INVALID, "%s",
                        _("cannot abort incoming migration;"
                          " use virDomainDestroy instead"));
        goto endjob;
    }

    VIR_DEBUG("Cancelling job at client request");
    qemuDomainObjEnterMonitor(driver, vm);
    ret = qemuMonitorMigrateCancel(priv->mon);
    qemuDomainObjExitMonitor(driver, vm);

endjob:
    if (qemuDomainObjEndJob(driver, vm) == 0)
        vm = NULL;

cleanup:
    if (vm)
        virDomainObjUnlock(vm);
    return ret;
}


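/* Tune the maximum allowed downtime of an outgoing migration that is
 * already in progress. */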
static int
qemuDomainMigrateSetMaxDowntime(virDomainPtr dom,
                                unsigned long long downtime,
                                unsigned int flags)
{
    struct qemud_driver *driver = dom->conn->privateData;
    virDomainObjPtr vm;
    qemuDomainObjPrivatePtr priv;
    int ret = -1;

    virCheckFlags(0, -1);

    qemuDriverLock(driver);
    vm = virDomainFindByUUID(&driver->domains, dom->uuid);
    qemuDriverUnlock(driver);

    if (!vm) {
        char uuidstr[VIR_UUID_STRING_BUFLEN];
        virUUIDFormat(dom->uuid, uuidstr);
        qemuReportError(VIR_ERR_NO_DOMAIN,
                        _("no domain with matching uuid '%s'"), uuidstr);
        return -1;
    }

    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MIGRATION_OP) < 0)
        goto cleanup;

    if (!virDomainObjIsActive(vm)) {
        qemuReportError(VIR_ERR_OPERATION_INVALID,
                        "%s", _("domain is not running"));
        goto endjob;
    }

    priv = vm->privateData;

    if (priv->job.asyncJob != QEMU_ASYNC_JOB_MIGRATION_OUT) {
        qemuReportError(VIR_ERR_OPERATION_INVALID,
                        "%s", _("domain is not being migrated"));
        goto endjob;
    }

    VIR_DEBUG("Setting migration downtime to %llums", downtime);
    qemuDomainObjEnterMonitor(driver, vm);
    ret = qemuMonitorSetMigrationDowntime(priv->mon, downtime);
    qemuDomainObjExitMonitor(driver, vm);

endjob:
    if (qemuDomainObjEndJob(driver, vm) == 0)
        vm = NULL;

cleanup:
    if (vm)
        virDomainObjUnlock(vm);
    return ret;
}


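/* Set the migration bandwidth limit; applied immediately via the monitor
 * for a running domain, otherwise just recorded for the next migration. */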
static int
qemuDomainMigrateSetMaxSpeed(virDomainPtr dom,
                             unsigned long bandwidth,
                             unsigned int flags)
{
    struct qemud_driver *driver = dom->conn->privateData;
    virDomainObjPtr vm;
    qemuDomainObjPrivatePtr priv;
    int ret = -1;

    virCheckFlags(0, -1);

    qemuDriverLock(driver);
    vm = virDomainFindByUUID(&driver->domains, dom->uuid);
    qemuDriverUnlock(driver);

    if (!vm) {
        char uuidstr[VIR_UUID_STRING_BUFLEN];
        virUUIDFormat(dom->uuid, uuidstr);
        qemuReportError(VIR_ERR_NO_DOMAIN,
                        _("no domain with matching uuid '%s'"), uuidstr);
        return -1;
    }

    priv = vm->privateData;
    if (virDomainObjIsActive(vm)) {
        if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MIGRATION_OP) < 0)
            goto cleanup;

        VIR_DEBUG("Setting migration bandwidth to %luMbs", bandwidth);
        qemuDomainObjEnterMonitor(driver, vm);
        ret = qemuMonitorSetMigrationSpeed(priv->mon, bandwidth);
        qemuDomainObjExitMonitor(driver, vm);

        if (ret == 0)
            priv->migMaxBandwidth = bandwidth;

        if (qemuDomainObjEndJob(driver, vm) == 0)
            vm = NULL;
    } else {
        priv->migMaxBandwidth = bandwidth;
        ret = 0;
    }

cleanup:
    if (vm)
        virDomainObjUnlock(vm);
    return ret;
}


static int
qemuDomainMigrateGetMaxSpeed(virDomainPtr dom,
                             unsigned long *bandwidth,
                             unsigned int flags)
{
    struct qemud_driver *driver = dom->conn->privateData;
    virDomainObjPtr vm;
    qemuDomainObjPrivatePtr priv;
    int ret = -1;

    virCheckFlags(0, -1);

    qemuDriverLock(driver);
    vm = virDomainFindByUUID(&driver->domains, dom->uuid);
    qemuDriverUnlock(driver);

    if (!vm) {
        char uuidstr[VIR_UUID_STRING_BUFLEN];
        virUUIDFormat(dom->uuid, uuidstr);
        qemuReportError(VIR_ERR_NO_DOMAIN,
                        _("no domain with matching uuid '%s'"), uuidstr);
        goto cleanup;
    }

    priv = vm->privateData;
    *bandwidth = priv->migMaxBandwidth;
    ret = 0;

cleanup:
    if (vm)
        virDomainObjUnlock(vm);
    return ret;
}


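/* Check whether every disk of the domain can take an internal snapshot
 * (currently only qcow2-backed disks qualify). */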
static int qemuDomainSnapshotIsAllowed(virDomainObjPtr vm)
{
    int i;

    /* FIXME: we need to figure out what else here might succeed; in
     * particular, if it's a raw device but on LVM, we could probably make
     * that succeed as well
     */
    for (i = 0; i < vm->def->ndisks; i++) {
        if (vm->def->disks[i]->device == VIR_DOMAIN_DISK_DEVICE_DISK &&
            (!vm->def->disks[i]->driverType ||
             STRNEQ(vm->def->disks[i]->driverType, "qcow2"))) {
            qemuReportError(VIR_ERR_OPERATION_INVALID,
                            _("Disk '%s' does not support snapshotting"),
                            vm->def->disks[i]->src);
            return 0;
        }
    }

    return 1;
}


/* The domain is expected to be locked and inactive. */
static int
qemuDomainSnapshotCreateInactive(struct qemud_driver *driver,
                                 virDomainObjPtr vm,
                                 virDomainSnapshotObjPtr snap)
{
    return qemuDomainSnapshotForEachQcow2(driver, vm, snap, "-c", false);
}


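/* Create a full system checkpoint of a running domain using the 'savevm'
 * monitor command, pausing and resuming CPUs around it. */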
/* The domain is expected to be locked and active. */
static int
qemuDomainSnapshotCreateActive(virConnectPtr conn,
                               struct qemud_driver *driver,
                               virDomainObjPtr *vmptr,
                               virDomainSnapshotObjPtr snap,
                               unsigned int flags)
{
    virDomainObjPtr vm = *vmptr;
    qemuDomainObjPrivatePtr priv = vm->privateData;
    bool resume = false;
    int ret = -1;

    if (qemuDomainObjBeginJobWithDriver(driver, vm, QEMU_JOB_MODIFY) < 0)
        return -1;

    if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
        /* savevm monitor command pauses the domain emitting an event which
         * confuses libvirt since it's not notified when qemu resumes the
         * domain. Thus we stop and start CPUs ourselves.
         */
        if (qemuProcessStopCPUs(driver, vm, VIR_DOMAIN_PAUSED_SAVE,
                                QEMU_ASYNC_JOB_NONE) < 0)
            goto cleanup;

        resume = true;
        if (!virDomainObjIsActive(vm)) {
            qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                            _("guest unexpectedly quit"));
            goto cleanup;
        }
    }

    qemuDomainObjEnterMonitorWithDriver(driver, vm);
    ret = qemuMonitorCreateSnapshot(priv->mon, snap->def->name);
    qemuDomainObjExitMonitorWithDriver(driver, vm);
    if (ret < 0)
        goto cleanup;

    if (flags & VIR_DOMAIN_SNAPSHOT_CREATE_HALT) {
        virDomainEventPtr event;

        event = virDomainEventNewFromObj(vm, VIR_DOMAIN_EVENT_STOPPED,
                                         VIR_DOMAIN_EVENT_STOPPED_FROM_SNAPSHOT);
        qemuProcessStop(driver, vm, 0, VIR_DOMAIN_SHUTOFF_FROM_SNAPSHOT);
        virDomainAuditStop(vm, "from-snapshot");
        /* We already filtered the _HALT flag for persistent domains
         * only, so this end job never drops the last reference. */
        ignore_value(qemuDomainObjEndJob(driver, vm));
        resume = false;
        vm = NULL;
        if (event)
            qemuDomainEventQueue(driver, event);
    }

cleanup:
    if (resume && virDomainObjIsActive(vm) &&
        qemuProcessStartCPUs(driver, vm, conn,
                             VIR_DOMAIN_RUNNING_UNPAUSED,
                             QEMU_ASYNC_JOB_NONE) < 0 &&
        virGetLastError() == NULL) {
        qemuReportError(VIR_ERR_OPERATION_FAILED, "%s",
                        _("resuming after snapshot failed"));
    }

    if (vm && qemuDomainObjEndJob(driver, vm) == 0) {
        /* Only possible if a transient vm quit while our locks were down,
         * in which case we don't want to save snapshot metadata. */
        *vmptr = NULL;
        ret = -1;
    }

    return ret;
}


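/* Validate the per-disk snapshot configuration before any disk snapshot
 * is attempted, defaulting external snapshot images to qcow2 format. */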
static int
qemuDomainSnapshotDiskPrepare(virDomainObjPtr vm, virDomainSnapshotDefPtr def)
{
    int ret = -1;
    int i;
    bool found = false;
    bool active = virDomainObjIsActive(vm);
    struct stat st;

    for (i = 0; i < def->ndisks; i++) {
        virDomainSnapshotDiskDefPtr disk = &def->disks[i];

        switch (disk->snapshot) {
        case VIR_DOMAIN_DISK_SNAPSHOT_INTERNAL:
            if (active) {
                qemuReportError(VIR_ERR_CONFIG_UNSUPPORTED,
                                _("active qemu domains require external disk "
                                  "snapshots; disk %s requested internal"),
                                disk->name);
                goto cleanup;
            }
            if (!vm->def->disks[i]->driverType ||
                STRNEQ(vm->def->disks[i]->driverType, "qcow2")) {
                qemuReportError(VIR_ERR_CONFIG_UNSUPPORTED,
                                _("internal snapshot for disk %s unsupported "
                                  "for storage type %s"),
                                disk->name,
                                NULLSTR(vm->def->disks[i]->driverType));
                goto cleanup;
            }
            found = true;
            break;

        case VIR_DOMAIN_DISK_SNAPSHOT_EXTERNAL:
            if (!disk->driverType) {
                if (!(disk->driverType = strdup("qcow2"))) {
                    virReportOOMError();
                    goto cleanup;
                }
            } else if (STRNEQ(disk->driverType, "qcow2")) {
                /* XXX We should also support QED */
                qemuReportError(VIR_ERR_CONFIG_UNSUPPORTED,
                                _("external snapshot format for disk %s "
                                  "is unsupported: %s"),
                                disk->name, disk->driverType);
                goto cleanup;
            }
            if (stat(disk->file, &st) < 0) {
                if (errno != ENOENT) {
                    virReportSystemError(errno,
                                         _("unable to stat for disk %s: %s"),
                                         disk->name, disk->file);
                    goto cleanup;
                }
            } else if (!S_ISBLK(st.st_mode)) {
                qemuReportError(VIR_ERR_CONFIG_UNSUPPORTED,
                                _("external snapshot file for disk %s already "
                                  "exists and is not a block device: %s"),
                                disk->name, disk->file);
                goto cleanup;
            }
            found = true;
            break;

        case VIR_DOMAIN_DISK_SNAPSHOT_NO:
            break;

        case VIR_DOMAIN_DISK_SNAPSHOT_DEFAULT:
        default:
            qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                            _("unexpected code path"));
            goto cleanup;
        }
    }

    if (!found) {
        qemuReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                        _("disk snapshots require at least one disk to be "
                          "selected for snapshot"));
        goto cleanup;
    }

    ret = 0;

cleanup:
    return ret;
}


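/* Take an external snapshot of a single disk of a running domain:
 * pre-create and label the new image, issue the disk snapshot via the
 * monitor, then update the live (and optionally persistent) disk
 * definition in place. */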
/* The domain is expected to hold monitor lock. */
static int
qemuDomainSnapshotCreateSingleDiskActive(struct qemud_driver *driver,
                                         virDomainObjPtr vm,
                                         virDomainSnapshotDiskDefPtr snap,
                                         virDomainDiskDefPtr disk,
                                         virDomainDiskDefPtr persistDisk)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    char *device = NULL;
    char *source = NULL;
    char *driverType = NULL;
    char *persistSource = NULL;
    char *persistDriverType = NULL;
    int ret = -1;
    int fd = -1;
    char *origsrc = NULL;
    char *origdriver = NULL;
    bool need_unlink = false;

    if (snap->snapshot != VIR_DOMAIN_DISK_SNAPSHOT_EXTERNAL) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                        _("unexpected code path"));
        return -1;
    }

    if (virAsprintf(&device, "drive-%s", disk->info.alias) < 0 ||
        !(source = strdup(snap->file)) ||
        (STRNEQ_NULLABLE(disk->driverType, "qcow2") &&
         !(driverType = strdup("qcow2"))) ||
        (persistDisk &&
         (!(persistSource = strdup(source)) ||
          (STRNEQ_NULLABLE(persistDisk->driverType, "qcow2") &&
           !(persistDriverType = strdup("qcow2")))))) {
        virReportOOMError();
        goto cleanup;
    }

    /* create the stub file and set selinux labels; manipulate disk in
     * place, in a way that can be reverted on failure. */
    fd = qemuOpenFile(driver, source, O_WRONLY | O_TRUNC | O_CREAT,
                      &need_unlink, NULL);
    if (fd < 0)
        goto cleanup;
    VIR_FORCE_CLOSE(fd);

    origsrc = disk->src;
    disk->src = source;
    origdriver = disk->driverType;
    disk->driverType = (char *) "raw"; /* Don't want to probe backing files */

    if (virDomainLockDiskAttach(driver->lockManager, vm, disk) < 0)
        goto cleanup;
    if (virSecurityManagerSetImageLabel(driver->securityManager, vm,
                                        disk) < 0) {
        if (virDomainLockDiskDetach(driver->lockManager, vm, disk) < 0)
            VIR_WARN("Unable to release lock on %s", source);
        goto cleanup;
    }
    need_unlink = false;

    disk->src = origsrc;
    origsrc = NULL;
    disk->driverType = origdriver;
    origdriver = NULL;

    /* create the actual snapshot */
    ret = qemuMonitorDiskSnapshot(priv->mon, device, source);
    virDomainAuditDisk(vm, disk->src, source, "snapshot", ret >= 0);
    if (ret < 0)
        goto cleanup;

    /* Update vm in place to match changes. */
    VIR_FREE(disk->src);
    disk->src = source;
    source = NULL;
    if (driverType) {
        VIR_FREE(disk->driverType);
        disk->driverType = driverType;
        driverType = NULL;
    }
    if (persistDisk) {
        VIR_FREE(persistDisk->src);
        persistDisk->src = persistSource;
        persistSource = NULL;
        if (persistDriverType) {
            VIR_FREE(persistDisk->driverType);
            persistDisk->driverType = persistDriverType;
            persistDriverType = NULL;
        }
    }

cleanup:
    if (origsrc) {
        disk->src = origsrc;
        disk->driverType = origdriver;
    }
    if (need_unlink && unlink(source))
        VIR_WARN("unable to unlink just-created %s", source);
    VIR_FREE(device);
    VIR_FREE(source);
    VIR_FREE(driverType);
    VIR_FREE(persistSource);
    VIR_FREE(persistDriverType);
    return ret;
}


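/* Take external snapshots of all requested disks of a running domain,
 * pausing CPUs so that the individual disk snapshots are consistent with
 * each other. */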
/* The domain is expected to be locked and active. */
static int
qemuDomainSnapshotCreateDiskActive(virConnectPtr conn,
                                   struct qemud_driver *driver,
                                   virDomainObjPtr *vmptr,
                                   virDomainSnapshotObjPtr snap,
                                   unsigned int flags)
{
    virDomainObjPtr vm = *vmptr;
    bool resume = false;
    int ret = -1;
    int i;
    bool persist = false;

    if (qemuDomainObjBeginJobWithDriver(driver, vm, QEMU_JOB_MODIFY) < 0)
        return -1;

    if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
        /* In qemu, snapshot_blkdev on a single disk will pause cpus,
         * but this confuses libvirt since notifications are not given
         * when qemu resumes. And for multiple disks, libvirt must
         * pause externally to get all snapshots to be at the same
         * point in time. For simplicity, we always pause ourselves
         * rather than relying on qemu doing pause.
         */
        if (qemuProcessStopCPUs(driver, vm, VIR_DOMAIN_PAUSED_SAVE,
                                QEMU_ASYNC_JOB_NONE) < 0)
            goto cleanup;

        resume = true;
        if (!virDomainObjIsActive(vm)) {
            qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                            _("guest unexpectedly quit"));
            goto cleanup;
        }
    }

    /* No way to roll back if first disk succeeds but later disks
     * fail. Based on earlier qemuDomainSnapshotDiskPrepare, all
     * disks in this list are now either SNAPSHOT_NO, or
     * SNAPSHOT_EXTERNAL with a valid file name and qcow2 format. */
    qemuDomainObjEnterMonitorWithDriver(driver, vm);
    for (i = 0; i < snap->def->ndisks; i++) {
        virDomainDiskDefPtr persistDisk = NULL;

        if (snap->def->disks[i].snapshot == VIR_DOMAIN_DISK_SNAPSHOT_NO)
            continue;
        if (vm->newDef) {
            int indx = virDomainDiskIndexByName(vm->newDef,
                                                vm->def->disks[i]->dst,
                                                false);
            if (indx >= 0) {
                persistDisk = vm->newDef->disks[indx];
                persist = true;
            }
        }

        ret = qemuDomainSnapshotCreateSingleDiskActive(driver, vm,
                                                       &snap->def->disks[i],
                                                       vm->def->disks[i],
                                                       persistDisk);
        if (ret < 0)
            break;
    }
    qemuDomainObjExitMonitorWithDriver(driver, vm);
    if (ret < 0)
        goto cleanup;

    if (flags & VIR_DOMAIN_SNAPSHOT_CREATE_HALT) {
        virDomainEventPtr event;

        event = virDomainEventNewFromObj(vm, VIR_DOMAIN_EVENT_STOPPED,
                                         VIR_DOMAIN_EVENT_STOPPED_FROM_SNAPSHOT);
        qemuProcessStop(driver, vm, 0, VIR_DOMAIN_SHUTOFF_FROM_SNAPSHOT);
        virDomainAuditStop(vm, "from-snapshot");
        /* We already filtered the _HALT flag for persistent domains
         * only, so this end job never drops the last reference. */
        ignore_value(qemuDomainObjEndJob(driver, vm));
        resume = false;
        vm = NULL;
        if (event)
            qemuDomainEventQueue(driver, event);
    }

cleanup:
    if (resume && virDomainObjIsActive(vm) &&
        qemuProcessStartCPUs(driver, vm, conn,
                             VIR_DOMAIN_RUNNING_UNPAUSED,
                             QEMU_ASYNC_JOB_NONE) < 0 &&
        virGetLastError() == NULL) {
        qemuReportError(VIR_ERR_OPERATION_FAILED, "%s",
                        _("resuming after snapshot failed"));
    }

    if (vm) {
        if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0 ||
            (persist &&
             virDomainSaveConfig(driver->configDir, vm->newDef) < 0))
            ret = -1;
        if (qemuDomainObjEndJob(driver, vm) == 0) {
            /* Only possible if a transient vm quit while our locks were down,
             * in which case we don't want to save snapshot metadata. */
            *vmptr = NULL;
            ret = -1;
        }
    }

    return ret;
}


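/* Public entry point for snapshot creation: validates flags, parses the
 * snapshot XML and, for VIR_DOMAIN_SNAPSHOT_CREATE_REDEFINE, checks the
 * requested definition against the existing snapshot chain. */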
static virDomainSnapshotPtr
qemuDomainSnapshotCreateXML(virDomainPtr domain,
                            const char *xmlDesc,
                            unsigned int flags)
{
    struct qemud_driver *driver = domain->conn->privateData;
    virDomainObjPtr vm = NULL;
    char *xml = NULL;
    virDomainSnapshotObjPtr snap = NULL;
    virDomainSnapshotPtr snapshot = NULL;
    char uuidstr[VIR_UUID_STRING_BUFLEN];
    virDomainSnapshotDefPtr def = NULL;
    bool update_current = true;
    unsigned int parse_flags = 0;

    virCheckFlags(VIR_DOMAIN_SNAPSHOT_CREATE_REDEFINE |
                  VIR_DOMAIN_SNAPSHOT_CREATE_CURRENT |
                  VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA |
                  VIR_DOMAIN_SNAPSHOT_CREATE_HALT |
                  VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY, NULL);

    if (((flags & VIR_DOMAIN_SNAPSHOT_CREATE_REDEFINE) &&
         !(flags & VIR_DOMAIN_SNAPSHOT_CREATE_CURRENT)) ||
        (flags & VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA))
        update_current = false;
    if (flags & VIR_DOMAIN_SNAPSHOT_CREATE_REDEFINE)
        parse_flags |= VIR_DOMAIN_SNAPSHOT_PARSE_REDEFINE;
    if (flags & VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY)
        parse_flags |= VIR_DOMAIN_SNAPSHOT_PARSE_DISKS;

    qemuDriverLock(driver);
    virUUIDFormat(domain->uuid, uuidstr);
    vm = virDomainFindByUUID(&driver->domains, domain->uuid);
    if (!vm) {
        qemuReportError(VIR_ERR_NO_DOMAIN,
                        _("no domain with matching uuid '%s'"), uuidstr);
        goto cleanup;
    }

    if (qemuProcessAutoDestroyActive(driver, vm)) {
        qemuReportError(VIR_ERR_OPERATION_INVALID,
                        "%s", _("domain is marked for auto destroy"));
        goto cleanup;
    }
    if (!vm->persistent && (flags & VIR_DOMAIN_SNAPSHOT_CREATE_HALT)) {
        qemuReportError(VIR_ERR_OPERATION_INVALID, "%s",
                        _("cannot halt after transient domain snapshot"));
        goto cleanup;
    }

    if (!(def = virDomainSnapshotDefParseString(xmlDesc, driver->caps,
                                                QEMU_EXPECTED_VIRT_TYPES,
                                                parse_flags)))
        goto cleanup;

    if (flags & VIR_DOMAIN_SNAPSHOT_CREATE_REDEFINE) {
        virDomainSnapshotObjPtr other = NULL;

        /* Prevent circular chains */
        if (def->parent) {
            if (STREQ(def->name, def->parent)) {
                qemuReportError(VIR_ERR_INVALID_ARG,
                                _("cannot set snapshot %s as its own parent"),
                                def->name);
                goto cleanup;
            }
            other = virDomainSnapshotFindByName(&vm->snapshots, def->parent);
            if (!other) {
                qemuReportError(VIR_ERR_INVALID_ARG,
                                _("parent %s for snapshot %s not found"),
                                def->parent, def->name);
                goto cleanup;
            }
            while (other->def->parent) {
                if (STREQ(other->def->parent, def->name)) {
                    qemuReportError(VIR_ERR_INVALID_ARG,
                                    _("parent %s would create cycle to %s"),
                                    other->def->name, def->name);
                    goto cleanup;
                }
                other = virDomainSnapshotFindByName(&vm->snapshots,
                                                    other->def->parent);
                if (!other) {
                    VIR_WARN("snapshots are inconsistent for %s",
                             vm->def->name);
                    break;
                }
            }
        }

        /* Check that any replacement is compatible */
        if (def->dom &&
            memcmp(def->dom->uuid, domain->uuid, VIR_UUID_BUFLEN)) {
            qemuReportError(VIR_ERR_INVALID_ARG,
                            _("definition for snapshot %s must use uuid %s"),
                            def->name, uuidstr);
            goto cleanup;
        }
        other = virDomainSnapshotFindByName(&vm->snapshots, def->name);
        if (other) {
            if ((other->def->state == VIR_DOMAIN_RUNNING ||
                 other->def->state == VIR_DOMAIN_PAUSED) !=
                (def->state == VIR_DOMAIN_RUNNING ||
                 def->state == VIR_DOMAIN_PAUSED)) {
                qemuReportError(VIR_ERR_INVALID_ARG,
                                _("cannot change between online and offline "
                                  "snapshot state in snapshot %s"),
                                def->name);
                goto cleanup;
            }
|
2011-08-22 20:26:52 +00:00
|
|
|
if ((other->def->state == VIR_DOMAIN_DISK_SNAPSHOT) !=
|
|
|
|
(def->state == VIR_DOMAIN_DISK_SNAPSHOT)) {
|
|
|
|
qemuReportError(VIR_ERR_INVALID_ARG,
|
|
|
|
_("cannot change between disk snapshot and "
|
|
|
|
"system checkpoint in snapshot %s"),
|
|
|
|
def->name);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
2011-08-13 03:20:04 +00:00
|
|
|
if (other->def->dom) {
|
|
|
|
if (def->dom) {
|
|
|
|
if (!virDomainDefCheckABIStability(other->def->dom,
|
|
|
|
def->dom))
|
|
|
|
goto cleanup;
|
|
|
|
} else {
|
|
|
|
/* Transfer the domain def */
|
|
|
|
def->dom = other->def->dom;
|
|
|
|
other->def->dom = NULL;
|
|
|
|
}
|
|
|
|
}
|
2011-09-01 22:50:17 +00:00
|
|
|
if (other == vm->current_snapshot) {
|
|
|
|
update_current = true;
|
|
|
|
vm->current_snapshot = NULL;
|
|
|
|
}
|
|
|
|
virDomainSnapshotObjListRemove(&vm->snapshots, other);
|
|
|
|
}
|
2011-08-20 04:33:13 +00:00
|
|
|
if (def->state == VIR_DOMAIN_DISK_SNAPSHOT && def->dom) {
|
|
|
|
if (virDomainSnapshotAlignDisks(def,
|
|
|
|
VIR_DOMAIN_DISK_SNAPSHOT_EXTERNAL,
|
|
|
|
false) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
}
|
2011-08-13 03:20:04 +00:00
|
|
|
} else {
|
|
|
|
/* Easiest way to clone inactive portion of vm->def is via
|
|
|
|
* conversion in and back out of xml. */
|
|
|
|
if (!(xml = virDomainDefFormat(vm->def, (VIR_DOMAIN_XML_INACTIVE |
|
|
|
|
VIR_DOMAIN_XML_SECURE))) ||
|
|
|
|
!(def->dom = virDomainDefParseString(driver->caps, xml,
|
|
|
|
QEMU_EXPECTED_VIRT_TYPES,
|
|
|
|
VIR_DOMAIN_XML_INACTIVE)))
|
|
|
|
goto cleanup;
|
|
|
|
|
2011-08-20 04:33:13 +00:00
|
|
|
if (flags & VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY) {
|
|
|
|
if (virDomainSnapshotAlignDisks(def,
|
|
|
|
VIR_DOMAIN_DISK_SNAPSHOT_EXTERNAL,
|
|
|
|
false) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
if (qemuDomainSnapshotDiskPrepare(vm, def) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
def->state = VIR_DOMAIN_DISK_SNAPSHOT;
|
|
|
|
} else {
|
|
|
|
/* In a perfect world, we would allow qemu to tell us this.
|
|
|
|
* The problem is that qemu only does this check
|
|
|
|
* device-by-device; so if you had a domain that booted from a
|
|
|
|
* large qcow2 device, but had a secondary raw device
|
|
|
|
* attached, you wouldn't find out that you can't snapshot
|
|
|
|
* your guest until *after* it had spent the time to snapshot
|
|
|
|
* the boot device. This is probably a bug in qemu, but we'll
|
|
|
|
* work around it here for now.
|
|
|
|
*/
|
|
|
|
if (!qemuDomainSnapshotIsAllowed(vm))
|
|
|
|
goto cleanup;
|
|
|
|
def->state = virDomainObjGetState(vm, NULL);
|
|
|
|
}
|
2011-08-20 04:03:38 +00:00
|
|
|
}
|
|
|
|
|
2010-04-02 14:10:37 +00:00
|
|
|
if (!(snap = virDomainSnapshotAssignDef(&vm->snapshots, def)))
|
|
|
|
goto cleanup;
|
2011-08-12 20:45:39 +00:00
|
|
|
def = NULL;
|
2010-04-02 14:10:37 +00:00
|
|
|
|
2011-09-01 22:50:17 +00:00
|
|
|
if (update_current)
|
|
|
|
snap->def->current = true;
|
2011-08-12 20:45:39 +00:00
|
|
|
if (vm->current_snapshot) {
|
2011-09-01 22:50:17 +00:00
|
|
|
if (!(flags & VIR_DOMAIN_SNAPSHOT_CREATE_REDEFINE)) {
|
|
|
|
snap->def->parent = strdup(vm->current_snapshot->def->name);
|
|
|
|
if (snap->def->parent == NULL) {
|
|
|
|
virReportOOMError();
|
|
|
|
goto cleanup;
|
|
|
|
}
|
2011-08-12 20:45:39 +00:00
|
|
|
}
|
2011-09-01 22:50:17 +00:00
|
|
|
if (update_current) {
|
2011-09-01 21:47:03 +00:00
|
|
|
vm->current_snapshot->def->current = false;
|
|
|
|
if (qemuDomainSnapshotWriteMetadata(vm, vm->current_snapshot,
|
|
|
|
driver->snapshotDir) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
vm->current_snapshot = NULL;
|
|
|
|
}
|
2011-08-12 20:45:39 +00:00
|
|
|
}
|
2011-02-24 15:46:44 +00:00
|
|
|
|
2010-04-02 14:10:37 +00:00
|
|
|
/* actually do the snapshot */
|
2011-09-01 22:50:17 +00:00
|
|
|
if (flags & VIR_DOMAIN_SNAPSHOT_CREATE_REDEFINE) {
|
|
|
|
/* XXX Should we validate that the redefined snapshot even
|
2011-08-22 20:26:52 +00:00
|
|
|
* makes sense, such as checking that qemu-img recognizes the
|
|
|
|
* snapshot name in at least one of the domain's disks? */
|
2011-08-20 04:33:13 +00:00
|
|
|
} else if (flags & VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY) {
|
|
|
|
if (!virDomainObjIsActive(vm)) {
|
|
|
|
qemuReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
|
|
|
|
_("disk snapshots of inactive domains not "
|
|
|
|
"implemented yet"));
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
if (qemuDomainSnapshotCreateDiskActive(domain->conn, driver,
|
|
|
|
&vm, snap, flags) < 0)
|
|
|
|
goto cleanup;
|
2011-09-01 22:50:17 +00:00
|
|
|
} else if (!virDomainObjIsActive(vm)) {
|
2011-08-12 16:12:47 +00:00
|
|
|
if (qemuDomainSnapshotCreateInactive(driver, vm, snap) < 0)
|
2010-04-02 14:10:37 +00:00
|
|
|
goto cleanup;
|
2011-08-05 18:03:44 +00:00
|
|
|
} else {
|
2011-02-24 15:46:44 +00:00
|
|
|
if (qemuDomainSnapshotCreateActive(domain->conn, driver,
|
2011-09-01 23:23:29 +00:00
|
|
|
&vm, snap, flags) < 0)
|
2010-04-22 16:01:56 +00:00
|
|
|
goto cleanup;
|
2010-04-02 14:10:37 +00:00
|
|
|
}
|
|
|
|
|
2011-08-12 20:45:39 +00:00
|
|
|
/* If we fail after this point, there's not a whole lot we can
|
2010-04-02 14:10:37 +00:00
|
|
|
* do; we've successfully taken the snapshot, and we are now running
|
|
|
|
* on it, so we have to go forward the best we can
|
|
|
|
*/
|
|
|
|
snapshot = virGetDomainSnapshot(domain, snap->def->name);
|
|
|
|
|
|
|
|
cleanup:
|
2011-08-12 20:45:39 +00:00
|
|
|
if (vm) {
|
2011-09-01 21:47:03 +00:00
|
|
|
if (snapshot && !(flags & VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA)) {
|
2011-09-01 21:22:02 +00:00
|
|
|
if (qemuDomainSnapshotWriteMetadata(vm, snap,
|
|
|
|
driver->snapshotDir) < 0)
|
|
|
|
VIR_WARN("unable to save metadata for snapshot %s",
|
|
|
|
snap->def->name);
|
2011-09-01 22:50:17 +00:00
|
|
|
else if (update_current)
|
2011-09-01 21:22:02 +00:00
|
|
|
vm->current_snapshot = snap;
|
|
|
|
} else if (snap) {
|
2011-08-12 20:45:39 +00:00
|
|
|
virDomainSnapshotObjListRemove(&vm->snapshots, snap);
|
2011-09-01 21:22:02 +00:00
|
|
|
}
|
2010-04-02 14:10:37 +00:00
|
|
|
virDomainObjUnlock(vm);
|
2011-08-12 20:45:39 +00:00
|
|
|
}
|
|
|
|
virDomainSnapshotDefFree(def);
|
2011-08-13 03:20:04 +00:00
|
|
|
VIR_FREE(xml);
|
2010-04-02 14:10:37 +00:00
|
|
|
qemuDriverUnlock(driver);
|
|
|
|
return snapshot;
|
|
|
|
}
|
|
|
|
|
|
|
|
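/* List up to nameslen snapshot names for the domain, subject to the
 * roots/metadata listing flags. */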
static int qemuDomainSnapshotListNames(virDomainPtr domain, char **names,
                                       int nameslen,
                                       unsigned int flags)
{
    struct qemud_driver *driver = domain->conn->privateData;
    virDomainObjPtr vm = NULL;
    int n = -1;

    virCheckFlags(VIR_DOMAIN_SNAPSHOT_LIST_ROOTS |
                  VIR_DOMAIN_SNAPSHOT_LIST_METADATA, -1);

    qemuDriverLock(driver);
    vm = virDomainFindByUUID(&driver->domains, domain->uuid);
    if (!vm) {
        char uuidstr[VIR_UUID_STRING_BUFLEN];
        virUUIDFormat(domain->uuid, uuidstr);
        qemuReportError(VIR_ERR_NO_DOMAIN,
                        _("no domain with matching uuid '%s'"), uuidstr);
        goto cleanup;
    }

    n = virDomainSnapshotObjListGetNames(&vm->snapshots, names, nameslen,
                                         flags);

cleanup:
    if (vm)
        virDomainObjUnlock(vm);
    qemuDriverUnlock(driver);
    return n;
}


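/* Return the number of snapshots for the domain, using the same listing
 * flags as qemuDomainSnapshotListNames. */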
static int qemuDomainSnapshotNum(virDomainPtr domain,
                                 unsigned int flags)
{
    struct qemud_driver *driver = domain->conn->privateData;
    virDomainObjPtr vm = NULL;
    int n = -1;

    virCheckFlags(VIR_DOMAIN_SNAPSHOT_LIST_ROOTS |
                  VIR_DOMAIN_SNAPSHOT_LIST_METADATA, -1);

    qemuDriverLock(driver);
    vm = virDomainFindByUUID(&driver->domains, domain->uuid);
    if (!vm) {
        char uuidstr[VIR_UUID_STRING_BUFLEN];
        virUUIDFormat(domain->uuid, uuidstr);
        qemuReportError(VIR_ERR_NO_DOMAIN,
                        _("no domain with matching uuid '%s'"), uuidstr);
        goto cleanup;
    }

    /* All qemu snapshots have libvirt metadata, so
     * VIR_DOMAIN_SNAPSHOT_LIST_METADATA makes no difference to our
     * answer. */

    n = virDomainSnapshotObjListNum(&vm->snapshots, flags);

cleanup:
    if (vm)
        virDomainObjUnlock(vm);
    qemuDriverUnlock(driver);
    return n;
}


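/* Look up a snapshot of the domain by name and return a snapshot handle,
 * or report VIR_ERR_NO_DOMAIN_SNAPSHOT if no such snapshot exists. */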
static virDomainSnapshotPtr qemuDomainSnapshotLookupByName(virDomainPtr domain,
                                                           const char *name,
                                                           unsigned int flags)
{
    struct qemud_driver *driver = domain->conn->privateData;
    virDomainObjPtr vm;
    virDomainSnapshotObjPtr snap = NULL;
    virDomainSnapshotPtr snapshot = NULL;

    virCheckFlags(0, NULL);

    qemuDriverLock(driver);
    vm = virDomainFindByUUID(&driver->domains, domain->uuid);
    if (!vm) {
        char uuidstr[VIR_UUID_STRING_BUFLEN];
        virUUIDFormat(domain->uuid, uuidstr);
        qemuReportError(VIR_ERR_NO_DOMAIN,
                        _("no domain with matching uuid '%s'"), uuidstr);
        goto cleanup;
    }

    snap = virDomainSnapshotFindByName(&vm->snapshots, name);
    if (!snap) {
        qemuReportError(VIR_ERR_NO_DOMAIN_SNAPSHOT,
                        _("no snapshot with matching name '%s'"), name);
        goto cleanup;
    }

    snapshot = virGetDomainSnapshot(domain, snap->def->name);

cleanup:
    if (vm)
        virDomainObjUnlock(vm);
    qemuDriverUnlock(driver);
    return snapshot;
}


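/* Return 1 if the domain has a current snapshot, 0 if not, -1 on error. */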
static int qemuDomainHasCurrentSnapshot(virDomainPtr domain,
                                        unsigned int flags)
{
    struct qemud_driver *driver = domain->conn->privateData;
    virDomainObjPtr vm;
    int ret = -1;

    virCheckFlags(0, -1);

    qemuDriverLock(driver);
    vm = virDomainFindByUUID(&driver->domains, domain->uuid);
    if (!vm) {
        char uuidstr[VIR_UUID_STRING_BUFLEN];
        virUUIDFormat(domain->uuid, uuidstr);
        qemuReportError(VIR_ERR_NO_DOMAIN,
                        _("no domain with matching uuid '%s'"), uuidstr);
        goto cleanup;
    }

    ret = (vm->current_snapshot != NULL);

cleanup:
    if (vm)
        virDomainObjUnlock(vm);
    qemuDriverUnlock(driver);
    return ret;
}


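/* Return a handle to the parent of the given snapshot, or an error if the
 * snapshot has no parent. */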
static virDomainSnapshotPtr
qemuDomainSnapshotGetParent(virDomainSnapshotPtr snapshot,
                            unsigned int flags)
{
    struct qemud_driver *driver = snapshot->domain->conn->privateData;
    virDomainObjPtr vm;
    virDomainSnapshotObjPtr snap = NULL;
    virDomainSnapshotPtr parent = NULL;

    virCheckFlags(0, NULL);

    qemuDriverLock(driver);
    vm = virDomainFindByUUID(&driver->domains, snapshot->domain->uuid);
    if (!vm) {
        char uuidstr[VIR_UUID_STRING_BUFLEN];
        virUUIDFormat(snapshot->domain->uuid, uuidstr);
        qemuReportError(VIR_ERR_NO_DOMAIN,
                        _("no domain with matching uuid '%s'"), uuidstr);
        goto cleanup;
    }

    snap = virDomainSnapshotFindByName(&vm->snapshots, snapshot->name);
    if (!snap) {
        qemuReportError(VIR_ERR_NO_DOMAIN_SNAPSHOT,
                        _("no domain snapshot with matching name '%s'"),
                        snapshot->name);
        goto cleanup;
    }

    if (!snap->def->parent) {
        qemuReportError(VIR_ERR_NO_DOMAIN_SNAPSHOT,
                        _("snapshot '%s' does not have a parent"),
                        snap->def->name);
        goto cleanup;
    }

    parent = virGetDomainSnapshot(snapshot->domain, snap->def->parent);

cleanup:
    if (vm)
        virDomainObjUnlock(vm);
    qemuDriverUnlock(driver);
    return parent;
}


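/* Return a handle to the domain's current snapshot, if one is set. */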
static virDomainSnapshotPtr qemuDomainSnapshotCurrent(virDomainPtr domain,
                                                      unsigned int flags)
{
    struct qemud_driver *driver = domain->conn->privateData;
    virDomainObjPtr vm;
    virDomainSnapshotPtr snapshot = NULL;

    virCheckFlags(0, NULL);

    qemuDriverLock(driver);
    vm = virDomainFindByUUID(&driver->domains, domain->uuid);
    if (!vm) {
        char uuidstr[VIR_UUID_STRING_BUFLEN];
        virUUIDFormat(domain->uuid, uuidstr);
        qemuReportError(VIR_ERR_NO_DOMAIN,
                        _("no domain with matching uuid '%s'"), uuidstr);
        goto cleanup;
    }

    if (!vm->current_snapshot) {
        qemuReportError(VIR_ERR_NO_DOMAIN_SNAPSHOT, "%s",
                        _("the domain does not have a current snapshot"));
        goto cleanup;
    }

    snapshot = virGetDomainSnapshot(domain, vm->current_snapshot->def->name);

cleanup:
    if (vm)
        virDomainObjUnlock(vm);
    qemuDriverUnlock(driver);
    return snapshot;
}


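/* Format the stored snapshot definition (including any saved domain XML)
 * back into an XML document; VIR_DOMAIN_XML_SECURE is the only accepted
 * flag. */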
static char *qemuDomainSnapshotGetXMLDesc(virDomainSnapshotPtr snapshot,
                                          unsigned int flags)
{
    struct qemud_driver *driver = snapshot->domain->conn->privateData;
    virDomainObjPtr vm = NULL;
    char *xml = NULL;
    virDomainSnapshotObjPtr snap = NULL;
    char uuidstr[VIR_UUID_STRING_BUFLEN];

    virCheckFlags(VIR_DOMAIN_XML_SECURE, NULL);

    qemuDriverLock(driver);
    virUUIDFormat(snapshot->domain->uuid, uuidstr);
    vm = virDomainFindByUUID(&driver->domains, snapshot->domain->uuid);
    if (!vm) {
        qemuReportError(VIR_ERR_NO_DOMAIN,
                        _("no domain with matching uuid '%s'"), uuidstr);
        goto cleanup;
    }

    snap = virDomainSnapshotFindByName(&vm->snapshots, snapshot->name);
    if (!snap) {
        qemuReportError(VIR_ERR_NO_DOMAIN_SNAPSHOT,
                        _("no domain snapshot with matching name '%s'"),
                        snapshot->name);
        goto cleanup;
    }

    xml = virDomainSnapshotDefFormat(uuidstr, snap->def, flags, 0);

cleanup:
    if (vm)
        virDomainObjUnlock(vm);
    qemuDriverUnlock(driver);
    return xml;
}


/* The domain is expected to be locked and inactive. */
static int
qemuDomainSnapshotRevertInactive(struct qemud_driver *driver,
                                 virDomainObjPtr vm,
                                 virDomainSnapshotObjPtr snap)
{
    /* Try all disks, but report failure if we skipped any. */
    int ret = qemuDomainSnapshotForEachQcow2(driver, vm, snap, "-a", true);
    return ret > 0 ? -1 : ret;
}


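/* Revert the domain to the named snapshot, emitting lifecycle events for
 * each state transition; see the transition table at the top of the
 * function body. */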
static int qemuDomainRevertToSnapshot(virDomainSnapshotPtr snapshot,
                                      unsigned int flags)
{
    struct qemud_driver *driver = snapshot->domain->conn->privateData;
    virDomainObjPtr vm = NULL;
    int ret = -1;
    virDomainSnapshotObjPtr snap = NULL;
    char uuidstr[VIR_UUID_STRING_BUFLEN];
    virDomainEventPtr event = NULL;
    virDomainEventPtr event2 = NULL;
    int detail;
    qemuDomainObjPrivatePtr priv;
    int rc;
    virDomainDefPtr config = NULL;

    virCheckFlags(VIR_DOMAIN_SNAPSHOT_REVERT_RUNNING |
                  VIR_DOMAIN_SNAPSHOT_REVERT_PAUSED |
                  VIR_DOMAIN_SNAPSHOT_REVERT_FORCE, -1);

    /* We have the following transitions, which create the following events:
     * 1. inactive -> inactive: none
     * 2. inactive -> running: EVENT_STARTED
     * 3. inactive -> paused: EVENT_STARTED, EVENT_PAUSED
     * 4. running -> inactive: EVENT_STOPPED
     * 5. running -> running: none
     * 6. running -> paused: EVENT_PAUSED
     * 7. paused -> inactive: EVENT_STOPPED
     * 8. paused -> running: EVENT_RESUMED
     * 9. paused -> paused: none
     * Also, several transitions occur even if we fail partway through,
     * and use of FORCE can cause multiple transitions.
     */

    qemuDriverLock(driver);
    virUUIDFormat(snapshot->domain->uuid, uuidstr);
    vm = virDomainFindByUUID(&driver->domains, snapshot->domain->uuid);
    if (!vm) {
        qemuReportError(VIR_ERR_NO_DOMAIN,
                        _("no domain with matching uuid '%s'"), uuidstr);
        goto cleanup;
    }

    snap = virDomainSnapshotFindByName(&vm->snapshots, snapshot->name);
    if (!snap) {
        qemuReportError(VIR_ERR_NO_DOMAIN_SNAPSHOT,
                        _("no domain snapshot with matching name '%s'"),
                        snapshot->name);
        goto cleanup;
    }

    if (!vm->persistent &&
        snap->def->state != VIR_DOMAIN_RUNNING &&
        snap->def->state != VIR_DOMAIN_PAUSED &&
        (flags & (VIR_DOMAIN_SNAPSHOT_REVERT_RUNNING |
                  VIR_DOMAIN_SNAPSHOT_REVERT_PAUSED)) == 0) {
        qemuReportError(VIR_ERR_OPERATION_INVALID, "%s",
                        _("transient domain needs to request run or pause "
                          "to revert to inactive snapshot"));
        goto cleanup;
    }
    if (snap->def->state == VIR_DOMAIN_DISK_SNAPSHOT) {
        qemuReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                        _("revert to external disk snapshot not supported "
                          "yet"));
        goto cleanup;
    }
    if (!(flags & VIR_DOMAIN_SNAPSHOT_REVERT_FORCE)) {
        if (!snap->def->dom) {
            qemuReportError(VIR_ERR_SNAPSHOT_REVERT_RISKY,
                            _("snapshot '%s' lacks domain '%s' rollback info"),
                            snap->def->name, vm->def->name);
            goto cleanup;
        }
        if (virDomainObjIsActive(vm) &&
            !(snap->def->state == VIR_DOMAIN_RUNNING
              || snap->def->state == VIR_DOMAIN_PAUSED) &&
            (flags & (VIR_DOMAIN_SNAPSHOT_REVERT_RUNNING |
                      VIR_DOMAIN_SNAPSHOT_REVERT_PAUSED))) {
            qemuReportError(VIR_ERR_SNAPSHOT_REVERT_RISKY,
                            _("must respawn qemu to start inactive snapshot"));
            goto cleanup;
        }
    }

    if (vm->current_snapshot) {
        vm->current_snapshot->def->current = false;
        if (qemuDomainSnapshotWriteMetadata(vm, vm->current_snapshot,
                                            driver->snapshotDir) < 0)
            goto cleanup;
        vm->current_snapshot = NULL;
        /* XXX Should we restore vm->current_snapshot after this point
         * in the failure cases where we know there was no change? */
    }

    /* Prepare to copy the snapshot inactive xml as the config of this
     * domain. Easiest way is by a round trip through xml.
     *
     * XXX Should domain snapshots track live xml rather
     * than inactive xml? */
    snap->def->current = true;
    if (snap->def->dom) {
        char *xml;
        if (!(xml = virDomainDefFormat(snap->def->dom,
                                       (VIR_DOMAIN_XML_INACTIVE |
                                        VIR_DOMAIN_XML_SECURE))))
            goto cleanup;
        config = virDomainDefParseString(driver->caps, xml,
                                         QEMU_EXPECTED_VIRT_TYPES,
                                         VIR_DOMAIN_XML_INACTIVE);
        VIR_FREE(xml);
        if (!config)
            goto cleanup;
    }

    if (qemuDomainObjBeginJobWithDriver(driver, vm, QEMU_JOB_MODIFY) < 0)
        goto cleanup;

    if (snap->def->state == VIR_DOMAIN_RUNNING
        || snap->def->state == VIR_DOMAIN_PAUSED) {
        /* Transitions 2, 3, 5, 6, 8, 9 */
        bool was_running = false;
        bool was_stopped = false;

        /* When using the loadvm monitor command, qemu does not know
         * whether to pause or run the reverted domain, and just stays
         * in the same state as before the monitor command, whether
         * that is paused or running. We always pause before loadvm,
         * to have finer control. */
        if (virDomainObjIsActive(vm)) {
            /* Transitions 5, 6, 8, 9 */
            /* Check for ABI compatibility. */
            if (config && !virDomainDefCheckABIStability(vm->def, config)) {
                virErrorPtr err = virGetLastError();

                if (!(flags & VIR_DOMAIN_SNAPSHOT_REVERT_FORCE)) {
                    /* Re-spawn error using correct category. */
                    if (err->code == VIR_ERR_CONFIG_UNSUPPORTED)
                        qemuReportError(VIR_ERR_SNAPSHOT_REVERT_RISKY, "%s",
                                        err->str2);
                    goto endjob;
                }
                virResetError(err);
                qemuProcessStop(driver, vm, 0,
                                VIR_DOMAIN_SHUTOFF_FROM_SNAPSHOT);
                virDomainAuditStop(vm, "from-snapshot");
                detail = VIR_DOMAIN_EVENT_STOPPED_FROM_SNAPSHOT;
                event = virDomainEventNewFromObj(vm,
                                                 VIR_DOMAIN_EVENT_STOPPED,
                                                 detail);
                if (event)
                    qemuDomainEventQueue(driver, event);
                goto load;
            }

            priv = vm->privateData;
            if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
                /* Transitions 5, 6 */
                was_running = true;
                if (qemuProcessStopCPUs(driver, vm,
                                        VIR_DOMAIN_PAUSED_FROM_SNAPSHOT,
                                        QEMU_ASYNC_JOB_NONE) < 0)
                    goto endjob;
                /* Create an event now in case the restore fails, so
                 * that user will be alerted that they are now paused.
                 * If restore later succeeds, we might replace this. */
                detail = VIR_DOMAIN_EVENT_SUSPENDED_FROM_SNAPSHOT;
                event = virDomainEventNewFromObj(vm,
                                                 VIR_DOMAIN_EVENT_SUSPENDED,
                                                 detail);
                if (!virDomainObjIsActive(vm)) {
                    qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                                    _("guest unexpectedly quit"));
                    goto endjob;
                }
            }
            qemuDomainObjEnterMonitorWithDriver(driver, vm);
            rc = qemuMonitorLoadSnapshot(priv->mon, snap->def->name);
            qemuDomainObjExitMonitorWithDriver(driver, vm);
            if (rc < 0) {
                /* XXX resume domain if it was running before the
                 * failed loadvm attempt? */
                goto endjob;
            }
            if (config)
                virDomainObjAssignDef(vm, config, false);
        } else {
            /* Transitions 2, 3 */
        load:
            was_stopped = true;
            if (config)
                virDomainObjAssignDef(vm, config, false);

            rc = qemuProcessStart(snapshot->domain->conn, driver, vm, NULL,
                                  true, false, -1, NULL, snap,
                                  VIR_VM_OP_CREATE);
            virDomainAuditStart(vm, "from-snapshot", rc >= 0);
            detail = VIR_DOMAIN_EVENT_STARTED_FROM_SNAPSHOT;
            event = virDomainEventNewFromObj(vm,
                                             VIR_DOMAIN_EVENT_STARTED,
                                             detail);
            if (rc < 0)
                goto endjob;
        }

        /* Touch up domain state. */
        if (!(flags & VIR_DOMAIN_SNAPSHOT_REVERT_RUNNING) &&
            (snap->def->state == VIR_DOMAIN_PAUSED ||
             (flags & VIR_DOMAIN_SNAPSHOT_REVERT_PAUSED))) {
            /* Transitions 3, 6, 9 */
            virDomainObjSetState(vm, VIR_DOMAIN_PAUSED,
                                 VIR_DOMAIN_PAUSED_FROM_SNAPSHOT);
            if (was_stopped) {
                /* Transition 3, use event as-is and add event2 */
                detail = VIR_DOMAIN_EVENT_SUSPENDED_FROM_SNAPSHOT;
                event2 = virDomainEventNewFromObj(vm,
                                                  VIR_DOMAIN_EVENT_SUSPENDED,
                                                  detail);
            } /* else transition 6 and 9 use event as-is */
        } else {
            /* Transitions 2, 5, 8 */
            if (!virDomainObjIsActive(vm)) {
                qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                                _("guest unexpectedly quit"));
                goto endjob;
            }
            rc = qemuProcessStartCPUs(driver, vm, snapshot->domain->conn,
                                      VIR_DOMAIN_RUNNING_FROM_SNAPSHOT,
                                      QEMU_ASYNC_JOB_NONE);
            if (rc < 0)
                goto endjob;
            virDomainEventFree(event);
            event = NULL;
            if (was_stopped) {
                /* Transition 2 */
                detail = VIR_DOMAIN_EVENT_STARTED_FROM_SNAPSHOT;
                event = virDomainEventNewFromObj(vm,
                                                 VIR_DOMAIN_EVENT_STARTED,
                                                 detail);
            } else if (was_running) {
                /* Transition 8 */
                detail = VIR_DOMAIN_EVENT_RESUMED;
                event = virDomainEventNewFromObj(vm,
                                                 VIR_DOMAIN_EVENT_RESUMED,
                                                 detail);
            }
        }
    } else {
        /* Transitions 1, 4, 7 */
        /* Newer qemu -loadvm refuses to revert to the state of a snapshot
         * created by qemu-img snapshot -c. If the domain is running, we
         * must take it offline; then do the revert using qemu-img.
         */

        if (virDomainObjIsActive(vm)) {
            /* Transitions 4, 7 */
            qemuProcessStop(driver, vm, 0, VIR_DOMAIN_SHUTOFF_FROM_SNAPSHOT);
            virDomainAuditStop(vm, "from-snapshot");
            detail = VIR_DOMAIN_EVENT_STOPPED_FROM_SNAPSHOT;
            event = virDomainEventNewFromObj(vm,
                                             VIR_DOMAIN_EVENT_STOPPED,
                                             detail);
        }

        if (qemuDomainSnapshotRevertInactive(driver, vm, snap) < 0) {
            if (!vm->persistent) {
                if (qemuDomainObjEndJob(driver, vm) > 0)
                    qemuDomainRemoveInactive(driver, vm);
                vm = NULL;
                goto cleanup;
            }
            goto endjob;
        }
        if (config)
            virDomainObjAssignDef(vm, config, false);

        if (flags & (VIR_DOMAIN_SNAPSHOT_REVERT_RUNNING |
                     VIR_DOMAIN_SNAPSHOT_REVERT_PAUSED)) {
            /* Flush first event, now do transition 2 or 3 */
            bool paused = (flags & VIR_DOMAIN_SNAPSHOT_REVERT_PAUSED) != 0;

            if (event)
                qemuDomainEventQueue(driver, event);
            rc = qemuProcessStart(snapshot->domain->conn, driver, vm, NULL,
                                  paused, false, -1, NULL, NULL,
                                  VIR_VM_OP_CREATE);
            virDomainAuditStart(vm, "from-snapshot", rc >= 0);
            if (rc < 0) {
                if (!vm->persistent) {
                    if (qemuDomainObjEndJob(driver, vm) > 0)
                        qemuDomainRemoveInactive(driver, vm);
                    vm = NULL;
                    goto cleanup;
                }
                goto endjob;
            }
            detail = VIR_DOMAIN_EVENT_STARTED_FROM_SNAPSHOT;
            event = virDomainEventNewFromObj(vm,
                                             VIR_DOMAIN_EVENT_STARTED,
                                             detail);
            if (paused) {
                detail = VIR_DOMAIN_EVENT_SUSPENDED_FROM_SNAPSHOT;
                event2 = virDomainEventNewFromObj(vm,
                                                  VIR_DOMAIN_EVENT_SUSPENDED,
                                                  detail);
            }
        }
    }

    ret = 0;

endjob:
    if (vm && qemuDomainObjEndJob(driver, vm) == 0)
        vm = NULL;

cleanup:
    if (vm && ret == 0) {
        if (qemuDomainSnapshotWriteMetadata(vm, snap,
                                            driver->snapshotDir) < 0)
            ret = -1;
        else
            vm->current_snapshot = snap;
    } else if (snap) {
        snap->def->current = false;
    }
    if (event) {
        qemuDomainEventQueue(driver, event);
        if (event2)
            qemuDomainEventQueue(driver, event2);
    }
    if (vm)
        virDomainObjUnlock(vm);
    qemuDriverUnlock(driver);

    return ret;
}


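/* Helper state and hash iterator used when deleting a snapshot without its
 * children: each child is reparented to the deleted snapshot's parent and
 * its metadata file is rewritten. */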
struct snap_reparent {
    struct qemud_driver *driver;
    const char *parent;
    virDomainObjPtr vm;
    int err;
};

static void
qemuDomainSnapshotReparentChildren(void *payload,
                                   const void *name ATTRIBUTE_UNUSED,
                                   void *data)
{
    virDomainSnapshotObjPtr snap = payload;
    struct snap_reparent *rep = data;

    if (rep->err < 0) {
        return;
    }

    VIR_FREE(snap->def->parent);

    if (rep->parent != NULL) {
        snap->def->parent = strdup(rep->parent);

        if (snap->def->parent == NULL) {
            virReportOOMError();
            rep->err = -1;
            return;
        }
    }

    rep->err = qemuDomainSnapshotWriteMetadata(rep->vm, snap,
                                               rep->driver->snapshotDir);
}


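/* Delete a snapshot, optionally including or limited to its descendants,
 * and optionally removing only the libvirt metadata. */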
static int qemuDomainSnapshotDelete(virDomainSnapshotPtr snapshot,
|
|
|
|
unsigned int flags)
|
|
|
|
{
|
|
|
|
struct qemud_driver *driver = snapshot->domain->conn->privateData;
|
|
|
|
virDomainObjPtr vm = NULL;
|
|
|
|
int ret = -1;
|
|
|
|
virDomainSnapshotObjPtr snap = NULL;
|
|
|
|
char uuidstr[VIR_UUID_STRING_BUFLEN];
|
2011-09-21 19:08:51 +00:00
|
|
|
struct qemu_snap_remove rem;
|
2010-12-22 20:44:42 +00:00
|
|
|
struct snap_reparent rep;
|
2011-08-12 13:21:47 +00:00
|
|
|
bool metadata_only = !!(flags & VIR_DOMAIN_SNAPSHOT_DELETE_METADATA_ONLY);
|
2011-08-22 20:26:52 +00:00
|
|
|
int external = 0;
|
2010-04-02 14:10:37 +00:00
|
|
|
|
2011-08-12 13:21:47 +00:00
|
|
|
virCheckFlags(VIR_DOMAIN_SNAPSHOT_DELETE_CHILDREN |
|
2011-08-16 22:23:45 +00:00
|
|
|
VIR_DOMAIN_SNAPSHOT_DELETE_METADATA_ONLY |
|
|
|
|
VIR_DOMAIN_SNAPSHOT_DELETE_CHILDREN_ONLY, -1);
|
2010-04-16 12:04:31 +00:00
|
|
|
|
2010-04-02 14:10:37 +00:00
|
|
|
qemuDriverLock(driver);
|
|
|
|
virUUIDFormat(snapshot->domain->uuid, uuidstr);
|
|
|
|
vm = virDomainFindByUUID(&driver->domains, snapshot->domain->uuid);
|
|
|
|
if (!vm) {
|
|
|
|
qemuReportError(VIR_ERR_NO_DOMAIN,
|
|
|
|
_("no domain with matching uuid '%s'"), uuidstr);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
snap = virDomainSnapshotFindByName(&vm->snapshots, snapshot->name);
|
|
|
|
if (!snap) {
|
|
|
|
qemuReportError(VIR_ERR_NO_DOMAIN_SNAPSHOT,
|
|
|
|
_("no domain snapshot with matching name '%s'"),
|
|
|
|
snapshot->name);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2011-08-22 20:26:52 +00:00
|
|
|
if (!(flags & VIR_DOMAIN_SNAPSHOT_DELETE_METADATA_ONLY)) {
|
|
|
|
if (!(flags & VIR_DOMAIN_SNAPSHOT_DELETE_CHILDREN_ONLY) &&
|
|
|
|
snap->def->state == VIR_DOMAIN_DISK_SNAPSHOT)
|
|
|
|
external++;
|
|
|
|
if (flags & VIR_DOMAIN_SNAPSHOT_DELETE_CHILDREN)
|
|
|
|
virDomainSnapshotForEachDescendant(&vm->snapshots, snap,
|
|
|
|
qemuDomainSnapshotCountExternal,
|
|
|
|
&external);
|
|
|
|
if (external) {
|
|
|
|
qemuReportError(VIR_ERR_CONFIG_UNSUPPORTED,
|
|
|
|
_("deletion of %d external disk snapshots not "
|
|
|
|
"supported yet"), external);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-06-30 09:23:50 +00:00
|
|
|
if (qemuDomainObjBeginJobWithDriver(driver, vm, QEMU_JOB_MODIFY) < 0)
|
2010-04-22 16:01:56 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
2011-08-16 22:23:45 +00:00
|
|
|
if (flags & (VIR_DOMAIN_SNAPSHOT_DELETE_CHILDREN |
|
|
|
|
VIR_DOMAIN_SNAPSHOT_DELETE_CHILDREN_ONLY)) {
|
2010-04-02 14:10:37 +00:00
|
|
|
rem.driver = driver;
|
|
|
|
rem.vm = vm;
|
2011-08-12 13:21:47 +00:00
|
|
|
rem.metadata_only = metadata_only;
|
2010-04-02 14:10:37 +00:00
|
|
|
rem.err = 0;
|
2011-08-24 20:25:46 +00:00
|
|
|
rem.current = false;
|
2011-08-12 13:05:50 +00:00
|
|
|
virDomainSnapshotForEachDescendant(&vm->snapshots,
|
|
|
|
snap,
|
2011-08-12 15:52:08 +00:00
|
|
|
qemuDomainSnapshotDiscardAll,
|
2011-08-12 13:05:50 +00:00
|
|
|
&rem);
|
2010-04-02 14:10:37 +00:00
|
|
|
if (rem.err < 0)
|
2010-04-22 16:01:56 +00:00
|
|
|
goto endjob;
|
2011-08-16 22:23:45 +00:00
|
|
|
if (rem.current) {
|
|
|
|
if (flags & VIR_DOMAIN_SNAPSHOT_DELETE_CHILDREN_ONLY) {
|
|
|
|
snap->def->current = true;
|
|
|
|
if (qemuDomainSnapshotWriteMetadata(vm, snap,
|
|
|
|
driver->snapshotDir) < 0) {
|
|
|
|
qemuReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
_("failed to set snapshot '%s' as current"),
|
|
|
|
snap->def->name);
|
|
|
|
snap->def->current = false;
|
|
|
|
goto endjob;
|
|
|
|
}
|
|
|
|
}
|
2011-08-24 20:25:46 +00:00
|
|
|
vm->current_snapshot = snap;
|
2011-08-16 22:23:45 +00:00
|
|
|
}
|
2010-12-22 20:44:42 +00:00
|
|
|
} else {
|
|
|
|
rep.driver = driver;
|
2011-08-13 17:56:15 +00:00
|
|
|
rep.parent = snap->def->parent;
|
2010-12-22 20:44:42 +00:00
|
|
|
rep.vm = vm;
|
|
|
|
rep.err = 0;
|
2011-08-13 17:56:15 +00:00
|
|
|
virDomainSnapshotForEachChild(&vm->snapshots, snap,
|
|
|
|
qemuDomainSnapshotReparentChildren,
|
|
|
|
&rep);
|
2010-12-22 20:44:42 +00:00
|
|
|
if (rep.err < 0)
|
|
|
|
goto endjob;
|
2010-04-02 14:10:37 +00:00
|
|
|
}
|
|
|
|
|
2011-08-16 22:23:45 +00:00
|
|
|
if (flags & VIR_DOMAIN_SNAPSHOT_DELETE_CHILDREN_ONLY)
|
|
|
|
ret = 0;
|
|
|
|
else
|
|
|
|
ret = qemuDomainSnapshotDiscard(driver, vm, snap, true, metadata_only);
|
2010-04-02 14:10:37 +00:00
|
|
|
|
2010-04-22 16:01:56 +00:00
|
|
|
endjob:
|
2011-06-06 08:28:38 +00:00
|
|
|
if (qemuDomainObjEndJob(driver, vm) == 0)
|
2010-04-22 16:01:56 +00:00
|
|
|
vm = NULL;
|
|
|
|
|
2010-04-02 14:10:37 +00:00
|
|
|
cleanup:
|
|
|
|
if (vm)
|
|
|
|
virDomainObjUnlock(vm);
|
|
|
|
qemuDriverUnlock(driver);
|
|
|
|
return ret;
|
|
|
|
}
|
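For reference, this is roughly how a management client would exercise qemuDomainSnapshotDelete through the public API; the domain name "demo" and snapshot name "pre-upgrade" are hypothetical, and the flag choice simply mirrors the three flags validated above.

#include <stdio.h>
#include <libvirt/libvirt.h>

int main(void)
{
    virConnectPtr conn = virConnectOpen("qemu:///system");
    virDomainPtr dom = NULL;
    virDomainSnapshotPtr snap = NULL;
    int ret = 1;

    if (!conn)
        return 1;
    if (!(dom = virDomainLookupByName(conn, "demo")))            /* hypothetical name */
        goto cleanup;
    if (!(snap = virDomainSnapshotLookupByName(dom, "pre-upgrade", 0)))
        goto cleanup;

    /* Drop this snapshot and everything below it.  METADATA_ONLY would keep
     * the qcow2 state and only forget libvirt's XML; CHILDREN_ONLY would
     * keep this snapshot and delete only its descendants. */
    if (virDomainSnapshotDelete(snap, VIR_DOMAIN_SNAPSHOT_DELETE_CHILDREN) < 0)
        goto cleanup;
    ret = 0;

cleanup:
    if (snap) virDomainSnapshotFree(snap);
    if (dom) virDomainFree(dom);
    virConnectClose(conn);
    return ret;
}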
2010-03-17 15:53:14 +00:00
|
|
|
|
2010-04-17 02:12:45 +00:00
|
|
|
static int qemuDomainMonitorCommand(virDomainPtr domain, const char *cmd,
|
|
|
|
char **result, unsigned int flags)
|
|
|
|
{
|
|
|
|
struct qemud_driver *driver = domain->conn->privateData;
|
|
|
|
virDomainObjPtr vm = NULL;
|
|
|
|
int ret = -1;
|
|
|
|
qemuDomainObjPrivatePtr priv;
|
2011-02-02 15:37:10 +00:00
|
|
|
bool hmp;
|
2010-04-17 02:12:45 +00:00
|
|
|
|
2011-02-02 15:37:10 +00:00
|
|
|
virCheckFlags(VIR_DOMAIN_QEMU_MONITOR_COMMAND_HMP, -1);
|
2010-04-17 02:12:45 +00:00
|
|
|
|
|
|
|
qemuDriverLock(driver);
|
|
|
|
vm = virDomainFindByUUID(&driver->domains, domain->uuid);
|
|
|
|
if (!vm) {
|
|
|
|
char uuidstr[VIR_UUID_STRING_BUFLEN];
|
|
|
|
virUUIDFormat(domain->uuid, uuidstr);
|
|
|
|
qemuReportError(VIR_ERR_NO_DOMAIN,
|
|
|
|
_("no domain with matching uuid '%s'"), uuidstr);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!virDomainObjIsActive(vm)) {
|
|
|
|
qemuReportError(VIR_ERR_OPERATION_INVALID,
|
|
|
|
"%s", _("domain is not running"));
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
priv = vm->privateData;
|
|
|
|
|
2011-05-05 11:48:07 +00:00
|
|
|
qemuDomainObjTaint(driver, vm, VIR_DOMAIN_TAINT_CUSTOM_MONITOR, -1);
|
2010-04-17 02:12:45 +00:00
|
|
|
|
2011-02-02 15:37:10 +00:00
|
|
|
hmp = !!(flags & VIR_DOMAIN_QEMU_MONITOR_COMMAND_HMP);
|
|
|
|
|
2011-06-30 09:23:50 +00:00
|
|
|
if (qemuDomainObjBeginJobWithDriver(driver, vm, QEMU_JOB_MODIFY) < 0)
|
2010-04-17 02:12:45 +00:00
|
|
|
goto cleanup;
|
qemu: fix crash when mixing sync and async monitor jobs
Currently, we attempt to run a sync job and an async job at the same time. It
means that the monitor commands for two jobs can be run in any order.
In the function qemuDomainObjEnterMonitorInternal():
if (priv->job.active == QEMU_JOB_NONE && priv->job.asyncJob) {
if (qemuDomainObjBeginNestedJob(driver, obj) < 0)
We check whether the caller is an async job by priv->job.active and
priv->job.asynJob. But when an async job is running, and a sync job is
also running at the time of the check, then priv->job.active is not
QEMU_JOB_NONE. So we cannot check whether the caller is an async job
in the function qemuDomainObjEnterMonitorInternal(), and must instead
put the burden on the caller to tell us when an async command wants
to do a nested job.
Once the burden is on the caller, then only async monitor enters need
to worry about whether the VM is still running; for sync monitor enter,
the internal return is always 0, so lots of ignore_value can be dropped.
* src/qemu/THREADS.txt: Reflect new rules.
* src/qemu/qemu_domain.h (qemuDomainObjEnterMonitorAsync): New
prototype.
* src/qemu/qemu_process.h (qemuProcessStartCPUs)
(qemuProcessStopCPUs): Add parameter.
* src/qemu/qemu_migration.h (qemuMigrationToFile): Likewise.
(qemuMigrationWaitForCompletion): Make static.
* src/qemu/qemu_domain.c (qemuDomainObjEnterMonitorInternal): Add
parameter.
(qemuDomainObjEnterMonitorAsync): New function.
(qemuDomainObjEnterMonitor, qemuDomainObjEnterMonitorWithDriver):
Update callers.
* src/qemu/qemu_driver.c (qemuDomainSaveInternal)
(qemudDomainCoreDump, doCoreDump, processWatchdogEvent)
(qemudDomainSuspend, qemudDomainResume, qemuDomainSaveImageStartVM)
(qemuDomainSnapshotCreateActive, qemuDomainRevertToSnapshot):
Likewise.
* src/qemu/qemu_process.c (qemuProcessStopCPUs)
(qemuProcessFakeReboot, qemuProcessRecoverMigration)
(qemuProcessRecoverJob, qemuProcessStart): Likewise.
* src/qemu/qemu_migration.c (qemuMigrationToFile)
(qemuMigrationWaitForCompletion, qemuMigrationUpdateJobStatus)
(qemuMigrationJobStart, qemuDomainMigrateGraphicsRelocate)
(doNativeMigrate, doTunnelMigrate, qemuMigrationPerformJob)
(qemuMigrationPerformPhase, qemuMigrationFinish)
(qemuMigrationConfirm): Likewise.
* src/qemu/qemu_hotplug.c: Drop unneeded ignore_value.
2011-07-28 23:18:24 +00:00
|
|
|
qemuDomainObjEnterMonitorWithDriver(driver, vm);
|
2011-02-02 15:37:10 +00:00
|
|
|
ret = qemuMonitorArbitraryCommand(priv->mon, cmd, result, hmp);
|
2010-04-17 02:12:45 +00:00
|
|
|
qemuDomainObjExitMonitorWithDriver(driver, vm);
|
2011-06-06 08:28:38 +00:00
|
|
|
if (qemuDomainObjEndJob(driver, vm) == 0) {
|
2010-04-17 02:12:45 +00:00
|
|
|
vm = NULL;
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
cleanup:
|
|
|
|
if (vm)
|
|
|
|
virDomainObjUnlock(vm);
|
|
|
|
qemuDriverUnlock(driver);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
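A minimal client-side sketch of the entry point above, using the public virDomainQemuMonitorCommand API from libvirt-qemu.h; the domain name "demo" and the "info status" command are illustrative. Note that, as the code above shows, the driver taints the domain (VIR_DOMAIN_TAINT_CUSTOM_MONITOR) whenever this API is used.

#include <stdio.h>
#include <stdlib.h>
#include <libvirt/libvirt.h>
#include <libvirt/libvirt-qemu.h>

int main(void)
{
    virConnectPtr conn = virConnectOpen("qemu:///system");
    virDomainPtr dom;
    char *reply = NULL;

    if (!conn)
        return 1;
    if (!(dom = virDomainLookupByName(conn, "demo"))) {          /* hypothetical name */
        virConnectClose(conn);
        return 1;
    }

    /* The HMP flag sends a human-monitor-protocol command instead of QMP JSON. */
    if (virDomainQemuMonitorCommand(dom, "info status", &reply,
                                    VIR_DOMAIN_QEMU_MONITOR_COMMAND_HMP) == 0) {
        printf("%s\n", reply);
        free(reply);
    }

    virDomainFree(dom);
    virConnectClose(conn);
    return 0;
}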
2010-07-23 15:00:07 +00:00
|
|
|
|
2011-05-05 16:32:21 +00:00
|
|
|
static virDomainPtr qemuDomainAttach(virConnectPtr conn,
|
|
|
|
unsigned int pid,
|
|
|
|
unsigned int flags)
|
|
|
|
{
|
|
|
|
struct qemud_driver *driver = conn->privateData;
|
|
|
|
virDomainObjPtr vm = NULL;
|
|
|
|
virDomainDefPtr def = NULL;
|
|
|
|
virDomainPtr dom = NULL;
|
|
|
|
virDomainChrSourceDefPtr monConfig = NULL;
|
|
|
|
bool monJSON = false;
|
|
|
|
char *pidfile;
|
|
|
|
|
|
|
|
virCheckFlags(0, NULL);
|
|
|
|
|
|
|
|
qemuDriverLock(driver);
|
|
|
|
|
|
|
|
if (!(def = qemuParseCommandLinePid(driver->caps, pid,
|
|
|
|
&pidfile, &monConfig, &monJSON)))
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
if (!monConfig) {
|
|
|
|
qemuReportError(VIR_ERR_CONFIG_UNSUPPORTED,
|
|
|
|
_("No monitor connection for pid %u"),
|
|
|
|
pid);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
if (monConfig->type != VIR_DOMAIN_CHR_TYPE_UNIX) {
|
|
|
|
qemuReportError(VIR_ERR_CONFIG_UNSUPPORTED,
|
|
|
|
_("Cannot connect to monitor connection of type '%s' for pid %u"),
|
|
|
|
virDomainChrTypeToString(monConfig->type), pid);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!(def->name) &&
|
|
|
|
virAsprintf(&def->name, "attach-pid-%u", pid) < 0) {
|
|
|
|
virReportOOMError();
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (virDomainObjIsDuplicate(&driver->domains, def, 1) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
if (qemudCanonicalizeMachine(driver, def) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
if (qemuDomainAssignPCIAddresses(def) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
if (!(vm = virDomainAssignDef(driver->caps,
|
|
|
|
&driver->domains,
|
|
|
|
def, false)))
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
def = NULL;
|
|
|
|
|
2011-06-30 09:23:50 +00:00
|
|
|
if (qemuDomainObjBeginJobWithDriver(driver, vm, QEMU_JOB_MODIFY) < 0)
|
2011-05-05 16:32:21 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
if (qemuProcessAttach(conn, driver, vm, pid,
|
|
|
|
pidfile, monConfig, monJSON) < 0) {
|
|
|
|
monConfig = NULL;
|
|
|
|
goto endjob;
|
|
|
|
}
|
|
|
|
|
|
|
|
monConfig = NULL;
|
|
|
|
|
|
|
|
dom = virGetDomain(conn, vm->def->name, vm->def->uuid);
|
|
|
|
if (dom) dom->id = vm->def->id;
|
|
|
|
|
|
|
|
endjob:
|
2011-06-06 08:28:38 +00:00
|
|
|
if (qemuDomainObjEndJob(driver, vm) == 0) {
|
2011-05-05 16:32:21 +00:00
|
|
|
vm = NULL;
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
cleanup:
|
|
|
|
virDomainDefFree(def);
|
|
|
|
virDomainChrSourceDefFree(monConfig);
|
|
|
|
if (vm)
|
|
|
|
virDomainObjUnlock(vm);
|
|
|
|
qemuDriverUnlock(driver);
|
|
|
|
VIR_FREE(pidfile);
|
|
|
|
return dom;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
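A hedged sketch of how qemuDomainAttach is reached from a client, via the virDomainQemuAttach call exported in libvirt-qemu.h (available in builds of this era that expose the qemu-specific API). As the checks above require, the externally started qemu process must expose a UNIX-socket monitor on its command line or the attach is rejected.

#include <stdio.h>
#include <libvirt/libvirt.h>
#include <libvirt/libvirt-qemu.h>

int main(int argc, char **argv)
{
    virConnectPtr conn;
    virDomainPtr dom;
    unsigned int pid;
    int ret = 1;

    if (argc != 2 || sscanf(argv[1], "%u", &pid) != 1) {
        fprintf(stderr, "usage: %s <qemu-pid>\n", argv[0]);
        return 1;
    }

    if (!(conn = virConnectOpen("qemu:///system")))
        return 1;

    dom = virDomainQemuAttach(conn, pid, 0);
    if (dom) {
        printf("attached as domain %s\n", virDomainGetName(dom));
        virDomainFree(dom);
        ret = 0;
    }

    virConnectClose(conn);
    return ret;
}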
2010-07-23 15:00:07 +00:00
|
|
|
static int
|
|
|
|
qemuDomainOpenConsole(virDomainPtr dom,
|
2011-09-16 12:05:58 +00:00
|
|
|
const char *dev_name,
|
2010-07-23 15:00:07 +00:00
|
|
|
virStreamPtr st,
|
|
|
|
unsigned int flags)
|
|
|
|
{
|
|
|
|
struct qemud_driver *driver = dom->conn->privateData;
|
|
|
|
virDomainObjPtr vm = NULL;
|
|
|
|
char uuidstr[VIR_UUID_STRING_BUFLEN];
|
|
|
|
int ret = -1;
|
|
|
|
int i;
|
|
|
|
virDomainChrDefPtr chr = NULL;
|
|
|
|
|
|
|
|
virCheckFlags(0, -1);
|
|
|
|
|
|
|
|
qemuDriverLock(driver);
|
|
|
|
virUUIDFormat(dom->uuid, uuidstr);
|
|
|
|
vm = virDomainFindByUUID(&driver->domains, dom->uuid);
|
|
|
|
if (!vm) {
|
|
|
|
qemuReportError(VIR_ERR_NO_DOMAIN,
|
|
|
|
_("no domain with matching uuid '%s'"), uuidstr);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!virDomainObjIsActive(vm)) {
|
|
|
|
qemuReportError(VIR_ERR_OPERATION_INVALID,
|
|
|
|
"%s", _("domain is not running"));
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2011-09-16 12:05:58 +00:00
|
|
|
if (dev_name) {
|
2010-07-23 15:00:07 +00:00
|
|
|
if (vm->def->console &&
|
2011-09-16 12:05:58 +00:00
|
|
|
STREQ(dev_name, vm->def->console->info.alias))
|
2010-07-23 15:00:07 +00:00
|
|
|
chr = vm->def->console;
|
|
|
|
for (i = 0 ; !chr && i < vm->def->nserials ; i++) {
|
2011-09-16 12:05:58 +00:00
|
|
|
if (STREQ(dev_name, vm->def->serials[i]->info.alias))
|
2010-07-23 15:00:07 +00:00
|
|
|
chr = vm->def->serials[i];
|
|
|
|
}
|
|
|
|
for (i = 0 ; !chr && i < vm->def->nparallels ; i++) {
|
2011-09-16 12:05:58 +00:00
|
|
|
if (STREQ(dev_name, vm->def->parallels[i]->info.alias))
|
2010-07-23 15:00:07 +00:00
|
|
|
chr = vm->def->parallels[i];
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if (vm->def->console)
|
|
|
|
chr = vm->def->console;
|
|
|
|
else if (vm->def->nserials)
|
|
|
|
chr = vm->def->serials[0];
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!chr) {
|
|
|
|
qemuReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
_("cannot find character device %s"),
|
2011-09-16 12:05:58 +00:00
|
|
|
NULLSTR(dev_name));
|
2010-07-23 15:00:07 +00:00
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2011-01-07 22:45:01 +00:00
|
|
|
if (chr->source.type != VIR_DOMAIN_CHR_TYPE_PTY) {
|
2010-07-23 15:00:07 +00:00
|
|
|
qemuReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
_("character device %s is not using a PTY"),
|
2011-09-16 12:05:58 +00:00
|
|
|
NULLSTR(dev_name));
|
2010-07-23 15:00:07 +00:00
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2011-04-05 10:27:35 +00:00
|
|
|
if (virFDStreamOpenFile(st, chr->source.data.file.path,
|
2011-08-02 17:19:53 +00:00
|
|
|
0, 0, O_RDWR) < 0)
|
2010-07-23 15:00:07 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
ret = 0;
|
|
|
|
cleanup:
|
|
|
|
if (vm)
|
|
|
|
virDomainObjUnlock(vm);
|
|
|
|
qemuDriverUnlock(driver);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
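Client side, the PTY opened above is consumed through a virStream. The sketch below picks the default console by passing a NULL dev_name, mirroring the fallback logic in qemuDomainOpenConsole, and simply copies console output to stdout until the stream ends; the domain name "demo" is hypothetical.

#include <stdio.h>
#include <unistd.h>
#include <libvirt/libvirt.h>

int main(void)
{
    virConnectPtr conn = virConnectOpen("qemu:///system");
    virDomainPtr dom = NULL;
    virStreamPtr st = NULL;
    char buf[1024];
    int n, ret = 1;

    if (!conn)
        return 1;
    if (!(dom = virDomainLookupByName(conn, "demo")))            /* hypothetical name */
        goto cleanup;
    if (!(st = virStreamNew(conn, 0)))
        goto cleanup;

    /* NULL dev_name selects the default console or first serial device. */
    if (virDomainOpenConsole(dom, NULL, st, 0) < 0)
        goto cleanup;

    while ((n = virStreamRecv(st, buf, sizeof(buf))) > 0)
        (void)write(STDOUT_FILENO, buf, n);

    virStreamFinish(st);
    ret = 0;

cleanup:
    if (st) virStreamFree(st);
    if (dom) virDomainFree(dom);
    virConnectClose(conn);
    return ret;
}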
2011-07-22 05:39:37 +00:00
|
|
|
static const char *
|
|
|
|
qemuDiskPathToAlias(virDomainObjPtr vm, const char *path) {
|
|
|
|
int i;
|
|
|
|
char *ret = NULL;
|
snapshot: also support disks by path
I got confused when 'virsh domblkinfo dom disk' required the
path to a disk (which can be ambiguous, since a single file
can back multiple disks), rather than the unambiguous target
device name that I was using in disk snapshots. So, in true
developer fashion, I went for the best of both worlds - all
interfaces that operate on a disk (aka block) now accept
either the target name or the unambiguous path to the backing
file used by the disk.
* src/conf/domain_conf.h (virDomainDiskIndexByName): Add
parameter.
(virDomainDiskPathByName): New prototype.
* src/libvirt_private.syms (domain_conf.h): Export it.
* src/conf/domain_conf.c (virDomainDiskIndexByName): Also allow
searching by path, and decide whether ambiguity is okay.
(virDomainDiskPathByName): New function.
(virDomainDiskRemoveByName, virDomainSnapshotAlignDisks): Update
callers.
* src/qemu/qemu_driver.c (qemudDomainBlockPeek)
(qemuDomainAttachDeviceConfig, qemuDomainUpdateDeviceConfig)
(qemuDomainGetBlockInfo, qemuDiskPathToAlias): Likewise.
* src/qemu/qemu_process.c (qemuProcessFindDomainDiskByPath):
Likewise.
* src/libxl/libxl_driver.c (libxlDomainAttachDeviceDiskLive)
(libxlDomainDetachDeviceDiskLive, libxlDomainAttachDeviceConfig)
(libxlDomainUpdateDeviceConfig): Likewise.
* src/uml/uml_driver.c (umlDomainBlockPeek): Likewise.
* src/xen/xend_internal.c (xenDaemonDomainBlockPeek): Likewise.
* docs/formatsnapshot.html.in: Update documentation.
* tools/virsh.pod (domblkstat, domblkinfo): Likewise.
* docs/schemas/domaincommon.rng (diskTarget): Tighten pattern on
disk targets.
* docs/schemas/domainsnapshot.rng (disksnapshot): Update to match.
* tests/domainsnapshotxml2xmlin/disk_snapshot.xml: Update test.
2011-08-20 02:38:36 +00:00
|
|
|
virDomainDiskDefPtr disk;
|
2011-07-22 05:39:37 +00:00
|
|
|
|
2011-08-20 02:38:36 +00:00
|
|
|
i = virDomainDiskIndexByName(vm->def, path, true);
|
|
|
|
if (i < 0)
|
|
|
|
goto cleanup;
|
2011-07-22 05:39:37 +00:00
|
|
|
|
2011-08-20 02:38:36 +00:00
|
|
|
disk = vm->def->disks[i];
|
2011-07-22 05:39:37 +00:00
|
|
|
|
2011-08-20 02:38:36 +00:00
|
|
|
if (disk->type != VIR_DOMAIN_DISK_TYPE_BLOCK &&
|
|
|
|
disk->type != VIR_DOMAIN_DISK_TYPE_FILE)
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
if (disk->src) {
|
|
|
|
if (virAsprintf(&ret, "drive-%s", disk->info.alias) < 0) {
|
|
|
|
virReportOOMError();
|
|
|
|
return NULL;
|
2011-07-22 05:39:37 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-08-20 02:38:36 +00:00
|
|
|
cleanup:
|
2011-07-22 05:39:37 +00:00
|
|
|
if (!ret) {
|
|
|
|
qemuReportError(VIR_ERR_INVALID_ARG,
|
|
|
|
"%s", _("No device found for specified path"));
|
|
|
|
}
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
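qemuDiskPathToAlias relies on the dual-key lookup described in the commit message above: a disk may be addressed either by its target ("vda") or by its backing file path, and the result handed to the monitor is the QEMU drive id "drive-<alias>". A self-contained sketch of that mapping is below; the table, names, and helper are illustrative only.

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct disk { const char *target; const char *src; const char *alias; };

static char *disk_to_drive_id(const struct disk *disks, size_t n, const char *name)
{
    char *ret = NULL;

    for (size_t i = 0; i < n; i++) {
        if (strcmp(name, disks[i].target) == 0 ||
            (disks[i].src && strcmp(name, disks[i].src) == 0)) {
            if (asprintf(&ret, "drive-%s", disks[i].alias) < 0)
                return NULL;
            break;
        }
    }
    return ret;                  /* caller frees; NULL means not found */
}

int main(void)
{
    struct disk disks[] = {
        { "vda", "/var/lib/libvirt/images/demo.qcow2", "virtio-disk0" },
    };
    char *id = disk_to_drive_id(disks, 1, "vda");

    printf("%s\n", id ? id : "(not found)");
    free(id);
    return 0;
}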
static int
|
|
|
|
qemuDomainBlockJobImpl(virDomainPtr dom, const char *path,
|
|
|
|
unsigned long bandwidth, virDomainBlockJobInfoPtr info,
|
|
|
|
int mode)
|
|
|
|
{
|
|
|
|
struct qemud_driver *driver = dom->conn->privateData;
|
|
|
|
virDomainObjPtr vm = NULL;
|
|
|
|
qemuDomainObjPrivatePtr priv;
|
|
|
|
char uuidstr[VIR_UUID_STRING_BUFLEN];
|
|
|
|
const char *device = NULL;
|
|
|
|
int ret = -1;
|
|
|
|
|
|
|
|
qemuDriverLock(driver);
|
|
|
|
virUUIDFormat(dom->uuid, uuidstr);
|
|
|
|
vm = virDomainFindByUUID(&driver->domains, dom->uuid);
|
|
|
|
if (!vm) {
|
|
|
|
qemuReportError(VIR_ERR_NO_DOMAIN,
|
|
|
|
_("no domain with matching uuid '%s'"), uuidstr);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!virDomainObjIsActive(vm)) {
|
|
|
|
qemuReportError(VIR_ERR_OPERATION_INVALID,
|
|
|
|
"%s", _("domain is not running"));
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
device = qemuDiskPathToAlias(vm, path);
|
|
|
|
if (!device) {
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (qemuDomainObjBeginJobWithDriver(driver, vm, QEMU_JOB_MODIFY) < 0)
|
|
|
|
goto cleanup;
|
2011-07-28 23:18:24 +00:00
|
|
|
qemuDomainObjEnterMonitorWithDriver(driver, vm);
|
2011-07-22 05:39:37 +00:00
|
|
|
priv = vm->privateData;
|
|
|
|
ret = qemuMonitorBlockJob(priv->mon, device, bandwidth, info, mode);
|
|
|
|
qemuDomainObjExitMonitorWithDriver(driver, vm);
|
|
|
|
if (qemuDomainObjEndJob(driver, vm) == 0) {
|
|
|
|
vm = NULL;
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
cleanup:
|
|
|
|
VIR_FREE(device);
|
|
|
|
if (vm)
|
|
|
|
virDomainObjUnlock(vm);
|
|
|
|
qemuDriverUnlock(driver);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
qemuDomainBlockJobAbort(virDomainPtr dom, const char *path, unsigned int flags)
|
|
|
|
{
|
|
|
|
virCheckFlags(0, -1);
|
|
|
|
return qemuDomainBlockJobImpl(dom, path, 0, NULL, BLOCK_JOB_ABORT);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
qemuDomainGetBlockJobInfo(virDomainPtr dom, const char *path,
|
|
|
|
virDomainBlockJobInfoPtr info, unsigned int flags)
|
|
|
|
{
|
|
|
|
virCheckFlags(0, -1);
|
|
|
|
return qemuDomainBlockJobImpl(dom, path, 0, info, BLOCK_JOB_INFO);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
qemuDomainBlockJobSetSpeed(virDomainPtr dom, const char *path,
|
|
|
|
unsigned long bandwidth, unsigned int flags)
|
|
|
|
{
|
|
|
|
virCheckFlags(0, -1);
|
|
|
|
return qemuDomainBlockJobImpl(dom, path, bandwidth, NULL, BLOCK_JOB_SPEED);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
qemuDomainBlockPull(virDomainPtr dom, const char *path, unsigned long bandwidth,
|
|
|
|
unsigned int flags)
|
|
|
|
{
|
2011-09-01 22:35:36 +00:00
|
|
|
int ret;
|
|
|
|
|
2011-07-22 05:39:37 +00:00
|
|
|
virCheckFlags(0, -1);
|
2011-09-01 22:35:36 +00:00
|
|
|
ret = qemuDomainBlockJobImpl(dom, path, bandwidth, NULL, BLOCK_JOB_PULL);
|
|
|
|
if (ret == 0 && bandwidth != 0)
|
|
|
|
ret = qemuDomainBlockJobImpl(dom, path, bandwidth, NULL,
|
|
|
|
BLOCK_JOB_SPEED);
|
|
|
|
return ret;
|
2011-07-22 05:39:37 +00:00
|
|
|
}
|
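Putting the block-job entry points together, a client typically starts a pull, polls the job, and optionally aborts it. A hedged sketch follows; the domain name "demo" and disk target "vda" are hypothetical, and the bandwidth cap is expressed in MiB/s as the API documents. Note virDomainGetBlockJobInfo returns 1 while a job exists, 0 once it has finished, and -1 on error.

#include <stdio.h>
#include <unistd.h>
#include <libvirt/libvirt.h>

int main(void)
{
    virConnectPtr conn = virConnectOpen("qemu:///system");
    virDomainPtr dom = NULL;
    virDomainBlockJobInfo info;
    int ret = 1;

    if (!conn)
        return 1;
    if (!(dom = virDomainLookupByName(conn, "demo")))            /* hypothetical name */
        goto cleanup;

    /* Start the pull capped at 10 MiB/s; as qemuDomainBlockPull shows above,
     * a non-zero bandwidth triggers a follow-up BLOCK_JOB_SPEED call. */
    if (virDomainBlockPull(dom, "vda", 10, 0) < 0)
        goto cleanup;

    while (virDomainGetBlockJobInfo(dom, "vda", &info, 0) == 1) {
        printf("pulled %llu of %llu\n",
               (unsigned long long)info.cur, (unsigned long long)info.end);
        sleep(1);
    }
    ret = 0;

cleanup:
    if (dom) virDomainFree(dom);
    virConnectClose(conn);
    return ret;
}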
2010-07-23 15:00:07 +00:00
|
|
|
|
2007-06-26 22:39:53 +00:00
|
|
|
static virDriver qemuDriver = {
|
Convert all driver struct initializers to C99 style
Change all the driver struct initializers to use the
C99 style, leaving out unused fields. This will make
it possible to add new APIs without changing every
driver. eg change:
qemudDomainResume, /* domainResume */
qemudDomainShutdown, /* domainShutdown */
NULL, /* domainReboot */
qemudDomainDestroy, /* domainDestroy */
to
.domainResume = qemudDomainResume,
.domainShutdown = qemudDomainShutdown,
.domainDestroy = qemudDomainDestroy,
And get rid of any existing C99-style initializers which
set NULL, eg change
.listPools = vboxStorageListPools,
.numOfDefinedPools = NULL,
.listDefinedPools = NULL,
.findPoolSources = NULL,
.poolLookupByName = vboxStoragePoolLookupByName,
to
.listPools = vboxStorageListPools,
.poolLookupByName = vboxStoragePoolLookupByName,
2011-05-13 10:16:31 +00:00
|
|
|
.no = VIR_DRV_QEMU,
|
|
|
|
.name = "QEMU",
|
2011-05-13 13:35:01 +00:00
|
|
|
.open = qemudOpen, /* 0.2.0 */
|
|
|
|
.close = qemudClose, /* 0.2.0 */
|
|
|
|
.supports_feature = qemudSupportsFeature, /* 0.5.0 */
|
|
|
|
.type = qemudGetType, /* 0.2.0 */
|
|
|
|
.version = qemudGetVersion, /* 0.2.0 */
|
|
|
|
.getHostname = virGetHostname, /* 0.3.3 */
|
|
|
|
.getSysinfo = qemuGetSysinfo, /* 0.8.8 */
|
|
|
|
.getMaxVcpus = qemudGetMaxVCPUs, /* 0.2.1 */
|
|
|
|
.nodeGetInfo = nodeGetInfo, /* 0.2.0 */
|
|
|
|
.getCapabilities = qemudGetCapabilities, /* 0.2.1 */
|
|
|
|
.listDomains = qemudListDomains, /* 0.2.0 */
|
|
|
|
.numOfDomains = qemudNumDomains, /* 0.2.0 */
|
|
|
|
.domainCreateXML = qemudDomainCreate, /* 0.2.0 */
|
|
|
|
.domainLookupByID = qemudDomainLookupByID, /* 0.2.0 */
|
|
|
|
.domainLookupByUUID = qemudDomainLookupByUUID, /* 0.2.0 */
|
|
|
|
.domainLookupByName = qemudDomainLookupByName, /* 0.2.0 */
|
|
|
|
.domainSuspend = qemudDomainSuspend, /* 0.2.0 */
|
|
|
|
.domainResume = qemudDomainResume, /* 0.2.0 */
|
2011-06-15 16:49:58 +00:00
|
|
|
.domainShutdown = qemuDomainShutdown, /* 0.2.0 */
|
|
|
|
.domainReboot = qemuDomainReboot, /* 0.9.3 */
|
2011-09-29 08:54:44 +00:00
|
|
|
.domainReset = qemuDomainReset, /* 0.9.7 */
|
2011-07-20 16:41:24 +00:00
|
|
|
.domainDestroy = qemuDomainDestroy, /* 0.2.0 */
|
|
|
|
.domainDestroyFlags = qemuDomainDestroyFlags, /* 0.9.4 */
|
2011-05-13 13:35:01 +00:00
|
|
|
.domainGetOSType = qemudDomainGetOSType, /* 0.2.2 */
|
|
|
|
.domainGetMaxMemory = qemudDomainGetMaxMemory, /* 0.4.2 */
|
|
|
|
.domainSetMaxMemory = qemudDomainSetMaxMemory, /* 0.4.2 */
|
|
|
|
.domainSetMemory = qemudDomainSetMemory, /* 0.4.2 */
|
|
|
|
.domainSetMemoryFlags = qemudDomainSetMemoryFlags, /* 0.9.0 */
|
|
|
|
.domainSetMemoryParameters = qemuDomainSetMemoryParameters, /* 0.8.5 */
|
|
|
|
.domainGetMemoryParameters = qemuDomainGetMemoryParameters, /* 0.8.5 */
|
|
|
|
.domainSetBlkioParameters = qemuDomainSetBlkioParameters, /* 0.9.0 */
|
|
|
|
.domainGetBlkioParameters = qemuDomainGetBlkioParameters, /* 0.9.0 */
|
|
|
|
.domainGetInfo = qemudDomainGetInfo, /* 0.2.0 */
|
|
|
|
.domainGetState = qemuDomainGetState, /* 0.9.2 */
|
2011-05-31 16:34:20 +00:00
|
|
|
.domainGetControlInfo = qemuDomainGetControlInfo, /* 0.9.3 */
|
save: wire up trivial save/restore flags implementations
For all hypervisors that support save and restore, the new API
now performs the same functions as the old.
VBox is excluded from this list, because its existing domainsave
is broken (there is no corresponding domainrestore, and there
is no control over the filename used in the save). A later
patch should change vbox to use its implementation for
managedsave, and teach start to use managedsave results.
* src/libxl/libxl_driver.c (libxlDomainSave): Move guts...
(libxlDomainSaveFlags): ...to new function.
(libxlDomainRestore): Move guts...
(libxlDomainRestoreFlags): ...to new function.
* src/test/test_driver.c (testDomainSave, testDomainSaveFlags)
(testDomainRestore, testDomainRestoreFlags): Likewise.
* src/xen/xen_driver.c (xenUnifiedDomainSave)
(xenUnifiedDomainSaveFlags, xenUnifiedDomainRestore)
(xenUnifiedDomainRestoreFlags): Likewise.
* src/qemu/qemu_driver.c (qemudDomainSave, qemudDomainRestore):
Rename and move guts.
(qemuDomainSave, qemuDomainSaveFlags, qemuDomainRestore)
(qemuDomainRestoreFlags): ...here.
(qemudDomainSaveFlag): Rename...
(qemuDomainSaveInternal): ...to this, and update callers.
2011-07-09 02:55:29 +00:00
|
|
|
.domainSave = qemuDomainSave, /* 0.2.0 */
|
|
|
|
.domainSaveFlags = qemuDomainSaveFlags, /* 0.9.4 */
|
2011-05-13 13:35:01 +00:00
|
|
|
.domainRestore = qemuDomainRestore, /* 0.2.0 */
|
2011-07-09 02:55:29 +00:00
|
|
|
.domainRestoreFlags = qemuDomainRestoreFlags, /* 0.9.4 */
|
2011-07-22 03:31:46 +00:00
|
|
|
.domainSaveImageGetXMLDesc = qemuDomainSaveImageGetXMLDesc, /* 0.9.4 */
|
|
|
|
.domainSaveImageDefineXML = qemuDomainSaveImageDefineXML, /* 0.9.4 */
|
2011-05-13 13:35:01 +00:00
|
|
|
.domainCoreDump = qemudDomainCoreDump, /* 0.7.0 */
|
|
|
|
.domainScreenshot = qemuDomainScreenshot, /* 0.9.2 */
|
2011-07-15 07:01:19 +00:00
|
|
|
.domainSetVcpus = qemuDomainSetVcpus, /* 0.4.4 */
|
|
|
|
.domainSetVcpusFlags = qemuDomainSetVcpusFlags, /* 0.8.5 */
|
2011-05-13 13:35:01 +00:00
|
|
|
.domainGetVcpusFlags = qemudDomainGetVcpusFlags, /* 0.8.5 */
|
|
|
|
.domainPinVcpu = qemudDomainPinVcpu, /* 0.4.4 */
|
2011-06-13 15:42:09 +00:00
|
|
|
.domainPinVcpuFlags = qemudDomainPinVcpuFlags, /* 0.9.3 */
|
2011-06-24 23:09:46 +00:00
|
|
|
.domainGetVcpuPinInfo = qemudDomainGetVcpuPinInfo, /* 0.9.3 */
|
2011-05-13 13:35:01 +00:00
|
|
|
.domainGetVcpus = qemudDomainGetVcpus, /* 0.4.4 */
|
|
|
|
.domainGetMaxVcpus = qemudDomainGetMaxVcpus, /* 0.4.4 */
|
|
|
|
.domainGetSecurityLabel = qemudDomainGetSecurityLabel, /* 0.6.1 */
|
|
|
|
.nodeGetSecurityModel = qemudNodeGetSecurityModel, /* 0.6.1 */
|
|
|
|
.domainGetXMLDesc = qemuDomainGetXMLDesc, /* 0.2.0 */
|
|
|
|
.domainXMLFromNative = qemuDomainXMLFromNative, /* 0.6.4 */
|
|
|
|
.domainXMLToNative = qemuDomainXMLToNative, /* 0.6.4 */
|
|
|
|
.listDefinedDomains = qemudListDefinedDomains, /* 0.2.0 */
|
|
|
|
.numOfDefinedDomains = qemudNumDefinedDomains, /* 0.2.0 */
|
2011-07-11 18:07:50 +00:00
|
|
|
.domainCreate = qemuDomainStart, /* 0.2.0 */
|
|
|
|
.domainCreateWithFlags = qemuDomainStartWithFlags, /* 0.8.2 */
|
2011-05-13 13:35:01 +00:00
|
|
|
.domainDefineXML = qemudDomainDefine, /* 0.2.0 */
|
|
|
|
.domainUndefine = qemudDomainUndefine, /* 0.2.0 */
|
2011-07-20 03:04:15 +00:00
|
|
|
.domainUndefineFlags = qemuDomainUndefineFlags, /* 0.9.4 */
|
2011-05-13 13:35:01 +00:00
|
|
|
.domainAttachDevice = qemuDomainAttachDevice, /* 0.4.1 */
|
|
|
|
.domainAttachDeviceFlags = qemuDomainAttachDeviceFlags, /* 0.7.7 */
|
|
|
|
.domainDetachDevice = qemuDomainDetachDevice, /* 0.5.0 */
|
|
|
|
.domainDetachDeviceFlags = qemuDomainDetachDeviceFlags, /* 0.7.7 */
|
|
|
|
.domainUpdateDeviceFlags = qemuDomainUpdateDeviceFlags, /* 0.8.0 */
|
|
|
|
.domainGetAutostart = qemudDomainGetAutostart, /* 0.2.1 */
|
|
|
|
.domainSetAutostart = qemudDomainSetAutostart, /* 0.2.1 */
|
|
|
|
.domainGetSchedulerType = qemuGetSchedulerType, /* 0.7.0 */
|
|
|
|
.domainGetSchedulerParameters = qemuGetSchedulerParameters, /* 0.7.0 */
|
2011-05-17 21:35:26 +00:00
|
|
|
.domainGetSchedulerParametersFlags = qemuGetSchedulerParametersFlags, /* 0.9.2 */
|
2011-05-13 13:35:01 +00:00
|
|
|
.domainSetSchedulerParameters = qemuSetSchedulerParameters, /* 0.7.0 */
|
2011-05-17 21:35:26 +00:00
|
|
|
.domainSetSchedulerParametersFlags = qemuSetSchedulerParametersFlags, /* 0.9.2 */
|
2011-05-13 13:35:01 +00:00
|
|
|
.domainMigratePerform = qemudDomainMigratePerform, /* 0.5.0 */
|
|
|
|
.domainBlockStats = qemudDomainBlockStats, /* 0.4.1 */
|
2011-09-05 08:22:58 +00:00
|
|
|
.domainBlockStatsFlags = qemudDomainBlockStatsFlags, /* 0.9.5 */
|
2011-05-13 13:35:01 +00:00
|
|
|
.domainInterfaceStats = qemudDomainInterfaceStats, /* 0.4.1 */
|
|
|
|
.domainMemoryStats = qemudDomainMemoryStats, /* 0.7.5 */
|
|
|
|
.domainBlockPeek = qemudDomainBlockPeek, /* 0.4.4 */
|
|
|
|
.domainMemoryPeek = qemudDomainMemoryPeek, /* 0.4.4 */
|
|
|
|
.domainGetBlockInfo = qemuDomainGetBlockInfo, /* 0.8.1 */
|
2011-06-07 01:02:55 +00:00
|
|
|
.nodeGetCPUStats = nodeGetCPUStats, /* 0.9.3 */
|
2011-06-07 01:11:17 +00:00
|
|
|
.nodeGetMemoryStats = nodeGetMemoryStats, /* 0.9.3 */
|
2011-05-13 13:35:01 +00:00
|
|
|
.nodeGetCellsFreeMemory = nodeGetCellsFreeMemory, /* 0.4.4 */
|
|
|
|
.nodeGetFreeMemory = nodeGetFreeMemory, /* 0.4.4 */
|
|
|
|
.domainEventRegister = qemuDomainEventRegister, /* 0.5.0 */
|
|
|
|
.domainEventDeregister = qemuDomainEventDeregister, /* 0.5.0 */
|
|
|
|
.domainMigratePrepare2 = qemudDomainMigratePrepare2, /* 0.5.0 */
|
|
|
|
.domainMigrateFinish2 = qemudDomainMigrateFinish2, /* 0.5.0 */
|
|
|
|
.nodeDeviceDettach = qemudNodeDeviceDettach, /* 0.6.1 */
|
|
|
|
.nodeDeviceReAttach = qemudNodeDeviceReAttach, /* 0.6.1 */
|
|
|
|
.nodeDeviceReset = qemudNodeDeviceReset, /* 0.6.1 */
|
|
|
|
.domainMigratePrepareTunnel = qemudDomainMigratePrepareTunnel, /* 0.7.2 */
|
|
|
|
.isEncrypted = qemuIsEncrypted, /* 0.7.3 */
|
|
|
|
.isSecure = qemuIsSecure, /* 0.7.3 */
|
|
|
|
.domainIsActive = qemuDomainIsActive, /* 0.7.3 */
|
|
|
|
.domainIsPersistent = qemuDomainIsPersistent, /* 0.7.3 */
|
|
|
|
.domainIsUpdated = qemuDomainIsUpdated, /* 0.8.6 */
|
|
|
|
.cpuCompare = qemuCPUCompare, /* 0.7.5 */
|
|
|
|
.cpuBaseline = qemuCPUBaseline, /* 0.7.7 */
|
|
|
|
.domainGetJobInfo = qemuDomainGetJobInfo, /* 0.7.7 */
|
|
|
|
.domainAbortJob = qemuDomainAbortJob, /* 0.7.7 */
|
|
|
|
.domainMigrateSetMaxDowntime = qemuDomainMigrateSetMaxDowntime, /* 0.8.0 */
|
|
|
|
.domainMigrateSetMaxSpeed = qemuDomainMigrateSetMaxSpeed, /* 0.9.0 */
|
2011-08-26 18:10:23 +00:00
|
|
|
.domainMigrateGetMaxSpeed = qemuDomainMigrateGetMaxSpeed, /* 0.9.5 */
|
2011-05-13 13:35:01 +00:00
|
|
|
.domainEventRegisterAny = qemuDomainEventRegisterAny, /* 0.8.0 */
|
|
|
|
.domainEventDeregisterAny = qemuDomainEventDeregisterAny, /* 0.8.0 */
|
|
|
|
.domainManagedSave = qemuDomainManagedSave, /* 0.8.0 */
|
|
|
|
.domainHasManagedSaveImage = qemuDomainHasManagedSaveImage, /* 0.8.0 */
|
|
|
|
.domainManagedSaveRemove = qemuDomainManagedSaveRemove, /* 0.8.0 */
|
|
|
|
.domainSnapshotCreateXML = qemuDomainSnapshotCreateXML, /* 0.8.0 */
|
|
|
|
.domainSnapshotGetXMLDesc = qemuDomainSnapshotGetXMLDesc, /* 0.8.0 */
|
|
|
|
.domainSnapshotNum = qemuDomainSnapshotNum, /* 0.8.0 */
|
|
|
|
.domainSnapshotListNames = qemuDomainSnapshotListNames, /* 0.8.0 */
|
|
|
|
.domainSnapshotLookupByName = qemuDomainSnapshotLookupByName, /* 0.8.0 */
|
|
|
|
.domainHasCurrentSnapshot = qemuDomainHasCurrentSnapshot, /* 0.8.0 */
|
2011-09-25 00:02:24 +00:00
|
|
|
.domainSnapshotGetParent = qemuDomainSnapshotGetParent, /* 0.9.7 */
|
2011-05-13 13:35:01 +00:00
|
|
|
.domainSnapshotCurrent = qemuDomainSnapshotCurrent, /* 0.8.0 */
|
|
|
|
.domainRevertToSnapshot = qemuDomainRevertToSnapshot, /* 0.8.0 */
|
|
|
|
.domainSnapshotDelete = qemuDomainSnapshotDelete, /* 0.8.0 */
|
|
|
|
.qemuDomainMonitorCommand = qemuDomainMonitorCommand, /* 0.8.3 */
|
2011-05-05 16:32:21 +00:00
|
|
|
.qemuDomainAttach = qemuDomainAttach, /* 0.9.4 */
|
2011-05-13 13:35:01 +00:00
|
|
|
.domainOpenConsole = qemuDomainOpenConsole, /* 0.8.6 */
|
|
|
|
.domainInjectNMI = qemuDomainInjectNMI, /* 0.9.2 */
|
2011-02-03 11:09:28 +00:00
|
|
|
.domainMigrateBegin3 = qemuDomainMigrateBegin3, /* 0.9.2 */
|
|
|
|
.domainMigratePrepare3 = qemuDomainMigratePrepare3, /* 0.9.2 */
|
|
|
|
.domainMigratePrepareTunnel3 = qemuDomainMigratePrepareTunnel3, /* 0.9.2 */
|
|
|
|
.domainMigratePerform3 = qemuDomainMigratePerform3, /* 0.9.2 */
|
|
|
|
.domainMigrateFinish3 = qemuDomainMigrateFinish3, /* 0.9.2 */
|
|
|
|
.domainMigrateConfirm3 = qemuDomainMigrateConfirm3, /* 0.9.2 */
|
2011-07-21 07:55:56 +00:00
|
|
|
.domainSendKey = qemuDomainSendKey, /* 0.9.4 */
|
2011-07-22 05:39:37 +00:00
|
|
|
.domainBlockJobAbort = qemuDomainBlockJobAbort, /* 0.9.4 */
|
|
|
|
.domainGetBlockJobInfo = qemuDomainGetBlockJobInfo, /* 0.9.4 */
|
|
|
|
.domainBlockJobSetSpeed = qemuDomainBlockJobSetSpeed, /* 0.9.4 */
|
|
|
|
.domainBlockPull = qemuDomainBlockPull, /* 0.9.4 */
|
2007-06-26 22:39:53 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
|
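The driver table above illustrates the point of the C99 conversion commit: designated initializers let unset callbacks default to NULL, so new entry points can be added to the struct without touching every driver. A minimal self-contained sketch of that pattern (the struct and functions here are illustrative, not libvirt's):

#include <stdio.h>

struct ops {
    const char *name;
    int (*open)(void);
    int (*close)(void);
    int (*reset)(void);          /* newer entry point, not implemented below */
};

static int demo_open(void)  { puts("open");  return 0; }
static int demo_close(void) { puts("close"); return 0; }

static const struct ops demo_ops = {
    .name  = "demo",
    .open  = demo_open,
    .close = demo_close,
    /* .reset intentionally omitted: it is implicitly NULL */
};

int main(void)
{
    demo_ops.open();
    if (demo_ops.reset)          /* callers must check optional slots */
        demo_ops.reset();
    demo_ops.close();
    return 0;
}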
2007-06-26 22:56:14 +00:00
|
|
|
static virStateDriver qemuStateDriver = {
|
Fix return value in virStateInitialize impl for LXC
The LXC driver was mistakenly returning -1 for lxcStartup()
in scenarios that are not an error. This caused the libvirtd
to quit for unprivileged users. This fixes the return code
of LXC driver, and also adds a "name" field to the virStateDriver
struct and logging to make it easier to find these problems
in the future
* src/driver.h: Add a 'name' field to state driver to allow
easy identification during failures
* src/libvirt.c: Log name of failed driver for virStateInit
failures
* src/lxc/lxc_driver.c: Don't return a failure code for
lxcStartup() if LXC is not available on this host, simply
disable the driver.
* src/network/bridge_driver.c, src/node_device/node_device_devkit.c,
src/node_device/node_device_hal.c, src/opennebula/one_driver.c,
src/qemu/qemu_driver.c, src/remote/remote_driver.c,
src/secret/secret_driver.c, src/storage/storage_driver.c,
src/uml/uml_driver.c, src/xen/xen_driver.c: Fill in name
field in virStateDriver struct
2009-11-02 23:18:19 +00:00
|
|
|
.name = "QEMU",
|
2008-10-06 15:40:37 +00:00
|
|
|
.initialize = qemudStartup,
|
|
|
|
.cleanup = qemudShutdown,
|
|
|
|
.reload = qemudReload,
|
|
|
|
.active = qemudActive,
|
2007-06-26 22:56:14 +00:00
|
|
|
};
|
2007-06-26 22:39:53 +00:00
|
|
|
|
nwfilter: resolve deadlock between VM ops and filter update
This is from a bug report and conversation on IRC where Soren reported that while a filter update is occurring on one or more VMs (due to a rule having been edited for example), a deadlock can occur when a VM referencing a filter is started.
The problem is caused by the two locking sequences of
qemu driver, qemu domain, filter # for the VM start operation
filter, qemu_driver, qemu_domain # for the filter update operation
that obviously don't lock in the same order. The problem is the 2nd lock sequence. Here the qemu_driver lock is being grabbed in qemu_driver:qemudVMFilterRebuild()
The following solution is based on the idea of trying to re-arrange the 2nd sequence of locks as follows:
qemu_driver, filter, qemu_driver, qemu_domain
and making the qemu driver recursively lockable so that a second lock can occur, this would then lead to the following net-locking sequence
qemu_driver, filter, qemu_domain
where the 2nd qemu_driver lock has been ( logically ) eliminated.
The 2nd part of the idea is that the sequence of locks (filter, qemu_domain) and (qemu_domain, filter) becomes interchangeable if all code paths where filter AND qemu_domain are locked have a preceding qemu_domain lock that basically blocks their concurrent execution
So, the following code paths exist towards qemu_driver:qemudVMFilterRebuild where we now want to put a qemu_driver lock in front of the filter lock.
-> nwfilterUndefine() [ locks the filter ]
-> virNWFilterTestUnassignDef()
-> virNWFilterTriggerVMFilterRebuild()
-> qemudVMFilterRebuild()
-> nwfilterDefine()
-> virNWFilterPoolAssignDef() [ locks the filter ]
-> virNWFilterTriggerVMFilterRebuild()
-> qemudVMFilterRebuild()
-> nwfilterDriverReload()
-> virNWFilterPoolLoadAllConfigs()
->virNWFilterPoolObjLoad()
-> virNWFilterPoolAssignDef() [ locks the filter ]
-> virNWFilterTriggerVMFilterRebuild()
-> qemudVMFilterRebuild()
-> nwfilterDriverStartup()
-> virNWFilterPoolLoadAllConfigs()
->virNWFilterPoolObjLoad()
-> virNWFilterPoolAssignDef() [ locks the filter ]
-> virNWFilterTriggerVMFilterRebuild()
-> qemudVMFilterRebuild()
Qemu is not the only driver using the nwfilter driver, but also the UML driver calls into it. Therefore qemuVMFilterRebuild() can be exchanged with umlVMFilterRebuild() along with the driver lock of qemu_driver that can now be a uml_driver. Further, since UML and Qemu domains can be running on the same machine, the triggering of a rebuild of the filter can touch both types of drivers and their domains.
In the patch below I am now extending each nwfilter callback driver with functions for locking and unlocking the (VM) driver (UML, QEMU) and introducing new functions for locking all registered callback drivers and unlocking them. Then I am distributing the lock-all-cbdrivers/unlock-all-cbdrivers call into the above call paths. The last shown callpath starting with nwfilterDriverStart() is problematic since it is initialized before the Qemu and UML drivers are, and thus a lock in the path would result in an attempt to lock a NULL pointer -- virNWFilterTriggerVMFilterRebuild() is never called there, so we never lock either the qemu_driver or the uml_driver in that path. Therefore, only the first 3 paths now receive calls to lock and unlock all callback drivers. Now that the locks are distributed where it matters I can remove the qemu_driver and uml_driver lock from qemudVMFilterRebuild() and umlVMFilterRebuild() without requiring the recursive locks.
For now I want to put this out as an RFC patch. I have tested it by 'stretching' the critical section after the define/undefine functions each lock the filter so I can (easily) concurrently execute another VM operation (suspend,start). That code is in this patch and if you want you can de-activate it. It seems to work ok and operations are being blocked while the update is being done.
I still also want to verify the other assumption above that locking filter and qemu_domain always has a preceding qemu_driver lock.
2010-10-13 14:33:26 +00:00
|
|
|
static void
|
2011-02-14 16:09:39 +00:00
|
|
|
qemuVMDriverLock(void) {
|
2010-10-13 14:33:26 +00:00
|
|
|
qemuDriverLock(qemu_driver);
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
|
|
static void
|
2011-02-14 16:09:39 +00:00
|
|
|
qemuVMDriverUnlock(void) {
|
2010-10-13 14:33:26 +00:00
|
|
|
qemuDriverUnlock(qemu_driver);
|
|
|
|
};
|
|
|
|
|
|
|
|
|
2011-02-14 16:09:39 +00:00
|
|
|
static int
|
|
|
|
qemuVMFilterRebuild(virConnectPtr conn ATTRIBUTE_UNUSED,
|
|
|
|
virHashIterator iter, void *data)
|
|
|
|
{
|
|
|
|
virHashForEach(qemu_driver->domains.objs, iter, data);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2010-03-25 17:46:08 +00:00
|
|
|
static virNWFilterCallbackDriver qemuCallbackDriver = {
|
|
|
|
.name = "QEMU",
|
2011-02-14 16:09:39 +00:00
|
|
|
.vmFilterRebuild = qemuVMFilterRebuild,
|
|
|
|
.vmDriverLock = qemuVMDriverLock,
|
|
|
|
.vmDriverUnlock = qemuVMDriverUnlock,
|
2010-03-25 17:46:08 +00:00
|
|
|
};
|
|
|
|
|
2008-11-21 12:16:08 +00:00
|
|
|
int qemuRegister(void) {
|
2007-06-26 23:48:46 +00:00
|
|
|
virRegisterDriver(&qemuDriver);
|
|
|
|
virRegisterStateDriver(&qemuStateDriver);
|
2010-03-25 17:46:08 +00:00
|
|
|
virNWFilterRegisterCallbackDriver(&qemuCallbackDriver);
|
2007-06-26 23:48:46 +00:00
|
|
|
return 0;
|
|
|
|
}
|
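To close, a self-contained sketch of the lock-ordering idea behind qemuVMDriverLock/qemuVMDriverUnlock and the nwfilter callback driver registered in qemuRegister: the filter layer grabs the VM driver lock through the registered callback before taking its own lock, so both the VM-start path and the filter-update path acquire driver then filter in the same order and cannot deadlock. Names here are illustrative; this is not libvirt's locking code.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t driver_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t filter_lock = PTHREAD_MUTEX_INITIALIZER;

struct cb_driver {
    void (*vmDriverLock)(void);
    void (*vmDriverUnlock)(void);
};

static void demo_driver_lock(void)   { pthread_mutex_lock(&driver_lock); }
static void demo_driver_unlock(void) { pthread_mutex_unlock(&driver_lock); }

static const struct cb_driver demo_cb = {
    .vmDriverLock = demo_driver_lock,
    .vmDriverUnlock = demo_driver_unlock,
};

/* Filter update path: lock the VM driver first, then the filter. */
static void filter_update(void)
{
    demo_cb.vmDriverLock();
    pthread_mutex_lock(&filter_lock);
    puts("rebuilding filters for all domains");
    pthread_mutex_unlock(&filter_lock);
    demo_cb.vmDriverUnlock();
}

/* VM start path: same order, so the two paths cannot deadlock. */
static void vm_start(void)
{
    pthread_mutex_lock(&driver_lock);
    pthread_mutex_lock(&filter_lock);
    puts("instantiating filters for one domain");
    pthread_mutex_unlock(&filter_lock);
    pthread_mutex_unlock(&driver_lock);
}

int main(void)
{
    filter_update();
    vm_start();
    return 0;
}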