libvirt/src/qemu/qemu_domain.h

/*
 * qemu_domain.h: QEMU domain private state
 *
 * Copyright (C) 2006-2016 Red Hat, Inc.
 * Copyright (C) 2006 Daniel P. Berrange
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library. If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Author: Daniel P. Berrange <berrange@redhat.com>
 */
#ifndef __QEMU_DOMAIN_H__
# define __QEMU_DOMAIN_H__
# include "virthread.h"
# include "vircgroup.h"
# include "domain_addr.h"
# include "domain_conf.h"
# include "snapshot_conf.h"
# include "qemu_monitor.h"
# include "qemu_agent.h"
# include "qemu_conf.h"
# include "qemu_capabilities.h"
# include "virchrdev.h"
# include "virobject.h"
# include "logging/log_manager.h"
# define QEMU_DOMAIN_FORMAT_LIVE_FLAGS \
    (VIR_DOMAIN_XML_SECURE | \
     VIR_DOMAIN_XML_UPDATE_CPU)

# if ULONG_MAX == 4294967295
/* Qemu has a 64-bit limit, but we are limited by our historical choice of
 * representing bandwidth in a long instead of a 64-bit int. */
#  define QEMU_DOMAIN_MIG_BANDWIDTH_MAX ULONG_MAX
# else
#  define QEMU_DOMAIN_MIG_BANDWIDTH_MAX (INT64_MAX / (1024 * 1024))
# endif
# define JOB_MASK(job) (1 << (job - 1))
# define QEMU_JOB_DEFAULT_MASK \
    (JOB_MASK(QEMU_JOB_QUERY) | \
     JOB_MASK(QEMU_JOB_DESTROY) | \
     JOB_MASK(QEMU_JOB_ABORT))
/* Jobs which have to be tracked in domain state XML. */
# define QEMU_DOMAIN_TRACK_JOBS \
    (JOB_MASK(QEMU_JOB_DESTROY) | \
     JOB_MASK(QEMU_JOB_ASYNC))
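/*
 * The job masks above combine into the bitmap of jobs permitted while an
 * async job holds the domain.  A hedged sketch of widening the default
 * mask via the setter declared later in this header (the real call sites
 * live in the driver .c files, and the extra jobs shown are illustrative):
 *
 *   qemuDomainObjSetAsyncJobMask(vm, QEMU_JOB_DEFAULT_MASK |
 *                                    JOB_MASK(QEMU_JOB_SUSPEND) |
 *                                    JOB_MASK(QEMU_JOB_MIGRATION_OP));
 */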
/* Only 1 job is allowed at any time
 * A job includes *all* monitor commands, even those just querying
 * information, not merely actions */
typedef enum {
    QEMU_JOB_NONE = 0,      /* Always set to 0 for easy if (jobActive) conditions */
    QEMU_JOB_QUERY,         /* Doesn't change any state */
    QEMU_JOB_DESTROY,       /* Destroys the domain (cannot be masked out) */
    QEMU_JOB_SUSPEND,       /* Suspends (stops vCPUs) the domain */
    QEMU_JOB_MODIFY,        /* May change state */
    QEMU_JOB_ABORT,         /* Abort current async job */
    QEMU_JOB_MIGRATION_OP,  /* Operation influencing outgoing migration */

    /* The following two items must always be the last items before JOB_LAST */
    QEMU_JOB_ASYNC,         /* Asynchronous job */
    QEMU_JOB_ASYNC_NESTED,  /* Normal job within an async job */

    QEMU_JOB_LAST
} qemuDomainJob;
VIR_ENUM_DECL(qemuDomainJob)
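/*
 * A hedged sketch of the normal (synchronous) job lifecycle built from the
 * functions declared later in this header; error paths are trimmed and the
 * labels are illustrative:
 *
 *   if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
 *       goto cleanup;
 *   qemuDomainObjEnterMonitor(driver, vm);
 *   ...issue monitor commands...
 *   if (qemuDomainObjExitMonitor(driver, vm) < 0)
 *       goto endjob;
 *  endjob:
 *   qemuDomainObjEndJob(driver, vm);
 */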
/* Async job consists of a series of jobs that may change state. Independent
 * jobs that do not change state (and possibly others if explicitly allowed by
 * current async job) are allowed to be run even if async job is active.
 */
typedef enum {
    QEMU_ASYNC_JOB_NONE = 0,
    QEMU_ASYNC_JOB_MIGRATION_OUT,
    QEMU_ASYNC_JOB_MIGRATION_IN,
    QEMU_ASYNC_JOB_SAVE,
    QEMU_ASYNC_JOB_DUMP,
    QEMU_ASYNC_JOB_SNAPSHOT,
    QEMU_ASYNC_JOB_START,

    QEMU_ASYNC_JOB_LAST
} qemuDomainAsyncJob;
VIR_ENUM_DECL(qemuDomainAsyncJob)
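/*
 * A hedged sketch of the asynchronous job lifecycle (e.g. save or
 * migration).  Monitor access inside an async job goes through the
 * *Async variant, which starts the nested job described above:
 *
 *   if (qemuDomainObjBeginAsyncJob(driver, vm, QEMU_ASYNC_JOB_SAVE) < 0)
 *       goto cleanup;
 *   if (qemuDomainObjEnterMonitorAsync(driver, vm, QEMU_ASYNC_JOB_SAVE) < 0)
 *       goto endjob;
 *   ...issue monitor commands, possibly unlocking the domain object...
 *   if (qemuDomainObjExitMonitor(driver, vm) < 0)
 *       goto endjob;
 *  endjob:
 *   qemuDomainObjEndAsyncJob(driver, vm);
 */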
typedef struct _qemuDomainJobInfo qemuDomainJobInfo;
typedef qemuDomainJobInfo *qemuDomainJobInfoPtr;
struct _qemuDomainJobInfo {
    virDomainJobType type;
    unsigned long long started; /* When the async job started */
    unsigned long long stopped; /* When the domain's CPUs were stopped */
    unsigned long long sent; /* When the source sent status info to the
                                destination (only for migrations). */
    unsigned long long received; /* When the destination host received status
                                    info from the source (migrations only). */

    /* Computed values */
    unsigned long long timeElapsed;
    unsigned long long timeRemaining;
    long long timeDelta; /* delta = received - sent, i.e., the difference
                            between the source and the destination time plus
                            the time between the end of Perform phase on the
                            source and the beginning of Finish phase on the
                            destination. */
    bool timeDeltaSet;

    /* Raw values from QEMU */
    qemuMonitorMigrationStats stats;
};
struct qemuDomainJobObj {
    virCond cond;                       /* Use to coordinate jobs */
    qemuDomainJob active;               /* Currently running job */
    unsigned long long owner;           /* Thread id which set current job */
    const char *ownerAPI;               /* The API which owns the job */
    unsigned long long started;         /* When the current job started */

    virCond asyncCond;                  /* Use to coordinate with async jobs */
    qemuDomainAsyncJob asyncJob;        /* Currently active async job */
    unsigned long long asyncOwner;      /* Thread which set current async job */
    const char *asyncOwnerAPI;          /* The API which owns the async job */
    unsigned long long asyncStarted;    /* When the current async job started */
    int phase;                          /* Job phase (mainly for migrations) */
    unsigned long long mask;            /* Jobs allowed during async job */
    bool dump_memory_only;              /* use dump-guest-memory to do dump */
    qemuDomainJobInfoPtr current;       /* async job progress data */
    qemuDomainJobInfoPtr completed;     /* statistics data of a recently
                                           completed job */
    bool abortJob;                      /* abort of the job requested */
    bool spiceMigration;                /* we asked for spice migration and we
                                         * should wait for it to finish */
    bool spiceMigrated;                 /* spice migration completed */
};

typedef void (*qemuDomainCleanupCallback)(virQEMUDriverPtr driver,
                                          virDomainObjPtr vm);
typedef struct _qemuDomainObjPrivate qemuDomainObjPrivate;
typedef qemuDomainObjPrivate *qemuDomainObjPrivatePtr;
struct _qemuDomainObjPrivate {
    struct qemuDomainJobObj job;

    qemuMonitorPtr mon;
    virDomainChrSourceDefPtr monConfig;
    bool monJSON;
    bool monError;
    unsigned long long monStart;

    qemuAgentPtr agent;
    bool agentError;
    unsigned long long agentStart;

    bool gotShutdown;
    bool beingDestroyed;
    char *pidfile;

    int nvcpupids;
    int *vcpupids;

    virDomainPCIAddressSetPtr pciaddrs;
    virDomainCCWAddressSetPtr ccwaddrs;
    virDomainVirtioSerialAddrSetPtr vioserialaddrs;
    int persistentAddrs;

    virQEMUCapsPtr qemuCaps;
    char *lockState;

    bool fakeReboot;

    int jobs_queued;

    unsigned long migMaxBandwidth;
    char *origname;
    int nbdPort; /* Port used for migration with NBD */
    unsigned short migrationPort;
    int preMigrationState;

    virChrdevsPtr devs;

    qemuDomainCleanupCallback *cleanupCallbacks;
    size_t ncleanupCallbacks;
    size_t ncleanupCallbacks_max;

    virCgroupPtr cgroup;

    virCond unplugFinished; /* signals that unpluggingDevice was unplugged */
    const char *unpluggingDevice; /* alias of the device that is being unplugged */
    char **qemuDevices; /* NULL-terminated list of devices aliases known to QEMU */

    bool hookRun; /* true if there was a hook run over this domain */

    /* Bitmaps below hold data from the auto NUMA feature */
    virBitmapPtr autoNodeset;
    virBitmapPtr autoCpuset;

    bool signalIOError; /* true if the domain condition should be signalled on
                           I/O error */
    bool signalStop; /* true if the domain condition should be signalled on
                        QMP STOP event */
    char *machineName;
    char *libDir;            /* base path for per-domain files */
    char *channelTargetDir;  /* base path for per-domain channel targets */
};
# define QEMU_DOMAIN_DISK_PRIVATE(disk) \
    ((qemuDomainDiskPrivatePtr) (disk)->privateData)
typedef struct _qemuDomainDiskPrivate qemuDomainDiskPrivate;
typedef qemuDomainDiskPrivate *qemuDomainDiskPrivatePtr;
struct _qemuDomainDiskPrivate {
    virObject parent;

    /* ideally we want a smarter way to interlock block jobs on single qemu disk
     * in the future, but for now we just disallow any concurrent job on a
     * single disk */
    bool blockjob;

    /* for some synchronous block jobs, we need to notify the owner */
    int blockJobType;   /* type of the block job from the event */
    int blockJobStatus; /* status of the finished block job */
    bool blockJobSync;  /* the block job needs synchronized termination */

    bool migrating;     /* the disk is being migrated */
};
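/*
 * A hedged sketch of reaching the per-disk private data through the
 * accessor macro above, e.g. to interlock block jobs:
 *
 *   qemuDomainDiskPrivatePtr diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk);
 *
 *   if (diskPriv->blockjob) {
 *       ...refuse to start a second block job on this disk...
 *   }
 */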
typedef enum {
    QEMU_PROCESS_EVENT_WATCHDOG = 0,
    QEMU_PROCESS_EVENT_GUESTPANIC,
    QEMU_PROCESS_EVENT_DEVICE_DELETED,
    QEMU_PROCESS_EVENT_NIC_RX_FILTER_CHANGED,
    QEMU_PROCESS_EVENT_SERIAL_CHANGED,
    QEMU_PROCESS_EVENT_BLOCK_JOB,
    QEMU_PROCESS_EVENT_MONITOR_EOF,

    QEMU_PROCESS_EVENT_LAST
} qemuProcessEventType;

struct qemuProcessEvent {
    virDomainObjPtr vm;
    qemuProcessEventType eventType;
    int action;
    int status;
    void *data;
};
typedef struct _qemuDomainLogContext qemuDomainLogContext;
typedef qemuDomainLogContext *qemuDomainLogContextPtr;
const char *qemuDomainAsyncJobPhaseToString(qemuDomainAsyncJob job,
                                            int phase);
int qemuDomainAsyncJobPhaseFromString(qemuDomainAsyncJob job,
                                      const char *phase);

void qemuDomainEventFlush(int timer, void *opaque);
void qemuDomainEventQueue(virQEMUDriverPtr driver,
                          virObjectEventPtr event);
void qemuDomainEventEmitJobCompleted(virQEMUDriverPtr driver,
                                     virDomainObjPtr vm);

int qemuDomainObjBeginJob(virQEMUDriverPtr driver,
                          virDomainObjPtr obj,
                          qemuDomainJob job)
    ATTRIBUTE_RETURN_CHECK;
int qemuDomainObjBeginAsyncJob(virQEMUDriverPtr driver,
                               virDomainObjPtr obj,
                               qemuDomainAsyncJob asyncJob)
    ATTRIBUTE_RETURN_CHECK;
int qemuDomainObjBeginNestedJob(virQEMUDriverPtr driver,
                                virDomainObjPtr obj,
                                qemuDomainAsyncJob asyncJob)
    ATTRIBUTE_RETURN_CHECK;

void qemuDomainObjEndJob(virQEMUDriverPtr driver,
                         virDomainObjPtr obj);
void qemuDomainObjEndAsyncJob(virQEMUDriverPtr driver,
                              virDomainObjPtr obj);
void qemuDomainObjAbortAsyncJob(virDomainObjPtr obj);
void qemuDomainObjSetJobPhase(virQEMUDriverPtr driver,
                              virDomainObjPtr obj,
                              int phase);
void qemuDomainObjSetAsyncJobMask(virDomainObjPtr obj,
                                  unsigned long long allowedJobs);
void qemuDomainObjRestoreJob(virDomainObjPtr obj,
                             struct qemuDomainJobObj *job);
void qemuDomainObjDiscardAsyncJob(virQEMUDriverPtr driver,
                                  virDomainObjPtr obj);
void qemuDomainObjReleaseAsyncJob(virDomainObjPtr obj);

qemuMonitorPtr qemuDomainGetMonitor(virDomainObjPtr vm)
    ATTRIBUTE_NONNULL(1);
void qemuDomainObjEnterMonitor(virQEMUDriverPtr driver,
                               virDomainObjPtr obj)
    ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2);
int qemuDomainObjExitMonitor(virQEMUDriverPtr driver,
                             virDomainObjPtr obj)
    ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2)
    ATTRIBUTE_RETURN_CHECK;
int qemuDomainObjEnterMonitorAsync(virQEMUDriverPtr driver,
                                   virDomainObjPtr obj,
                                   qemuDomainAsyncJob asyncJob)
    ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2) ATTRIBUTE_RETURN_CHECK;

qemuAgentPtr qemuDomainGetAgent(virDomainObjPtr vm);
void qemuDomainObjEnterAgent(virDomainObjPtr obj)
    ATTRIBUTE_NONNULL(1);
void qemuDomainObjExitAgent(virDomainObjPtr obj)
    ATTRIBUTE_NONNULL(1);
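/*
 * A hedged sketch of guest agent access; qemuDomainAgentAvailable()
 * (declared further below) is the usual availability check:
 *
 *   qemuAgentPtr agent;
 *
 *   if (!qemuDomainAgentAvailable(vm, true))
 *       goto endjob;
 *   agent = qemuDomainGetAgent(vm);
 *   qemuDomainObjEnterAgent(vm);
 *   ...issue agent commands...
 *   qemuDomainObjExitAgent(vm);
 */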
void qemuDomainObjEnterRemote(virDomainObjPtr obj)
    ATTRIBUTE_NONNULL(1);
void qemuDomainObjExitRemote(virDomainObjPtr obj)
    ATTRIBUTE_NONNULL(1);

virDomainDefPtr qemuDomainDefCopy(virQEMUDriverPtr driver,
                                  virDomainDefPtr src,
                                  unsigned int flags);
int qemuDomainDefFormatBuf(virQEMUDriverPtr driver,
                           virDomainDefPtr vm,
                           unsigned int flags,
                           virBuffer *buf);
char *qemuDomainDefFormatXML(virQEMUDriverPtr driver,
                             virDomainDefPtr vm,
                             unsigned int flags);
char *qemuDomainFormatXML(virQEMUDriverPtr driver,
                          virDomainObjPtr vm,
                          unsigned int flags);
char *qemuDomainDefFormatLive(virQEMUDriverPtr driver,
                              virDomainDefPtr def,
                              bool inactive,
                              bool compatible);

void qemuDomainObjTaint(virQEMUDriverPtr driver,
                        virDomainObjPtr obj,
                        virDomainTaintFlags taint,
                        qemuDomainLogContextPtr logCtxt);
void qemuDomainObjCheckTaint(virQEMUDriverPtr driver,
                             virDomainObjPtr obj,
                             qemuDomainLogContextPtr logCtxt);
void qemuDomainObjCheckDiskTaint(virQEMUDriverPtr driver,
                                 virDomainObjPtr obj,
                                 virDomainDiskDefPtr disk,
                                 qemuDomainLogContextPtr logCtxt);
void qemuDomainObjCheckHostdevTaint(virQEMUDriverPtr driver,
                                    virDomainObjPtr obj,
                                    virDomainHostdevDefPtr disk,
                                    qemuDomainLogContextPtr logCtxt);
void qemuDomainObjCheckNetTaint(virQEMUDriverPtr driver,
                                virDomainObjPtr obj,
                                virDomainNetDefPtr net,
                                qemuDomainLogContextPtr logCtxt);

typedef enum {
    QEMU_DOMAIN_LOG_CONTEXT_MODE_START,
    QEMU_DOMAIN_LOG_CONTEXT_MODE_ATTACH,
    QEMU_DOMAIN_LOG_CONTEXT_MODE_STOP,
} qemuDomainLogContextMode;

qemuDomainLogContextPtr qemuDomainLogContextNew(virQEMUDriverPtr driver,
                                                virDomainObjPtr vm,
                                                qemuDomainLogContextMode mode);
int qemuDomainLogContextWrite(qemuDomainLogContextPtr ctxt,
                              const char *fmt, ...) ATTRIBUTE_FMT_PRINTF(2, 3);
ssize_t qemuDomainLogContextRead(qemuDomainLogContextPtr ctxt,
                                 char **msg);
int qemuDomainLogContextGetWriteFD(qemuDomainLogContextPtr ctxt);
void qemuDomainLogContextMarkPosition(qemuDomainLogContextPtr ctxt);
void qemuDomainLogContextRef(qemuDomainLogContextPtr ctxt);
void qemuDomainLogContextFree(qemuDomainLogContextPtr ctxt);
virLogManagerPtr qemuDomainLogContextGetManager(qemuDomainLogContextPtr ctxt);
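/*
 * A hedged sketch of the log context API around process startup; the
 * mode and error handling are illustrative:
 *
 *   qemuDomainLogContextPtr logCtxt;
 *
 *   if (!(logCtxt = qemuDomainLogContextNew(driver, vm,
 *                                           QEMU_DOMAIN_LOG_CONTEXT_MODE_START)))
 *       goto cleanup;
 *   qemuDomainLogContextMarkPosition(logCtxt);
 *   ...spawn QEMU, then qemuDomainLogContextRead() back any startup errors...
 *   qemuDomainLogContextFree(logCtxt);
 */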

const char *qemuFindQemuImgBinary(virQEMUDriverPtr driver);

int qemuDomainSnapshotWriteMetadata(virDomainObjPtr vm,
                                    virDomainSnapshotObjPtr snapshot,
                                    virCapsPtr caps,
                                    char *snapshotDir);
int qemuDomainSnapshotForEachQcow2(virQEMUDriverPtr driver,
                                   virDomainObjPtr vm,
                                   virDomainSnapshotObjPtr snap,
                                   const char *op,
                                   bool try_all);
int qemuDomainSnapshotDiscard(virQEMUDriverPtr driver,
                              virDomainObjPtr vm,
                              virDomainSnapshotObjPtr snap,
                              bool update_current,
                              bool metadata_only);

typedef struct _virQEMUSnapRemove virQEMUSnapRemove;
typedef virQEMUSnapRemove *virQEMUSnapRemovePtr;
struct _virQEMUSnapRemove {
    virQEMUDriverPtr driver;
    virDomainObjPtr vm;
    int err;
    bool metadata_only;
    bool current;
};

int qemuDomainSnapshotDiscardAll(void *payload,
                                 const void *name,
                                 void *data);
int qemuDomainSnapshotDiscardAllMetadata(virQEMUDriverPtr driver,
                                         virDomainObjPtr vm);

void qemuDomainRemoveInactive(virQEMUDriverPtr driver,
                              virDomainObjPtr vm);
void qemuDomainSetFakeReboot(virQEMUDriverPtr driver,
                             virDomainObjPtr vm,
                             bool value);

bool qemuDomainJobAllowed(qemuDomainObjPrivatePtr priv,
                          qemuDomainJob job);

int qemuDomainCheckDiskPresence(virQEMUDriverPtr driver,
                                virDomainObjPtr vm,
                                bool start_with_state);
int qemuDomainDetermineDiskChain(virQEMUDriverPtr driver,
                                 virDomainObjPtr vm,
                                 virDomainDiskDefPtr disk,
                                 bool force_probe,
                                 bool report_broken);
bool qemuDomainDiskSourceDiffers(virConnectPtr conn,
                                 virDomainDiskDefPtr disk,
                                 virDomainDiskDefPtr origDisk);
bool qemuDomainDiskChangeSupported(virDomainDiskDefPtr disk,
                                   virDomainDiskDefPtr orig_disk);
int qemuDomainStorageFileInit(virQEMUDriverPtr driver,
                              virDomainObjPtr vm,
                              virStorageSourcePtr src);
char *qemuDomainStorageAlias(const char *device, int depth);

int qemuDomainCleanupAdd(virDomainObjPtr vm,
                         qemuDomainCleanupCallback cb);
void qemuDomainCleanupRemove(virDomainObjPtr vm,
                             qemuDomainCleanupCallback cb);
void qemuDomainCleanupRun(virQEMUDriverPtr driver,
                          virDomainObjPtr vm);
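/*
 * A hedged sketch of the cleanup callback API above; the callback name is
 * illustrative.  Registered callbacks are run by qemuDomainCleanupRun()
 * when the domain shuts down:
 *
 *   static void
 *   myDomainCleanup(virQEMUDriverPtr driver, virDomainObjPtr vm)
 *   {
 *       ...release per-domain resources...
 *   }
 *
 *   if (qemuDomainCleanupAdd(vm, myDomainCleanup) < 0)
 *       goto cleanup;
 */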

extern virDomainXMLPrivateDataCallbacks virQEMUDriverPrivateDataCallbacks;
extern virDomainXMLNamespace virQEMUDriverDomainXMLNamespace;
extern virDomainDefParserConfig virQEMUDriverDomainDefParserConfig;

int qemuDomainUpdateDeviceList(virQEMUDriverPtr driver,
                               virDomainObjPtr vm, int asyncJob);

int qemuDomainUpdateMemoryDeviceInfo(virQEMUDriverPtr driver,
                                     virDomainObjPtr vm,
                                     int asyncJob);

bool qemuDomainDefCheckABIStability(virQEMUDriverPtr driver,
                                    virDomainDefPtr src,
                                    virDomainDefPtr dst);

bool qemuDomainAgentAvailable(virDomainObjPtr vm,
                              bool reportError);

int qemuDomainJobInfoUpdateTime(qemuDomainJobInfoPtr jobInfo)
    ATTRIBUTE_NONNULL(1);
int qemuDomainJobInfoUpdateDowntime(qemuDomainJobInfoPtr jobInfo)
    ATTRIBUTE_NONNULL(1);
int qemuDomainJobInfoToInfo(qemuDomainJobInfoPtr jobInfo,
                            virDomainJobInfoPtr info)
    ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2);
int qemuDomainJobInfoToParams(qemuDomainJobInfoPtr jobInfo,
                              int *type,
                              virTypedParameterPtr *params,
                              int *nparams)
    ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2)
    ATTRIBUTE_NONNULL(3) ATTRIBUTE_NONNULL(4);

int qemuDomainSupportsBlockJobs(virDomainObjPtr vm, bool *modern)
    ATTRIBUTE_NONNULL(1);
bool qemuDomainDiskBlockJobIsActive(virDomainDiskDefPtr disk);
bool qemuDomainHasBlockjob(virDomainObjPtr vm, bool copy_only)
    ATTRIBUTE_NONNULL(1);

int qemuDomainAlignMemorySizes(virDomainDefPtr def);
void qemuDomainMemoryDeviceAlignSize(virDomainDefPtr def,
                                     virDomainMemoryDefPtr mem);

virDomainChrDefPtr qemuFindAgentConfig(virDomainDefPtr def);

bool qemuDomainMachineIsQ35(const virDomainDef *def);
bool qemuDomainMachineIsI440FX(const virDomainDef *def);
bool qemuDomainMachineNeedsFDC(const virDomainDef *def);
bool qemuDomainMachineIsS390CCW(const virDomainDef *def);
bool qemuDomainMachineHasBuiltinIDE(const virDomainDef *def);

int qemuDomainUpdateCurrentMemorySize(virQEMUDriverPtr driver,
                                      virDomainObjPtr vm);

unsigned long long qemuDomainGetMemLockLimitBytes(virDomainDefPtr def);
bool qemuDomainRequiresMemLock(virDomainDefPtr def);
int qemuDomainAdjustMaxMemLock(virDomainObjPtr vm);

int qemuDomainDefValidateMemoryHotplug(const virDomainDef *def,
                                       virQEMUCapsPtr qemuCaps,
                                       const virDomainMemoryDef *mem);

bool qemuDomainHasVcpuPids(virDomainObjPtr vm);
pid_t qemuDomainGetVcpuPid(virDomainObjPtr vm, unsigned int vcpu);
int qemuDomainDetectVcpuPids(virQEMUDriverPtr driver, virDomainObjPtr vm,
                             int asyncJob);

bool qemuDomainSupportsNicdev(virDomainDefPtr def,
                              virQEMUCapsPtr qemuCaps,
                              virDomainNetDefPtr net);
bool qemuDomainSupportsNetdev(virDomainDefPtr def,
                              virQEMUCapsPtr qemuCaps,
                              virDomainNetDefPtr net);

int qemuDomainNetVLAN(virDomainNetDefPtr def);

int qemuDomainSetPrivatePaths(char **domainLibDir,
                              char **domainChannelTargetDir,
                              const char *confLibDir,
                              const char *confChannelDir,
                              const char *domainName,
                              int domainId);
#endif /* __QEMU_DOMAIN_H__ */