libvirt/src/qemu/qemu_domain.h

/*
* qemu_domain.h: QEMU domain private state
*
* Copyright (C) 2006-2016 Red Hat, Inc.
* Copyright (C) 2006 Daniel P. Berrange
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library. If not, see
* <http://www.gnu.org/licenses/>.
*
* Author: Daniel P. Berrange <berrange@redhat.com>
*/
#ifndef __QEMU_DOMAIN_H__
# define __QEMU_DOMAIN_H__
# include "virthread.h"
# include "vircgroup.h"
# include "virperf.h"
# include "domain_addr.h"
# include "domain_conf.h"
# include "snapshot_conf.h"
# include "qemu_monitor.h"
# include "qemu_agent.h"
# include "qemu_conf.h"
# include "qemu_capabilities.h"
# include "qemu_migration_params.h"
# include "virmdev.h"
# include "virchrdev.h"
# include "virobject.h"
# include "logging/log_manager.h"
# define QEMU_DOMAIN_FORMAT_LIVE_FLAGS \
(VIR_DOMAIN_XML_SECURE)
# if ULONG_MAX == 4294967295
/* QEMU has a 64-bit limit, but we are limited by our historical choice of
* representing bandwidth in a long instead of a 64-bit int. */
# define QEMU_DOMAIN_MIG_BANDWIDTH_MAX ULONG_MAX
# else
# define QEMU_DOMAIN_MIG_BANDWIDTH_MAX (INT64_MAX / (1024 * 1024))
# endif
# define JOB_MASK(job) (job == 0 ? 0 : 1 << (job - 1))
# define QEMU_JOB_DEFAULT_MASK \
(JOB_MASK(QEMU_JOB_QUERY) | \
JOB_MASK(QEMU_JOB_DESTROY) | \
JOB_MASK(QEMU_JOB_ABORT))
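/*
 * Illustrative sketch (mirrors what the migration code does, not a
 * definition from this file): an async job may widen the set of sync jobs
 * allowed to run concurrently by installing a custom mask:
 *
 *   qemuDomainObjSetAsyncJobMask(vm,
 *                                QEMU_JOB_DEFAULT_MASK |
 *                                JOB_MASK(QEMU_JOB_SUSPEND) |
 *                                JOB_MASK(QEMU_JOB_MIGRATION_OP));
 */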
/* Jobs which have to be tracked in domain state XML. */
# define QEMU_DOMAIN_TRACK_JOBS \
(JOB_MASK(QEMU_JOB_DESTROY) | \
JOB_MASK(QEMU_JOB_ASYNC))
/* Only one job is allowed at any time.
 * A job includes *all* monitor commands, even those that merely query
 * information rather than performing an action. */
typedef enum {
QEMU_JOB_NONE = 0, /* Always set to 0 for easy if (jobActive) conditions */
QEMU_JOB_QUERY, /* Doesn't change any state */
QEMU_JOB_DESTROY, /* Destroys the domain (cannot be masked out) */
QEMU_JOB_SUSPEND, /* Suspends (stops vCPUs) the domain */
QEMU_JOB_MODIFY, /* May change state */
QEMU_JOB_ABORT, /* Abort current async job */
QEMU_JOB_MIGRATION_OP, /* Operation influencing outgoing migration */
/* The following two items must always be the last items before JOB_LAST */
QEMU_JOB_ASYNC, /* Asynchronous job */
QEMU_JOB_ASYNC_NESTED, /* Normal job within an async job */
QEMU_JOB_LAST
} qemuDomainJob;
VIR_ENUM_DECL(qemuDomainJob)
/* An async job consists of a series of jobs that may change state. Independent
 * jobs that do not change state (and possibly others, if explicitly allowed by
 * the current async job) may still run while an async job is active.
 */
typedef enum {
QEMU_ASYNC_JOB_NONE = 0,
QEMU_ASYNC_JOB_MIGRATION_OUT,
QEMU_ASYNC_JOB_MIGRATION_IN,
QEMU_ASYNC_JOB_SAVE,
QEMU_ASYNC_JOB_DUMP,
QEMU_ASYNC_JOB_SNAPSHOT,
QEMU_ASYNC_JOB_START,
QEMU_ASYNC_JOB_LAST
} qemuDomainAsyncJob;
VIR_ENUM_DECL(qemuDomainAsyncJob)
typedef enum {
QEMU_DOMAIN_JOB_STATUS_NONE = 0,
QEMU_DOMAIN_JOB_STATUS_ACTIVE,
QEMU_DOMAIN_JOB_STATUS_MIGRATING,
QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED,
QEMU_DOMAIN_JOB_STATUS_PAUSED,
QEMU_DOMAIN_JOB_STATUS_POSTCOPY,
QEMU_DOMAIN_JOB_STATUS_COMPLETED,
QEMU_DOMAIN_JOB_STATUS_FAILED,
QEMU_DOMAIN_JOB_STATUS_CANCELED,
} qemuDomainJobStatus;
typedef enum {
QEMU_DOMAIN_JOB_STATS_TYPE_NONE = 0,
QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION,
QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP,
QEMU_DOMAIN_JOB_STATS_TYPE_MEMDUMP,
} qemuDomainJobStatsType;
typedef struct _qemuDomainMirrorStats qemuDomainMirrorStats;
typedef qemuDomainMirrorStats *qemuDomainMirrorStatsPtr;
struct _qemuDomainMirrorStats {
unsigned long long transferred;
unsigned long long total;
};
typedef struct _qemuDomainJobInfo qemuDomainJobInfo;
typedef qemuDomainJobInfo *qemuDomainJobInfoPtr;
struct _qemuDomainJobInfo {
qemuDomainJobStatus status;
virDomainJobOperation operation;
unsigned long long started; /* When the async job started */
unsigned long long stopped; /* When the domain's CPUs were stopped */
unsigned long long sent; /* When the source sent status info to the
destination (only for migrations). */
unsigned long long received; /* When the destination host received status
info from the source (migrations only). */
/* Computed values */
unsigned long long timeElapsed;
long long timeDelta; /* delta = received - sent, i.e., the difference
between the source and the destination time plus
the time between the end of Perform phase on the
source and the beginning of Finish phase on the
destination. */
bool timeDeltaSet;
/* Raw values from QEMU */
qemuDomainJobStatsType statsType;
union {
qemuMonitorMigrationStats mig;
qemuMonitorDumpStats dump;
} stats;
qemuDomainMirrorStats mirrorStats;
};
typedef struct _qemuDomainJobObj qemuDomainJobObj;
typedef qemuDomainJobObj *qemuDomainJobObjPtr;
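/*
 * Locking scheme, distilled from the original design notes: job.cond and
 * job.active coordinate synchronous jobs, while job.asyncCond and
 * job.asyncJob coordinate asynchronous jobs (such as migration) that may
 * take arbitrarily long while the monitor stays usable for other commands.
 * While an async job is active, only the sync jobs allowed by job.mask may
 * run; any method talking to the monitor must check this first, then
 * acquire job.cond, and recheck after reacquiring the domain object lock.
 * To protect its own internal monitor commands, an async job starts a
 * nested job (QEMU_JOB_ASYNC_NESTED) before entering the monitor; the
 * nested job only acquires job.cond and sets job.active.
 */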
struct _qemuDomainJobObj {
virCond cond; /* Used to coordinate jobs */
qemuDomainJob active; /* Currently running job */
unsigned long long owner; /* Thread id which set current job */
const char *ownerAPI; /* The API which owns the job */
unsigned long long started; /* When the current job started */
virCond asyncCond; /* Used to coordinate with async jobs */
qemuDomainAsyncJob asyncJob; /* Currently active async job */
unsigned long long asyncOwner; /* Thread which set current async job */
const char *asyncOwnerAPI; /* The API which owns the async job */
unsigned long long asyncStarted; /* When the current async job started */
int phase; /* Job phase (mainly for migrations) */
unsigned long long mask; /* Jobs allowed during async job */
qemuDomainJobInfoPtr current; /* async job progress data */
qemuDomainJobInfoPtr completed; /* statistics data of a recently completed job */
bool abortJob; /* abort of the job requested */
bool spiceMigration; /* we asked for spice migration and we
* should wait for it to finish */
bool spiceMigrated; /* spice migration completed */
char *error; /* job event completion error */
bool dumpCompleted; /* dump completed */
qemuMigrationParamsPtr migParams;
unsigned long apiFlags; /* flags passed to the API which started the async job */
};
typedef void (*qemuDomainCleanupCallback)(virQEMUDriverPtr driver,
virDomainObjPtr vm);
# define QEMU_DOMAIN_MASTER_KEY_LEN 32 /* 32 bytes for 256 bit random key */
/* helper data types for async device unplug */
typedef enum {
QEMU_DOMAIN_UNPLUGGING_DEVICE_STATUS_NONE = 0,
QEMU_DOMAIN_UNPLUGGING_DEVICE_STATUS_OK,
QEMU_DOMAIN_UNPLUGGING_DEVICE_STATUS_GUEST_REJECTED,
} qemuDomainUnpluggingDeviceStatus;
typedef struct _qemuDomainUnpluggingDevice qemuDomainUnpluggingDevice;
typedef qemuDomainUnpluggingDevice *qemuDomainUnpluggingDevicePtr;
struct _qemuDomainUnpluggingDevice {
const char *alias;
qemuDomainUnpluggingDeviceStatus status;
};
typedef enum {
QEMU_DOMAIN_NS_MOUNT = 0,
QEMU_DOMAIN_NS_LAST
} qemuDomainNamespace;
VIR_ENUM_DECL(qemuDomainNamespace)
bool qemuDomainNamespaceEnabled(virDomainObjPtr vm,
qemuDomainNamespace ns);
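/*
 * Usage sketch (illustrative): code that creates device nodes first checks
 * whether the domain was started in a private mount namespace:
 *
 *   if (qemuDomainNamespaceEnabled(vm, QEMU_DOMAIN_NS_MOUNT)) {
 *       ...also create the node in the domain's private /dev...
 *   }
 */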
/* Type of domain secret */
typedef enum {
VIR_DOMAIN_SECRET_INFO_TYPE_PLAIN = 0,
VIR_DOMAIN_SECRET_INFO_TYPE_AES, /* utilize GNUTLS_CIPHER_AES_256_CBC */
VIR_DOMAIN_SECRET_INFO_TYPE_LAST
} qemuDomainSecretInfoType;
typedef struct _qemuDomainSecretPlain qemuDomainSecretPlain;
typedef struct _qemuDomainSecretPlain *qemuDomainSecretPlainPtr;
struct _qemuDomainSecretPlain {
char *username;
uint8_t *secret;
size_t secretlen;
};
# define QEMU_DOMAIN_AES_IV_LEN 16 /* 16 bytes for a 128 bit random
                                    * initialization vector */
typedef struct _qemuDomainSecretAES qemuDomainSecretAES;
typedef struct _qemuDomainSecretAES *qemuDomainSecretAESPtr;
struct _qemuDomainSecretAES {
char *username;
char *alias; /* generated alias for secret */
char *iv; /* base64 encoded initialization vector */
char *ciphertext; /* encoded/encrypted secret */
};
typedef struct _qemuDomainSecretInfo qemuDomainSecretInfo;
typedef qemuDomainSecretInfo *qemuDomainSecretInfoPtr;
struct _qemuDomainSecretInfo {
qemuDomainSecretInfoType type;
union {
qemuDomainSecretPlain plain;
qemuDomainSecretAES aes;
} s;
};
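/*
 * Minimal consumption sketch (illustrative; the real consumers live in the
 * command line building code): callers switch on 'type' to pick the union
 * member:
 *
 *   if (secinfo->type == VIR_DOMAIN_SECRET_INFO_TYPE_AES)
 *       ...reference secinfo->s.aes.alias/iv/ciphertext, which qemu
 *       decrypts with the domain master key...
 *   else
 *       ...use secinfo->s.plain.secret directly...
 */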
typedef struct _qemuDomainObjPrivate qemuDomainObjPrivate;
typedef qemuDomainObjPrivate *qemuDomainObjPrivatePtr;
struct _qemuDomainObjPrivate {
virQEMUDriverPtr driver;
qemuDomainJobObj job;
virBitmapPtr namespaces;
qemuMonitorPtr mon;
virDomainChrSourceDefPtr monConfig;
bool monJSON;
bool monError;
unsigned long long monStart;
qemuAgentPtr agent;
bool agentError;
bool gotShutdown;
bool beingDestroyed;
char *pidfile;
virDomainPCIAddressSetPtr pciaddrs;
virDomainUSBAddressSetPtr usbaddrs;
virQEMUCapsPtr qemuCaps;
char *lockState;
bool fakeReboot;
virTristateBool allowReboot;
int jobs_queued;
unsigned long migMaxBandwidth;
char *origname;
int nbdPort; /* Port used for migration with NBD */
unsigned short migrationPort;
int preMigrationState;
virChrdevsPtr devs;
qemuDomainCleanupCallback *cleanupCallbacks;
size_t ncleanupCallbacks;
size_t ncleanupCallbacks_max;
virCgroupPtr cgroup;
virPerfPtr perf;
qemuDomainUnpluggingDevice unplug;
char **qemuDevices; /* NULL-terminated list of device aliases known to QEMU */
bool hookRun; /* true if there was a hook run over this domain */
/* Bitmaps below hold data from the auto NUMA feature */
virBitmapPtr autoNodeset;
virBitmapPtr autoCpuset;
bool signalIOError; /* true if the domain condition should be signalled on
I/O error */
bool signalStop; /* true if the domain condition should be signalled on
QMP STOP event */
char *machineName;
char *libDir; /* base path for per-domain files */
char *channelTargetDir; /* base path for per-domain channel targets */
/* random masterKey and length for encryption (not to be saved in our */
/* private XML) - need to restore at process reconnect */
uint8_t *masterKey;
size_t masterKeyLen;
/* true if a memory device alias does not correspond to its slot number */
bool memAliasOrderMismatch;
/* for migrations using TLS with a secret (not to be saved in our */
/* private XML). */
qemuDomainSecretInfoPtr migSecinfo;
/* CPU def used to start the domain when it differs from the one actually
* provided by QEMU. */
virCPUDefPtr origCPU;
/* If true virtlogd is used as stdio handler for character devices. */
bool chardevStdioLogd;
/* Tracks blockjob state for vm. Valid only while reconnecting to qemu. */
virTristateBool reconnectBlockjobs;
/* Migration capabilities. Rechecked on reconnect, not to be saved in
* private XML. */
virBitmapPtr migrationCaps;
/* true if qemu-pr-helper process is running for the domain */
bool prDaemonRunning;
};
# define QEMU_DOMAIN_PRIVATE(vm) \
((qemuDomainObjPrivatePtr) (vm)->privateData)
# define QEMU_DOMAIN_DISK_PRIVATE(disk) \
((qemuDomainDiskPrivatePtr) (disk)->privateData)
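/*
 * Usage sketch (illustrative): driver methods fetch private data through
 * these accessors instead of casting privateData by hand:
 *
 *   qemuDomainObjPrivatePtr priv = QEMU_DOMAIN_PRIVATE(vm);
 *   if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT)
 *       ...
 */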
typedef struct _qemuDomainDiskPrivate qemuDomainDiskPrivate;
typedef qemuDomainDiskPrivate *qemuDomainDiskPrivatePtr;
struct _qemuDomainDiskPrivate {
virObject parent;
/* ideally we want a smarter way to interlock block jobs on a single qemu
 * disk in the future, but for now we just disallow any concurrent job on a
 * single disk */
bool blockjob;
/* for some synchronous block jobs, we need to notify the owner */
int blockJobType; /* type of the block job from the event */
int blockJobStatus; /* status of the finished block job */
char *blockJobError; /* block job completed event error */
bool blockJobSync; /* the block job needs synchronized termination */
bool migrating; /* the disk is being migrated */
virStorageSourcePtr migrSource; /* disk source object used for NBD migration */
/* information about the device */
bool tray; /* device has tray */
bool removable; /* device media can be removed/changed */
};
# define QEMU_DOMAIN_STORAGE_SOURCE_PRIVATE(src) \
((qemuDomainStorageSourcePrivatePtr) (src)->privateData)
typedef struct _qemuDomainStorageSourcePrivate qemuDomainStorageSourcePrivate;
typedef qemuDomainStorageSourcePrivate *qemuDomainStorageSourcePrivatePtr;
struct _qemuDomainStorageSourcePrivate {
virObject parent;
/* data required for authentication to the storage source */
qemuDomainSecretInfoPtr secinfo;
/* data required for decryption of encrypted storage source */
qemuDomainSecretInfoPtr encinfo;
};
virObjectPtr qemuDomainStorageSourcePrivateNew(void);
typedef struct _qemuDomainVcpuPrivate qemuDomainVcpuPrivate;
typedef qemuDomainVcpuPrivate *qemuDomainVcpuPrivatePtr;
struct _qemuDomainVcpuPrivate {
virObject parent;
pid_t tid; /* vcpu thread id */
int enable_id; /* order in which the vcpus were enabled in qemu */
int qemu_id; /* ID reported by qemu as 'CPU' in query-cpus */
char *alias;
virTristateBool halted;
/* information for hotpluggable cpus */
char *type;
int socket_id;
int core_id;
int thread_id;
int node_id;
int vcpus;
};
# define QEMU_DOMAIN_VCPU_PRIVATE(vcpu) \
((qemuDomainVcpuPrivatePtr) (vcpu)->privateData)
struct qemuDomainDiskInfo {
bool removable;
bool locked;
bool tray;
bool tray_open;
bool empty;
int io_status;
char *nodename;
};
# define QEMU_DOMAIN_CHR_SOURCE_PRIVATE(dev) \
((qemuDomainChrSourcePrivatePtr) (dev)->privateData)
typedef struct _qemuDomainChrSourcePrivate qemuDomainChrSourcePrivate;
typedef qemuDomainChrSourcePrivate *qemuDomainChrSourcePrivatePtr;
struct _qemuDomainChrSourcePrivate {
virObject parent;
/* for char devices using a secret
 * NB: *not* to be written to qemu domain object XML */
qemuDomainSecretInfoPtr secinfo;
};
typedef struct _qemuDomainVsockPrivate qemuDomainVsockPrivate;
typedef qemuDomainVsockPrivate *qemuDomainVsockPrivatePtr;
struct _qemuDomainVsockPrivate {
virObject parent;
int vhostfd;
};
typedef enum {
QEMU_PROCESS_EVENT_WATCHDOG = 0,
QEMU_PROCESS_EVENT_GUESTPANIC,
QEMU_PROCESS_EVENT_DEVICE_DELETED,
QEMU_PROCESS_EVENT_NIC_RX_FILTER_CHANGED,
QEMU_PROCESS_EVENT_SERIAL_CHANGED,
QEMU_PROCESS_EVENT_BLOCK_JOB,
QEMU_PROCESS_EVENT_MONITOR_EOF,
QEMU_PROCESS_EVENT_LAST
} qemuProcessEventType;
struct qemuProcessEvent {
virDomainObjPtr vm;
qemuProcessEventType eventType;
int action;
int status;
void *data;
};
void qemuProcessEventFree(struct qemuProcessEvent *event);
typedef struct _qemuDomainLogContext qemuDomainLogContext;
typedef qemuDomainLogContext *qemuDomainLogContextPtr;
typedef struct _qemuDomainSaveCookie qemuDomainSaveCookie;
typedef qemuDomainSaveCookie *qemuDomainSaveCookiePtr;
struct _qemuDomainSaveCookie {
virObject parent;
virCPUDefPtr cpu;
};
qemuDomainSaveCookiePtr qemuDomainSaveCookieNew(virDomainObjPtr vm);
const char *qemuDomainAsyncJobPhaseToString(qemuDomainAsyncJob job,
int phase);
int qemuDomainAsyncJobPhaseFromString(qemuDomainAsyncJob job,
const char *phase);
void qemuDomainEventFlush(int timer, void *opaque);
void qemuDomainEventQueue(virQEMUDriverPtr driver,
virObjectEventPtr event);
void qemuDomainEventEmitJobCompleted(virQEMUDriverPtr driver,
virDomainObjPtr vm);
int qemuDomainObjBeginJob(virQEMUDriverPtr driver,
virDomainObjPtr obj,
qemuDomainJob job)
ATTRIBUTE_RETURN_CHECK;
int qemuDomainObjBeginAsyncJob(virQEMUDriverPtr driver,
virDomainObjPtr obj,
qemuDomainAsyncJob asyncJob,
virDomainJobOperation operation,
unsigned long apiFlags)
ATTRIBUTE_RETURN_CHECK;
int qemuDomainObjBeginNestedJob(virQEMUDriverPtr driver,
virDomainObjPtr obj,
qemuDomainAsyncJob asyncJob)
ATTRIBUTE_RETURN_CHECK;
void qemuDomainObjEndJob(virQEMUDriverPtr driver,
virDomainObjPtr obj);
void qemuDomainObjEndAsyncJob(virQEMUDriverPtr driver,
virDomainObjPtr obj);
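/*
 * Typical call pattern, a sketch of the rules documented in
 * src/qemu/THREADS.txt (error handling abbreviated):
 *
 *   if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
 *       goto cleanup;
 *   if (!virDomainObjIsActive(vm))
 *       goto endjob;
 *   ...change the domain state...
 *  endjob:
 *   qemuDomainObjEndJob(driver, vm);
 */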
void qemuDomainObjAbortAsyncJob(virDomainObjPtr obj);
void qemuDomainObjSetJobPhase(virQEMUDriverPtr driver,
virDomainObjPtr obj,
int phase);
void qemuDomainObjSetAsyncJobMask(virDomainObjPtr obj,
unsigned long long allowedJobs);
void qemuDomainObjRestoreJob(virDomainObjPtr obj,
qemuDomainJobObjPtr job);
void qemuDomainObjDiscardAsyncJob(virQEMUDriverPtr driver,
virDomainObjPtr obj);
void qemuDomainObjReleaseAsyncJob(virDomainObjPtr obj);
qemuMonitorPtr qemuDomainGetMonitor(virDomainObjPtr vm)
ATTRIBUTE_NONNULL(1);
void qemuDomainObjEnterMonitor(virQEMUDriverPtr driver,
virDomainObjPtr obj)
ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2);
int qemuDomainObjExitMonitor(virQEMUDriverPtr driver,
virDomainObjPtr obj)
ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2)
ATTRIBUTE_RETURN_CHECK;
int qemuDomainObjEnterMonitorAsync(virQEMUDriverPtr driver,
virDomainObjPtr obj,
qemuDomainAsyncJob asyncJob)
ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2) ATTRIBUTE_RETURN_CHECK;
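/*
 * Illustrative call pattern for the async variant (a sketch only:
 * qemuDomainObjExitMonitor() is declared elsewhere in this header, the
 * async job constant comes from the qemuDomainAsyncJob enum above, and
 * the monitor query is a stand-in for whatever command the caller runs):
 *
 *   if (qemuDomainObjEnterMonitorAsync(driver, vm,
 *                                      QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
 *       goto cleanup;
 *   rc = qemuMonitorGetMigrationStats(priv->mon, &stats);
 *   if (qemuDomainObjExitMonitor(driver, vm) < 0)
 *       rc = -1;
 */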
qemuAgentPtr qemuDomainObjEnterAgent(virDomainObjPtr obj)
ATTRIBUTE_NONNULL(1);
void qemuDomainObjExitAgent(virDomainObjPtr obj, qemuAgentPtr agent)
ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2);
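/*
 * Sketch of the matching agent pattern (qemuAgentFSFreeze() is assumed
 * from qemu_agent.h; callers are expected to check availability first
 * via qemuDomainAgentAvailable(), declared later in this header):
 *
 *   qemuAgentPtr agent = qemuDomainObjEnterAgent(vm);
 *   ret = qemuAgentFSFreeze(agent, NULL, 0);
 *   qemuDomainObjExitAgent(vm, agent);
 */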
void qemuDomainObjEnterRemote(virDomainObjPtr obj)
ATTRIBUTE_NONNULL(1);
void qemuDomainObjExitRemote(virDomainObjPtr obj)
ATTRIBUTE_NONNULL(1);
virDomainDefPtr qemuDomainDefCopy(virQEMUDriverPtr driver,
virDomainDefPtr src,
unsigned int flags);
int qemuDomainDefFormatBuf(virQEMUDriverPtr driver,
virDomainDefPtr vm,
unsigned int flags,
virBuffer *buf);
char *qemuDomainDefFormatXML(virQEMUDriverPtr driver,
virDomainDefPtr vm,
unsigned int flags);
char *qemuDomainFormatXML(virQEMUDriverPtr driver,
virDomainObjPtr vm,
unsigned int flags);
char *qemuDomainDefFormatLive(virQEMUDriverPtr driver,
virDomainDefPtr def,
virCPUDefPtr origCPU,
bool inactive,
bool compatible);
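/*
 * Example (sketch): formatting a running domain's configuration with the
 * migratable flag, one common caller choice; error handling is elided:
 *
 *   char *xml = qemuDomainFormatXML(driver, vm, VIR_DOMAIN_XML_MIGRATABLE);
 *   if (!xml)
 *       goto cleanup;
 */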
void qemuDomainObjTaint(virQEMUDriverPtr driver,
virDomainObjPtr obj,
virDomainTaintFlags taint,
qemuDomainLogContextPtr logCtxt);
void qemuDomainObjCheckTaint(virQEMUDriverPtr driver,
virDomainObjPtr obj,
qemuDomainLogContextPtr logCtxt);
void qemuDomainObjCheckDiskTaint(virQEMUDriverPtr driver,
virDomainObjPtr obj,
virDomainDiskDefPtr disk,
qemuDomainLogContextPtr logCtxt);
void qemuDomainObjCheckHostdevTaint(virQEMUDriverPtr driver,
virDomainObjPtr obj,
virDomainHostdevDefPtr hostdev,
qemuDomainLogContextPtr logCtxt);
void qemuDomainObjCheckNetTaint(virQEMUDriverPtr driver,
virDomainObjPtr obj,
virDomainNetDefPtr net,
qemuDomainLogContextPtr logCtxt);
typedef enum {
QEMU_DOMAIN_LOG_CONTEXT_MODE_START,
QEMU_DOMAIN_LOG_CONTEXT_MODE_ATTACH,
QEMU_DOMAIN_LOG_CONTEXT_MODE_STOP,
} qemuDomainLogContextMode;
qemuDomainLogContextPtr qemuDomainLogContextNew(virQEMUDriverPtr driver,
virDomainObjPtr vm,
qemuDomainLogContextMode mode);
int qemuDomainLogContextWrite(qemuDomainLogContextPtr ctxt,
const char *fmt, ...) ATTRIBUTE_FMT_PRINTF(2, 3);
ssize_t qemuDomainLogContextRead(qemuDomainLogContextPtr ctxt,
char **msg);
int qemuDomainLogContextGetWriteFD(qemuDomainLogContextPtr ctxt);
void qemuDomainLogContextMarkPosition(qemuDomainLogContextPtr ctxt);
virLogManagerPtr qemuDomainLogContextGetManager(qemuDomainLogContextPtr ctxt);
int qemuDomainLogAppendMessage(virQEMUDriverPtr driver,
virDomainObjPtr vm,
const char *fmt,
...) ATTRIBUTE_FMT_PRINTF(3, 4);
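/*
 * Sketch of the assumed log-context lifecycle around process start: mark
 * the current end of the log, then read back anything QEMU wrote if the
 * startup fails (the reporting step is a placeholder):
 *
 *   qemuDomainLogContextPtr logCtxt =
 *       qemuDomainLogContextNew(driver, vm, QEMU_DOMAIN_LOG_CONTEXT_MODE_START);
 *   qemuDomainLogContextMarkPosition(logCtxt);
 *   ... spawn QEMU ...
 *   char *msg = NULL;
 *   if (startupFailed && qemuDomainLogContextRead(logCtxt, &msg) >= 0)
 *       ... report msg ...
 */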
const char *qemuFindQemuImgBinary(virQEMUDriverPtr driver);
int qemuDomainSnapshotWriteMetadata(virDomainObjPtr vm,
virDomainSnapshotObjPtr snapshot,
virCapsPtr caps,
virDomainXMLOptionPtr xmlopt,
char *snapshotDir);
int qemuDomainSnapshotForEachQcow2(virQEMUDriverPtr driver,
virDomainObjPtr vm,
virDomainSnapshotObjPtr snap,
const char *op,
bool try_all);
int qemuDomainSnapshotDiscard(virQEMUDriverPtr driver,
virDomainObjPtr vm,
virDomainSnapshotObjPtr snap,
bool update_current,
bool metadata_only);
typedef struct _virQEMUSnapRemove virQEMUSnapRemove;
typedef virQEMUSnapRemove *virQEMUSnapRemovePtr;
struct _virQEMUSnapRemove {
virQEMUDriverPtr driver;
virDomainObjPtr vm;
int err;
bool metadata_only;
bool current;
};
int qemuDomainSnapshotDiscardAll(void *payload,
const void *name,
void *data);
int qemuDomainSnapshotDiscardAllMetadata(virQEMUDriverPtr driver,
virDomainObjPtr vm);
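/*
 * qemuDomainSnapshotDiscardAll() matches the virHashIterator signature
 * (payload, name, data) so it can walk the snapshot list with a
 * virQEMUSnapRemove as the opaque data. A sketch, assuming
 * virDomainSnapshotForEach() from snapshot_conf.h:
 *
 *   virQEMUSnapRemove rem = { .driver = driver, .vm = vm,
 *                             .metadata_only = true };
 *   virDomainSnapshotForEach(vm->snapshots, qemuDomainSnapshotDiscardAll,
 *                            &rem);
 *   if (rem.err < 0)
 *       goto cleanup;
 */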
void qemuDomainRemoveInactive(virQEMUDriverPtr driver,
virDomainObjPtr vm);
void qemuDomainRemoveInactiveJob(virQEMUDriverPtr driver,
virDomainObjPtr vm);
void qemuDomainSetFakeReboot(virQEMUDriverPtr driver,
virDomainObjPtr vm,
bool value);
bool qemuDomainJobAllowed(qemuDomainObjPrivatePtr priv,
qemuDomainJob job);
int qemuDomainCheckDiskStartupPolicy(virQEMUDriverPtr driver,
virDomainObjPtr vm,
size_t diskIndex,
bool cold_boot);
int qemuDomainCheckDiskPresence(virQEMUDriverPtr driver,
virDomainObjPtr vm,
unsigned int flags);
int qemuDomainDetermineDiskChain(virQEMUDriverPtr driver,
virDomainObjPtr vm,
virDomainDiskDefPtr disk,
bool force_probe,
bool report_broken);
bool qemuDomainDiskSourceDiffers(virDomainDiskDefPtr disk,
virDomainDiskDefPtr origDisk);
bool qemuDomainDiskChangeSupported(virDomainDiskDefPtr disk,
virDomainDiskDefPtr orig_disk);
int qemuDomainStorageFileInit(virQEMUDriverPtr driver,
virDomainObjPtr vm,
virStorageSourcePtr src,
virStorageSourcePtr parent);
char *qemuDomainStorageAlias(const char *device, int depth);
void qemuDomainDiskChainElementRevoke(virQEMUDriverPtr driver,
virDomainObjPtr vm,
virStorageSourcePtr elem);
int qemuDomainDiskChainElementPrepare(virQEMUDriverPtr driver,
virDomainObjPtr vm,
virStorageSourcePtr elem,
bool readonly,
bool newSource);
int qemuDomainCleanupAdd(virDomainObjPtr vm,
qemuDomainCleanupCallback cb);
void qemuDomainCleanupRemove(virDomainObjPtr vm,
qemuDomainCleanupCallback cb);
void qemuDomainCleanupRun(virQEMUDriverPtr driver,
virDomainObjPtr vm);
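/*
 * Sketch of the cleanup-callback pattern (myTeardown is a hypothetical
 * callback matching qemuDomainCleanupCallback):
 *
 *   if (qemuDomainCleanupAdd(vm, myTeardown) < 0)
 *       goto error;
 *   ... if the resource was already released by hand, unregister it ...
 *   qemuDomainCleanupRemove(vm, myTeardown);
 *   ... otherwise qemuDomainCleanupRun() fires it at process shutdown ...
 */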
void qemuDomainObjPrivateDataClear(qemuDomainObjPrivatePtr priv);
extern virDomainXMLPrivateDataCallbacks virQEMUDriverPrivateDataCallbacks;
extern virDomainXMLNamespace virQEMUDriverDomainXMLNamespace;
extern virDomainDefParserConfig virQEMUDriverDomainDefParserConfig;
extern virDomainABIStability virQEMUDriverDomainABIStability;
extern virSaveCookieCallbacks virQEMUDriverDomainSaveCookie;
int qemuDomainUpdateDeviceList(virQEMUDriverPtr driver,
virDomainObjPtr vm, int asyncJob);
int qemuDomainUpdateMemoryDeviceInfo(virQEMUDriverPtr driver,
virDomainObjPtr vm,
int asyncJob);
bool qemuDomainDefCheckABIStability(virQEMUDriverPtr driver,
virDomainDefPtr src,
virDomainDefPtr dst);
bool qemuDomainCheckABIStability(virQEMUDriverPtr driver,
virDomainObjPtr vm,
virDomainDefPtr dst);
bool qemuDomainAgentAvailable(virDomainObjPtr vm,
bool reportError);
int qemuDomainJobInfoUpdateTime(qemuDomainJobInfoPtr jobInfo)
ATTRIBUTE_NONNULL(1);
int qemuDomainJobInfoUpdateDowntime(qemuDomainJobInfoPtr jobInfo)
ATTRIBUTE_NONNULL(1);
int qemuDomainJobInfoToInfo(qemuDomainJobInfoPtr jobInfo,
virDomainJobInfoPtr info)
ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2);
int qemuDomainJobInfoToParams(qemuDomainJobInfoPtr jobInfo,
int *type,
virTypedParameterPtr *params,
int *nparams)
ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2)
ATTRIBUTE_NONNULL(3) ATTRIBUTE_NONNULL(4);
int qemuDomainSupportsBlockJobs(virDomainObjPtr vm)
ATTRIBUTE_NONNULL(1);
bool qemuDomainDiskBlockJobIsActive(virDomainDiskDefPtr disk);
bool qemuDomainHasBlockjob(virDomainObjPtr vm, bool copy_only)
ATTRIBUTE_NONNULL(1);
int qemuDomainAlignMemorySizes(virDomainDefPtr def);
void qemuDomainMemoryDeviceAlignSize(virDomainDefPtr def,
virDomainMemoryDefPtr mem);
virDomainChrDefPtr qemuFindAgentConfig(virDomainDefPtr def);
bool qemuDomainIsQ35(const virDomainDef *def);
bool qemuDomainIsI440FX(const virDomainDef *def);
bool qemuDomainHasPCIRoot(const virDomainDef *def);
bool qemuDomainHasPCIeRoot(const virDomainDef *def);
bool qemuDomainNeedsFDC(const virDomainDef *def);
bool qemuDomainIsS390CCW(const virDomainDef *def);
bool qemuDomainIsVirt(const virDomainDef *def);
bool qemuDomainIsPSeries(const virDomainDef *def);
bool qemuDomainHasBuiltinIDE(const virDomainDef *def);
bool qemuDomainMachineIsQ35(const char *machine);
bool qemuDomainMachineIsI440FX(const char *machine);
bool qemuDomainMachineNeedsFDC(const char *machine);
bool qemuDomainMachineIsS390CCW(const char *machine);
bool qemuDomainMachineIsVirt(const char *machine,
const virArch arch);
bool qemuDomainMachineIsPSeries(const char *machine,
const virArch arch);
bool qemuDomainMachineHasBuiltinIDE(const char *machine);
int qemuDomainUpdateCurrentMemorySize(virQEMUDriverPtr driver,
virDomainObjPtr vm);
unsigned long long qemuDomainGetMemLockLimitBytes(virDomainDefPtr def);
int qemuDomainAdjustMaxMemLock(virDomainObjPtr vm);
int qemuDomainDefValidateMemoryHotplug(const virDomainDef *def,
virQEMUCapsPtr qemuCaps,
const virDomainMemoryDef *mem);
bool qemuDomainSupportsNewVcpuHotplug(virDomainObjPtr vm);
bool qemuDomainHasVcpuPids(virDomainObjPtr vm);
pid_t qemuDomainGetVcpuPid(virDomainObjPtr vm, unsigned int vcpuid);
int qemuDomainValidateVcpuInfo(virDomainObjPtr vm);
int qemuDomainRefreshVcpuInfo(virQEMUDriverPtr driver,
virDomainObjPtr vm,
int asyncJob,
bool state);
bool qemuDomainGetVcpuHalted(virDomainObjPtr vm, unsigned int vcpu);
int qemuDomainRefreshVcpuHalted(virQEMUDriverPtr driver,
virDomainObjPtr vm,
int asyncJob);
bool qemuDomainSupportsNicdev(virDomainDefPtr def,
virDomainNetDefPtr net);
bool qemuDomainNetSupportsMTU(virDomainNetType type);
int qemuDomainSetPrivatePaths(virQEMUDriverPtr driver,
virDomainObjPtr vm);
virDomainDiskDefPtr qemuDomainDiskByName(virDomainDefPtr def, const char *name);
char *qemuDomainGetMasterKeyFilePath(const char *libDir);
int qemuDomainMasterKeyReadFile(qemuDomainObjPrivatePtr priv);
int qemuDomainWriteMasterKeyFile(virQEMUDriverPtr driver,
virDomainObjPtr vm);
int qemuDomainMasterKeyCreate(virDomainObjPtr vm);
void qemuDomainMasterKeyRemove(qemuDomainObjPrivatePtr priv);
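/*
 * Sketch of the assumed master-key lifecycle: created once the per-domain
 * state directory exists, re-read from disk on daemon reconnect, and
 * removed just before that directory is deleted:
 *
 *   if (qemuDomainMasterKeyCreate(vm) < 0)
 *       goto error;
 *   ...
 *   qemuDomainMasterKeyRemove(priv);
 */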
bool qemuDomainSupportsEncryptedSecret(qemuDomainObjPrivatePtr priv);
void qemuDomainSecretInfoFree(qemuDomainSecretInfoPtr *secinfo)
ATTRIBUTE_NONNULL(1);
void qemuDomainSecretInfoDestroy(qemuDomainSecretInfoPtr secinfo);
void qemuDomainSecretDiskDestroy(virDomainDiskDefPtr disk)
ATTRIBUTE_NONNULL(1);
bool qemuDomainStorageSourceHasAuth(virStorageSourcePtr src)
ATTRIBUTE_NONNULL(1);
bool qemuDomainDiskHasEncryptionSecret(virStorageSourcePtr src)
ATTRIBUTE_NONNULL(1);
qemuDomainSecretInfoPtr
qemuDomainSecretInfoTLSNew(qemuDomainObjPrivatePtr priv,
const char *srcAlias,
const char *secretUUID);
void qemuDomainSecretHostdevDestroy(virDomainHostdevDefPtr hostdev)
ATTRIBUTE_NONNULL(1);
int qemuDomainSecretHostdevPrepare(qemuDomainObjPrivatePtr priv,
virDomainHostdevDefPtr hostdev)
ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2);
void qemuDomainSecretChardevDestroy(virDomainChrSourceDefPtr dev)
ATTRIBUTE_NONNULL(1);
int qemuDomainSecretChardevPrepare(virQEMUDriverConfigPtr cfg,
qemuDomainObjPrivatePtr priv,
const char *chrAlias,
virDomainChrSourceDefPtr dev)
ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2) ATTRIBUTE_NONNULL(3)
ATTRIBUTE_NONNULL(4);
void qemuDomainSecretDestroy(virDomainObjPtr vm)
ATTRIBUTE_NONNULL(1);
int qemuDomainSecretPrepare(virQEMUDriverPtr driver,
virDomainObjPtr vm)
ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2);
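/*
 * Sketch: secrets are prepared in one pass before the command line is
 * built, then wiped once they have been consumed (ordering assumed):
 *
 *   if (qemuDomainSecretPrepare(driver, vm) < 0)
 *       goto cleanup;
 *   ... build command line ...
 *   qemuDomainSecretDestroy(vm);
 */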
int qemuDomainDefValidateDiskLunSource(const virStorageSource *src)
ATTRIBUTE_NONNULL(1);
int qemuDomainDeviceDefValidateDisk(const virDomainDiskDef *disk,
virQEMUCapsPtr qemuCaps);
int qemuDomainPrepareChannel(virDomainChrDefPtr chr,
const char *domainChannelTargetDir)
ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2);
void qemuDomainPrepareChardevSourceTLS(virDomainChrSourceDefPtr source,
virQEMUDriverConfigPtr cfg)
ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2);
void qemuDomainPrepareChardevSource(virDomainDefPtr def,
virQEMUDriverConfigPtr cfg)
ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2);
int qemuDomainPrepareShmemChardev(virDomainShmemDefPtr shmem)
ATTRIBUTE_NONNULL(1);
bool qemuDomainVcpuHotplugIsInOrder(virDomainDefPtr def)
ATTRIBUTE_NONNULL(1);
void qemuDomainVcpuPersistOrder(virDomainDefPtr def)
ATTRIBUTE_NONNULL(1);
int qemuDomainCheckMonitor(virQEMUDriverPtr driver,
virDomainObjPtr vm,
qemuDomainAsyncJob asyncJob);
bool qemuDomainSupportsVideoVga(virDomainVideoDefPtr video,
virQEMUCapsPtr qemuCaps);
int qemuDomainGetHostdevPath(virDomainDefPtr def,
virDomainHostdevDefPtr dev,
bool teardown,
size_t *npaths,
char ***path,
int **perms);
int qemuDomainBuildNamespace(virQEMUDriverConfigPtr cfg,
virSecurityManagerPtr mgr,
virDomainObjPtr vm);
int qemuDomainCreateNamespace(virQEMUDriverPtr driver,
virDomainObjPtr vm);
void qemuDomainDestroyNamespace(virQEMUDriverPtr driver,
virDomainObjPtr vm);
bool qemuDomainNamespaceAvailable(qemuDomainNamespace ns);
int qemuDomainNamespaceSetupDisk(virDomainObjPtr vm,
virStorageSourcePtr src);
int qemuDomainNamespaceTeardownDisk(virDomainObjPtr vm,
virStorageSourcePtr src);
int qemuDomainNamespaceSetupHostdev(virDomainObjPtr vm,
virDomainHostdevDefPtr hostdev);
int qemuDomainNamespaceTeardownHostdev(virDomainObjPtr vm,
virDomainHostdevDefPtr hostdev);
int qemuDomainNamespaceSetupMemory(virDomainObjPtr vm,
virDomainMemoryDefPtr memory);
int qemuDomainNamespaceTeardownMemory(virDomainObjPtr vm,
virDomainMemoryDefPtr memory);
int qemuDomainNamespaceSetupChardev(virDomainObjPtr vm,
virDomainChrDefPtr chr);
int qemuDomainNamespaceTeardownChardev(virDomainObjPtr vm,
virDomainChrDefPtr chr);
int qemuDomainNamespaceSetupRNG(virDomainObjPtr vm,
virDomainRNGDefPtr rng);
int qemuDomainNamespaceTeardownRNG(virDomainObjPtr vm,
virDomainRNGDefPtr rng);
int qemuDomainNamespaceSetupInput(virDomainObjPtr vm,
virDomainInputDefPtr input);
int qemuDomainNamespaceTeardownInput(virDomainObjPtr vm,
virDomainInputDefPtr input);
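/*
 * Sketch of the hotplug pairing when the domain runs in a private mount
 * namespace (QEMU_DOMAIN_NS_MOUNT is assumed from the qemuDomainNamespace
 * enum earlier in this header; error handling is elided):
 *
 *   if (qemuDomainNamespaceAvailable(QEMU_DOMAIN_NS_MOUNT) &&
 *       qemuDomainNamespaceSetupDisk(vm, disk->src) < 0)
 *       goto cleanup;
 *   ...
 *   qemuDomainNamespaceTeardownDisk(vm, disk->src);
 */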
virDomainDiskDefPtr qemuDomainDiskLookupByNodename(virDomainDefPtr def,
const char *nodename,
virStorageSourcePtr *src,
unsigned int *idx);
char *qemuDomainDiskBackingStoreGetName(virDomainDiskDefPtr disk,
virStorageSourcePtr src,
unsigned int idx);
virStorageSourcePtr qemuDomainGetStorageSourceByDevstr(const char *devstr,
virDomainDefPtr def);
int
qemuDomainUpdateCPU(virDomainObjPtr vm,
virCPUDefPtr cpu,
virCPUDefPtr *origCPU);
int
qemuDomainFixupCPUs(virDomainObjPtr vm,
virCPUDefPtr *origCPU);
char *
qemuDomainGetMachineName(virDomainObjPtr vm);
void
qemuDomainObjPrivateXMLFormatAllowReboot(virBufferPtr buf,
virTristateBool allowReboot);
int
qemuDomainObjPrivateXMLParseAllowReboot(xmlXPathContextPtr ctxt,
virTristateBool *allowReboot);
bool qemuDomainCheckCCWS390AddressSupport(const virDomainDef *def,
virDomainDeviceInfo info,
virQEMUCapsPtr qemuCaps,
const char *devicename);
int
qemuDomainPrepareDiskSourceChain(virDomainDiskDefPtr disk,
virStorageSourcePtr src,
virQEMUDriverConfigPtr cfg,
virQEMUCapsPtr qemuCaps)
ATTRIBUTE_RETURN_CHECK;
int
qemuDomainPrepareDiskSource(virDomainDiskDefPtr disk,
qemuDomainObjPrivatePtr priv,
virQEMUDriverConfigPtr cfg);
int
qemuDomainDiskCachemodeFlags(int cachemode,
bool *writeback,
bool *direct,
bool *noflush);
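/*
 * Sketch: decomposing a disk cache mode into the three booleans QEMU
 * consumes; e.g. VIR_DOMAIN_DISK_CACHE_DISABLE ("none") yields
 * writeback=true, direct=true, noflush=false:
 *
 *   bool wb, direct, noflush;
 *   if (qemuDomainDiskCachemodeFlags(disk->cachemode, &wb, &direct,
 *                                    &noflush) < 0)
 *       goto cleanup;
 */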
char *qemuDomainGetManagedPRSocketPath(qemuDomainObjPrivatePtr priv);
#endif /* __QEMU_DOMAIN_H__ */