libvirt/src/qemu/qemu_domain.c

/*
* qemu_domain.c: QEMU domain private state
*
* Copyright (C) 2006-2016 Red Hat, Inc.
* Copyright (C) 2006 Daniel P. Berrange
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library. If not, see
* <http://www.gnu.org/licenses/>.
*
* Author: Daniel P. Berrange <berrange@redhat.com>
*/
#include <config.h>
#include "qemu_domain.h"
#include "qemu_alias.h"
#include "qemu_cgroup.h"
#include "qemu_command.h"
#include "qemu_parse_command.h"
#include "qemu_capabilities.h"
#include "qemu_migration.h"
#include "viralloc.h"
#include "virlog.h"
#include "virerror.h"
#include "c-ctype.h"
#include "cpu/cpu.h"
#include "viruuid.h"
#include "virfile.h"
#include "domain_addr.h"
#include "domain_event.h"
#include "virtime.h"
#include "virstoragefile.h"
#include "virstring.h"
#include "virthreadjob.h"
#include "viratomic.h"
#include "virprocess.h"
#include "vircrypto.h"
#include "secret_util.h"
#include "logging/log_manager.h"
#include "locking/domain_lock.h"
#include "storage/storage_driver.h"
#include <sys/time.h>
#include <fcntl.h>
#include <libxml/xpathInternals.h>
#define VIR_FROM_THIS VIR_FROM_QEMU
VIR_LOG_INIT("qemu.qemu_domain");
#define QEMU_NAMESPACE_HREF "http://libvirt.org/schemas/domain/qemu/1.0"
VIR_ENUM_IMPL(qemuDomainJob, QEMU_JOB_LAST,
"none",
"query",
"destroy",
"suspend",
"modify",
"abort",
"migration operation",
"none", /* async job is never stored in job.active */
"async nested",
);
VIR_ENUM_IMPL(qemuDomainAsyncJob, QEMU_ASYNC_JOB_LAST,
"none",
"migration out",
"migration in",
"save",
"dump",
"snapshot",
"start",
);
struct _qemuDomainLogContext {
int refs;
int writefd;
int readfd; /* Only used if manager == NULL */
off_t pos;
ino_t inode; /* Only used if manager != NULL */
char *path;
virLogManagerPtr manager;
};
const char *
qemuDomainAsyncJobPhaseToString(qemuDomainAsyncJob job,
int phase ATTRIBUTE_UNUSED)
{
switch (job) {
case QEMU_ASYNC_JOB_MIGRATION_OUT:
case QEMU_ASYNC_JOB_MIGRATION_IN:
return qemuMigrationJobPhaseTypeToString(phase);
case QEMU_ASYNC_JOB_SAVE:
case QEMU_ASYNC_JOB_DUMP:
case QEMU_ASYNC_JOB_SNAPSHOT:
case QEMU_ASYNC_JOB_START:
case QEMU_ASYNC_JOB_NONE:
case QEMU_ASYNC_JOB_LAST:
; /* fall through */
}
return "none";
}
int
qemuDomainAsyncJobPhaseFromString(qemuDomainAsyncJob job,
const char *phase)
{
if (!phase)
return 0;
switch (job) {
case QEMU_ASYNC_JOB_MIGRATION_OUT:
case QEMU_ASYNC_JOB_MIGRATION_IN:
return qemuMigrationJobPhaseTypeFromString(phase);
case QEMU_ASYNC_JOB_SAVE:
case QEMU_ASYNC_JOB_DUMP:
case QEMU_ASYNC_JOB_SNAPSHOT:
case QEMU_ASYNC_JOB_START:
case QEMU_ASYNC_JOB_NONE:
case QEMU_ASYNC_JOB_LAST:
; /* fall through */
}
if (STREQ(phase, "none"))
return 0;
else
return -1;
}
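/* Illustrative sketch (not part of the original file): the two phase
 * helpers above are symmetric. For async jobs without specialized
 * phases the string form is always "none", which parses back to
 * phase 0. The function name below is hypothetical. */
static void ATTRIBUTE_UNUSED
qemuDomainAsyncJobPhaseExample(void)
{
    const char *str = qemuDomainAsyncJobPhaseToString(QEMU_ASYNC_JOB_SAVE, 0);

    /* str is "none" here; parsing it back yields phase 0 */
    ignore_value(qemuDomainAsyncJobPhaseFromString(QEMU_ASYNC_JOB_SAVE, str));
}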
void qemuDomainEventQueue(virQEMUDriverPtr driver,
virObjectEventPtr event)
{
if (event)
virObjectEventStateQueue(driver->domainEventState, event);
}
void
qemuDomainEventEmitJobCompleted(virQEMUDriverPtr driver,
virDomainObjPtr vm)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
virObjectEventPtr event;
virTypedParameterPtr params = NULL;
int nparams = 0;
int type;
if (!priv->job.completed)
return;
if (qemuDomainJobInfoToParams(priv->job.completed, &type,
&params, &nparams) < 0) {
VIR_WARN("Could not get stats for completed job; domain %s",
vm->def->name);
}
event = virDomainEventJobCompletedNewFromObj(vm, params, nparams);
qemuDomainEventQueue(driver, event);
}
static int
qemuDomainObjInitJob(qemuDomainObjPrivatePtr priv)
{
memset(&priv->job, 0, sizeof(priv->job));
if (virCondInit(&priv->job.cond) < 0)
return -1;
if (virCondInit(&priv->job.asyncCond) < 0) {
virCondDestroy(&priv->job.cond);
return -1;
}
return 0;
}
static void
qemuDomainObjResetJob(qemuDomainObjPrivatePtr priv)
{
struct qemuDomainJobObj *job = &priv->job;
job->active = QEMU_JOB_NONE;
job->owner = 0;
job->ownerAPI = NULL;
job->started = 0;
}
static void
qemuDomainObjResetAsyncJob(qemuDomainObjPrivatePtr priv)
{
struct qemuDomainJobObj *job = &priv->job;
job->asyncJob = QEMU_ASYNC_JOB_NONE;
job->asyncOwner = 0;
job->asyncOwnerAPI = NULL;
job->asyncStarted = 0;
job->phase = 0;
job->mask = QEMU_JOB_DEFAULT_MASK;
job->dump_memory_only = false;
job->abortJob = false;
job->spiceMigrated = false;
job->postcopyEnabled = false;
VIR_FREE(job->current);
}
void
qemuDomainObjRestoreJob(virDomainObjPtr obj,
struct qemuDomainJobObj *job)
{
qemuDomainObjPrivatePtr priv = obj->privateData;
memset(job, 0, sizeof(*job));
job->active = priv->job.active;
job->owner = priv->job.owner;
job->asyncJob = priv->job.asyncJob;
job->asyncOwner = priv->job.asyncOwner;
job->phase = priv->job.phase;
qemuDomainObjResetJob(priv);
qemuDomainObjResetAsyncJob(priv);
}
static void
qemuDomainObjFreeJob(qemuDomainObjPrivatePtr priv)
{
VIR_FREE(priv->job.current);
VIR_FREE(priv->job.completed);
virCondDestroy(&priv->job.cond);
virCondDestroy(&priv->job.asyncCond);
}
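/* Returns true when @job is one of the jobs recorded in the domain status
 * XML: JOB_MASK() turns the job enum value into a single bit, which is
 * tested against the QEMU_DOMAIN_TRACK_JOBS bitmap. */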
static bool
qemuDomainTrackJob(qemuDomainJob job)
{
return (QEMU_DOMAIN_TRACK_JOBS & JOB_MASK(job)) != 0;
}
int
qemuDomainJobInfoUpdateTime(qemuDomainJobInfoPtr jobInfo)
{
unsigned long long now;
if (!jobInfo->started)
return 0;
if (virTimeMillisNow(&now) < 0)
return -1;
if (now < jobInfo->started) {
VIR_WARN("Async job starts in the future");
jobInfo->started = 0;
return 0;
}
jobInfo->timeElapsed = now - jobInfo->started;
return 0;
}
int
qemuDomainJobInfoUpdateDowntime(qemuDomainJobInfoPtr jobInfo)
{
unsigned long long now;
if (!jobInfo->stopped)
return 0;
if (virTimeMillisNow(&now) < 0)
return -1;
if (now < jobInfo->stopped) {
VIR_WARN("Guest's CPUs stopped in the future");
jobInfo->stopped = 0;
return 0;
}
jobInfo->stats.downtime = now - jobInfo->stopped;
jobInfo->stats.downtime_set = true;
return 0;
}
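/* Note on the two helpers above: the stored timestamps come from
 * virTimeMillisNow(), so a host clock stepped backwards can leave them
 * ahead of "now". The unsigned subtraction would then underflow into a
 * huge bogus duration, hence the stale timestamp is reset instead. */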
int
qemuDomainJobInfoToInfo(qemuDomainJobInfoPtr jobInfo,
virDomainJobInfoPtr info)
{
info->type = jobInfo->type;
info->timeElapsed = jobInfo->timeElapsed;
info->timeRemaining = jobInfo->timeRemaining;
info->memTotal = jobInfo->stats.ram_total;
info->memRemaining = jobInfo->stats.ram_remaining;
info->memProcessed = jobInfo->stats.ram_transferred;
info->fileTotal = jobInfo->stats.disk_total;
info->fileRemaining = jobInfo->stats.disk_remaining;
info->fileProcessed = jobInfo->stats.disk_transferred;
info->dataTotal = info->memTotal + info->fileTotal;
info->dataRemaining = info->memRemaining + info->fileRemaining;
info->dataProcessed = info->memProcessed + info->fileProcessed;
return 0;
}
int
qemuDomainJobInfoToParams(qemuDomainJobInfoPtr jobInfo,
int *type,
virTypedParameterPtr *params,
int *nparams)
{
qemuMonitorMigrationStats *stats = &jobInfo->stats;
virTypedParameterPtr par = NULL;
int maxpar = 0;
int npar = 0;
if (virTypedParamsAddULLong(&par, &npar, &maxpar,
VIR_DOMAIN_JOB_TIME_ELAPSED,
jobInfo->timeElapsed) < 0)
goto error;
if (jobInfo->timeDeltaSet &&
jobInfo->timeElapsed > jobInfo->timeDelta &&
virTypedParamsAddULLong(&par, &npar, &maxpar,
VIR_DOMAIN_JOB_TIME_ELAPSED_NET,
jobInfo->timeElapsed - jobInfo->timeDelta) < 0)
goto error;
if (jobInfo->type == VIR_DOMAIN_JOB_BOUNDED &&
virTypedParamsAddULLong(&par, &npar, &maxpar,
VIR_DOMAIN_JOB_TIME_REMAINING,
jobInfo->timeRemaining) < 0)
goto error;
if (stats->downtime_set &&
virTypedParamsAddULLong(&par, &npar, &maxpar,
VIR_DOMAIN_JOB_DOWNTIME,
stats->downtime) < 0)
goto error;
if (stats->downtime_set &&
jobInfo->timeDeltaSet &&
stats->downtime > jobInfo->timeDelta &&
virTypedParamsAddULLong(&par, &npar, &maxpar,
VIR_DOMAIN_JOB_DOWNTIME_NET,
stats->downtime - jobInfo->timeDelta) < 0)
goto error;
if (stats->setup_time_set &&
virTypedParamsAddULLong(&par, &npar, &maxpar,
VIR_DOMAIN_JOB_SETUP_TIME,
stats->setup_time) < 0)
goto error;
if (virTypedParamsAddULLong(&par, &npar, &maxpar,
VIR_DOMAIN_JOB_DATA_TOTAL,
stats->ram_total +
stats->disk_total) < 0 ||
virTypedParamsAddULLong(&par, &npar, &maxpar,
VIR_DOMAIN_JOB_DATA_PROCESSED,
stats->ram_transferred +
stats->disk_transferred) < 0 ||
virTypedParamsAddULLong(&par, &npar, &maxpar,
VIR_DOMAIN_JOB_DATA_REMAINING,
stats->ram_remaining +
stats->disk_remaining) < 0)
goto error;
if (virTypedParamsAddULLong(&par, &npar, &maxpar,
VIR_DOMAIN_JOB_MEMORY_TOTAL,
stats->ram_total) < 0 ||
virTypedParamsAddULLong(&par, &npar, &maxpar,
VIR_DOMAIN_JOB_MEMORY_PROCESSED,
stats->ram_transferred) < 0 ||
virTypedParamsAddULLong(&par, &npar, &maxpar,
VIR_DOMAIN_JOB_MEMORY_REMAINING,
stats->ram_remaining) < 0)
goto error;
if (stats->ram_bps &&
virTypedParamsAddULLong(&par, &npar, &maxpar,
VIR_DOMAIN_JOB_MEMORY_BPS,
stats->ram_bps) < 0)
goto error;
if (stats->ram_duplicate_set) {
if (virTypedParamsAddULLong(&par, &npar, &maxpar,
VIR_DOMAIN_JOB_MEMORY_CONSTANT,
stats->ram_duplicate) < 0 ||
virTypedParamsAddULLong(&par, &npar, &maxpar,
VIR_DOMAIN_JOB_MEMORY_NORMAL,
stats->ram_normal) < 0 ||
virTypedParamsAddULLong(&par, &npar, &maxpar,
VIR_DOMAIN_JOB_MEMORY_NORMAL_BYTES,
stats->ram_normal_bytes) < 0)
goto error;
}
if (virTypedParamsAddULLong(&par, &npar, &maxpar,
VIR_DOMAIN_JOB_MEMORY_DIRTY_RATE,
stats->ram_dirty_rate) < 0 ||
virTypedParamsAddULLong(&par, &npar, &maxpar,
VIR_DOMAIN_JOB_MEMORY_ITERATION,
stats->ram_iteration) < 0)
goto error;
if (virTypedParamsAddULLong(&par, &npar, &maxpar,
VIR_DOMAIN_JOB_DISK_TOTAL,
stats->disk_total) < 0 ||
virTypedParamsAddULLong(&par, &npar, &maxpar,
VIR_DOMAIN_JOB_DISK_PROCESSED,
stats->disk_transferred) < 0 ||
virTypedParamsAddULLong(&par, &npar, &maxpar,
VIR_DOMAIN_JOB_DISK_REMAINING,
stats->disk_remaining) < 0)
goto error;
if (stats->disk_bps &&
virTypedParamsAddULLong(&par, &npar, &maxpar,
VIR_DOMAIN_JOB_DISK_BPS,
stats->disk_bps) < 0)
goto error;
if (stats->xbzrle_set) {
if (virTypedParamsAddULLong(&par, &npar, &maxpar,
VIR_DOMAIN_JOB_COMPRESSION_CACHE,
stats->xbzrle_cache_size) < 0 ||
virTypedParamsAddULLong(&par, &npar, &maxpar,
VIR_DOMAIN_JOB_COMPRESSION_BYTES,
stats->xbzrle_bytes) < 0 ||
virTypedParamsAddULLong(&par, &npar, &maxpar,
VIR_DOMAIN_JOB_COMPRESSION_PAGES,
stats->xbzrle_pages) < 0 ||
virTypedParamsAddULLong(&par, &npar, &maxpar,
VIR_DOMAIN_JOB_COMPRESSION_CACHE_MISSES,
stats->xbzrle_cache_miss) < 0 ||
virTypedParamsAddULLong(&par, &npar, &maxpar,
VIR_DOMAIN_JOB_COMPRESSION_OVERFLOW,
stats->xbzrle_overflow) < 0)
goto error;
}
*type = jobInfo->type;
*params = par;
*nparams = npar;
return 0;
error:
virTypedParamsFree(par, npar);
return -1;
}
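/* Illustrative sketch (not part of the original file): a caller consuming
 * the typed parameters built above. virTypedParamsFree() must always be
 * called on the result, mirroring the error path in
 * qemuDomainJobInfoToParams(). The function name is hypothetical. */
static void ATTRIBUTE_UNUSED
qemuDomainJobInfoParamsExample(qemuDomainJobInfoPtr jobInfo)
{
    virTypedParameterPtr params = NULL;
    int nparams = 0;
    int type;

    if (qemuDomainJobInfoToParams(jobInfo, &type, &params, &nparams) < 0)
        return;

    VIR_DEBUG("job type %d reported %d statistics parameters", type, nparams);
    virTypedParamsFree(params, nparams);
}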
/* qemuDomainGetMasterKeyFilePath:
* @libDir: Directory path to domain lib files
*
* Generate a path to the domain master key file for libDir.
* It's up to the caller to handle checking if path exists.
*
* Returns the path to the master key file on success; it is the caller's
* responsibility to free the returned string. Returns NULL on failure.
*/
char *
qemuDomainGetMasterKeyFilePath(const char *libDir)
{
if (!libDir) {
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
_("invalid path for master key file"));
return NULL;
}
return virFileBuildPath(libDir, "master-key.aes", NULL);
}
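/* Illustrative sketch (not part of the original file): the usual caller
 * pattern for the path helper above. The function name is hypothetical. */
static void ATTRIBUTE_UNUSED
qemuDomainMasterKeyPathExample(qemuDomainObjPrivatePtr priv)
{
    char *path;

    if (!(path = qemuDomainGetMasterKeyFilePath(priv->libDir)))
        return; /* error already reported */

    VIR_DEBUG("domain master key file at %s", path);
    VIR_FREE(path);
}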
/* qemuDomainWriteMasterKeyFile:
* @driver: qemu driver data
* @vm: Pointer to the vm object
*
* Open (create/truncate) the master key file, write the domain master key
* to it, and set the appropriate security label on the file.
*
* Returns 0 on success, -1 on failure with error message indicating failure
*/
int
qemuDomainWriteMasterKeyFile(virQEMUDriverPtr driver,
virDomainObjPtr vm)
{
char *path;
int fd = -1;
int ret = -1;
qemuDomainObjPrivatePtr priv = vm->privateData;
/* Only gets filled in if we have the capability */
if (!priv->masterKey)
return 0;
if (!(path = qemuDomainGetMasterKeyFilePath(priv->libDir)))
return -1;
if ((fd = open(path, O_WRONLY|O_TRUNC|O_CREAT, 0600)) < 0) {
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
_("failed to open domain master key file for write"));
goto cleanup;
}
if (safewrite(fd, priv->masterKey, priv->masterKeyLen) < 0) {
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
_("failed to write master key file for domain"));
goto cleanup;
}
if (virSecurityManagerDomainSetPathLabel(driver->securityManager,
vm->def, path) < 0)
goto cleanup;
ret = 0;
cleanup:
VIR_FORCE_CLOSE(fd);
VIR_FREE(path);
return ret;
}
/* qemuDomainMasterKeyReadFile:
* @priv: pointer to domain private object
*
* Expected to be called during qemuProcessReconnect once the domain
* libDir has been generated through qemuStateInitialize calling
* virDomainObjListLoadAllConfigs, which restores the libDir path
* to the domain private object.
*
* This function will get the path to the master key file and if it
* exists, it will read the contents of the file saving it in priv->masterKey.
*
* If the secret capability is not available, we return quietly without
* reading anything. Once we do attempt the read, a missing file or a
* failed validity check is reported as an error.
*
* Returns 0 on success or lack of capability
* -1 on failure with error message indicating failure
*/
int
qemuDomainMasterKeyReadFile(qemuDomainObjPrivatePtr priv)
{
char *path;
int fd = -1;
uint8_t *masterKey = NULL;
ssize_t masterKeyLen = 0;
/* If we don't have the capability, then do nothing. */
if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_OBJECT_SECRET))
return 0;
if (!(path = qemuDomainGetMasterKeyFilePath(priv->libDir)))
return -1;
if (!virFileExists(path)) {
virReportError(VIR_ERR_INTERNAL_ERROR,
_("domain master key file doesn't exist in %s"),
priv->libDir);
goto error;
}
if ((fd = open(path, O_RDONLY)) < 0) {
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
_("failed to open domain master key file for read"));
goto error;
}
if (VIR_ALLOC_N(masterKey, 1024) < 0)
goto error;
if ((masterKeyLen = saferead(fd, masterKey, 1024)) < 0) {
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
_("unable to read domain master key file"));
goto error;
}
if (masterKeyLen != QEMU_DOMAIN_MASTER_KEY_LEN) {
virReportError(VIR_ERR_INTERNAL_ERROR,
_("invalid master key read, size=%zd"), masterKeyLen);
goto error;
}
ignore_value(VIR_REALLOC_N_QUIET(masterKey, masterKeyLen));
priv->masterKey = masterKey;
priv->masterKeyLen = masterKeyLen;
VIR_FORCE_CLOSE(fd);
VIR_FREE(path);
return 0;
error:
if (masterKeyLen > 0)
memset(masterKey, 0, masterKeyLen);
VIR_FREE(masterKey);
VIR_FORCE_CLOSE(fd);
VIR_FREE(path);
return -1;
}
/* qemuDomainMasterKeyRemove:
* @priv: Pointer to the domain private object
*
* Remove all traces of the master key: wipe and free the in-memory copy,
* then delete the on-disk key file.
*/
void
qemuDomainMasterKeyRemove(qemuDomainObjPrivatePtr priv)
{
char *path = NULL;
if (!priv->masterKey)
return;
/* Clear the contents */
memset(priv->masterKey, 0, priv->masterKeyLen);
VIR_FREE(priv->masterKey);
priv->masterKeyLen = 0;
/* Delete the master key file */
path = qemuDomainGetMasterKeyFilePath(priv->libDir);
unlink(path);
VIR_FREE(path);
}
/* qemuDomainMasterKeyCreate:
* @vm: Pointer to the domain object
*
* As long as the underlying qemu has the secret capability, generate
* a random 32-byte key and store it raw in a file, to be used as a
* secret shared with qemu for protecting sensitive data.
*
* Returns: 0 on success, -1 w/ error message on failure
*/
int
qemuDomainMasterKeyCreate(virDomainObjPtr vm)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
/* If we don't have the capability, then do nothing. */
if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_OBJECT_SECRET))
return 0;
if (!(priv->masterKey =
virCryptoGenerateRandom(QEMU_DOMAIN_MASTER_KEY_LEN)))
return -1;
priv->masterKeyLen = QEMU_DOMAIN_MASTER_KEY_LEN;
return 0;
}
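/* Illustrative sketch (not part of the original file): the master key
 * lifecycle as the process code drives it -- create the key, persist it
 * for qemu, and wipe it again at shutdown. Error handling is elided and
 * the function name is hypothetical. */
static void ATTRIBUTE_UNUSED
qemuDomainMasterKeyLifecycleExample(virQEMUDriverPtr driver,
                                    virDomainObjPtr vm)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;

    if (qemuDomainMasterKeyCreate(vm) < 0)
        return;

    /* qemuProcessPrepareHost() persists the key under priv->libDir... */
    ignore_value(qemuDomainWriteMasterKeyFile(driver, vm));

    /* ...and qemuProcessStop() removes all traces of it again. */
    qemuDomainMasterKeyRemove(priv);
}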
static void
qemuDomainSecretPlainClear(qemuDomainSecretPlain secret)
{
VIR_FREE(secret.username);
VIR_DISPOSE_N(secret.secret, secret.secretlen);
}
static void
qemuDomainSecretAESClear(qemuDomainSecretAES secret)
{
VIR_FREE(secret.username);
VIR_FREE(secret.alias);
VIR_FREE(secret.iv);
VIR_FREE(secret.ciphertext);
}
static void
qemuDomainSecretInfoFree(qemuDomainSecretInfoPtr *secinfo)
{
if (!*secinfo)
return;
switch ((qemuDomainSecretInfoType) (*secinfo)->type) {
case VIR_DOMAIN_SECRET_INFO_TYPE_PLAIN:
qemuDomainSecretPlainClear((*secinfo)->s.plain);
break;
case VIR_DOMAIN_SECRET_INFO_TYPE_AES:
qemuDomainSecretAESClear((*secinfo)->s.aes);
break;
case VIR_DOMAIN_SECRET_INFO_TYPE_LAST:
break;
}
VIR_FREE(*secinfo);
}
static virClassPtr qemuDomainDiskPrivateClass;
static void qemuDomainDiskPrivateDispose(void *obj);
static int
qemuDomainDiskPrivateOnceInit(void)
{
qemuDomainDiskPrivateClass = virClassNew(virClassForObject(),
"qemuDomainDiskPrivate",
sizeof(qemuDomainDiskPrivate),
qemuDomainDiskPrivateDispose);
if (!qemuDomainDiskPrivateClass)
return -1;
else
return 0;
}
VIR_ONCE_GLOBAL_INIT(qemuDomainDiskPrivate)
static virObjectPtr
qemuDomainDiskPrivateNew(void)
{
qemuDomainDiskPrivatePtr priv;
if (qemuDomainDiskPrivateInitialize() < 0)
return NULL;
if (!(priv = virObjectNew(qemuDomainDiskPrivateClass)))
return NULL;
return (virObjectPtr) priv;
}
static void
qemuDomainDiskPrivateDispose(void *obj)
{
qemuDomainDiskPrivatePtr priv = obj;
qemuDomainSecretInfoFree(&priv->secinfo);
}
static virClassPtr qemuDomainHostdevPrivateClass;
static void qemuDomainHostdevPrivateDispose(void *obj);
static int
qemuDomainHostdevPrivateOnceInit(void)
{
qemuDomainHostdevPrivateClass =
virClassNew(virClassForObject(),
"qemuDomainHostdevPrivate",
sizeof(qemuDomainHostdevPrivate),
qemuDomainHostdevPrivateDispose);
if (!qemuDomainHostdevPrivateClass)
return -1;
else
return 0;
}
VIR_ONCE_GLOBAL_INIT(qemuDomainHostdevPrivate)
static virObjectPtr
qemuDomainHostdevPrivateNew(void)
{
qemuDomainHostdevPrivatePtr priv;
if (qemuDomainHostdevPrivateInitialize() < 0)
return NULL;
if (!(priv = virObjectNew(qemuDomainHostdevPrivateClass)))
return NULL;
return (virObjectPtr) priv;
}
static void
qemuDomainHostdevPrivateDispose(void *obj)
{
qemuDomainHostdevPrivatePtr priv = obj;
qemuDomainSecretInfoFree(&priv->secinfo);
}
/* qemuDomainSecretPlainSetup:
* @conn: Pointer to connection
* @secinfo: Pointer to secret info
* @protocol: Protocol for secret
* @authdef: Pointer to auth data
*
* Taking a secinfo, fill in the plaintext information
*
* Returns 0 on success, -1 on failure with error message
*/
static int
qemuDomainSecretPlainSetup(virConnectPtr conn,
qemuDomainSecretInfoPtr secinfo,
virStorageNetProtocol protocol,
virStorageAuthDefPtr authdef)
{
int secretType = VIR_SECRET_USAGE_TYPE_ISCSI;
secinfo->type = VIR_DOMAIN_SECRET_INFO_TYPE_PLAIN;
if (VIR_STRDUP(secinfo->s.plain.username, authdef->username) < 0)
return -1;
if (protocol == VIR_STORAGE_NET_PROTOCOL_RBD)
secretType = VIR_SECRET_USAGE_TYPE_CEPH;
return virSecretGetSecretString(conn, authdef, secretType,
&secinfo->s.plain.secret,
&secinfo->s.plain.secretlen);
}
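/* Note: in the plain case the unencrypted secret stays in memory inside
 * @secinfo until qemuDomainSecretPlainClear() wipes it via VIR_DISPOSE_N();
 * the AES path below only keeps the base64 ciphertext around. */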
/* qemuDomainSecretAESSetup:
* @conn: Pointer to connection
* @priv: pointer to domain private object
* @secinfo: Pointer to secret info
* @srcalias: Alias of the disk/hostdev used to generate the secret alias
* @protocol: Protocol for secret
* @authdef: Pointer to auth data
*
* Taking a secinfo, fill in the AES-specific information, using the
* domain master key to encrypt the secret before it is passed to qemu.
*
* Returns 0 on success, -1 on failure with error message
*/
static int
qemuDomainSecretAESSetup(virConnectPtr conn,
qemuDomainObjPrivatePtr priv,
qemuDomainSecretInfoPtr secinfo,
const char *srcalias,
virStorageNetProtocol protocol,
virStorageAuthDefPtr authdef)
{
int ret = -1;
uint8_t *raw_iv = NULL;
size_t ivlen = QEMU_DOMAIN_AES_IV_LEN;
uint8_t *secret = NULL;
size_t secretlen = 0;
uint8_t *ciphertext = NULL;
size_t ciphertextlen = 0;
int secretType = VIR_SECRET_USAGE_TYPE_NONE;
secinfo->type = VIR_DOMAIN_SECRET_INFO_TYPE_AES;
if (VIR_STRDUP(secinfo->s.aes.username, authdef->username) < 0)
return -1;
switch ((virStorageNetProtocol)protocol) {
case VIR_STORAGE_NET_PROTOCOL_RBD:
secretType = VIR_SECRET_USAGE_TYPE_CEPH;
break;
case VIR_STORAGE_NET_PROTOCOL_NONE:
case VIR_STORAGE_NET_PROTOCOL_NBD:
case VIR_STORAGE_NET_PROTOCOL_SHEEPDOG:
case VIR_STORAGE_NET_PROTOCOL_GLUSTER:
case VIR_STORAGE_NET_PROTOCOL_ISCSI:
case VIR_STORAGE_NET_PROTOCOL_HTTP:
case VIR_STORAGE_NET_PROTOCOL_HTTPS:
case VIR_STORAGE_NET_PROTOCOL_FTP:
case VIR_STORAGE_NET_PROTOCOL_FTPS:
case VIR_STORAGE_NET_PROTOCOL_TFTP:
case VIR_STORAGE_NET_PROTOCOL_LAST:
virReportError(VIR_ERR_INTERNAL_ERROR,
_("protocol '%s' cannot be used for encrypted secrets"),
virStorageNetProtocolTypeToString(protocol));
return -1;
}
if (!(secinfo->s.aes.alias = qemuDomainGetSecretAESAlias(srcalias)))
return -1;
/* Create a random initialization vector */
if (!(raw_iv = virCryptoGenerateRandom(ivlen)))
return -1;
/* Encode the IV and save that since qemu will need it */
if (!(secinfo->s.aes.iv = virStringEncodeBase64(raw_iv, ivlen)))
goto cleanup;
/* Grab the unencoded secret */
if (virSecretGetSecretString(conn, authdef, secretType,
&secret, &secretlen) < 0)
goto cleanup;
if (virCryptoEncryptData(VIR_CRYPTO_CIPHER_AES256CBC,
priv->masterKey, QEMU_DOMAIN_MASTER_KEY_LEN,
raw_iv, ivlen, secret, secretlen,
&ciphertext, &ciphertextlen) < 0)
goto cleanup;
/* Clear out the secret */
memset(secret, 0, secretlen);
/* Now encode the ciphertext and store to be passed to qemu */
if (!(secinfo->s.aes.ciphertext = virStringEncodeBase64(ciphertext,
ciphertextlen)))
goto cleanup;
ret = 0;
cleanup:
VIR_DISPOSE_N(raw_iv, ivlen);
VIR_DISPOSE_N(secret, secretlen);
VIR_DISPOSE_N(ciphertext, ciphertextlen);
return ret;
}
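/* Summary of the AES data flow above (descriptive only):
 *
 *   raw_iv     = QEMU_DOMAIN_AES_IV_LEN random bytes
 *   iv         = base64(raw_iv)
 *   ciphertext = base64(AES-256-CBC(key = masterKey, iv = raw_iv, secret))
 *
 * qemu is given the base64 iv and ciphertext plus a reference to the
 * shared master key, allowing it to decrypt the secret on its side. */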
/* qemuDomainSecretSetup:
* @conn: Pointer to connection
* @priv: pointer to domain private object
* @secinfo: Pointer to secret info
* @srcalias: Alias of the disk/hostdev used to generate the secret alias
* @protocol: Protocol for secret
* @authdef: Pointer to auth data
*
* If we have the encryption API present and can support a secret object, then
* build the AES secret; otherwise, build the Plain secret. This is the magic
* decision point for utilizing the AES secrets for an RBD disk. For now iSCSI
* disks and hostdevs will not be able to utilize this mechanism.
*
* Returns 0 on success, -1 on failure
*/
static int
qemuDomainSecretSetup(virConnectPtr conn,
qemuDomainObjPrivatePtr priv,
qemuDomainSecretInfoPtr secinfo,
const char *srcalias,
virStorageNetProtocol protocol,
virStorageAuthDefPtr authdef)
{
if (virCryptoHaveCipher(VIR_CRYPTO_CIPHER_AES256CBC) &&
virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_OBJECT_SECRET) &&
protocol == VIR_STORAGE_NET_PROTOCOL_RBD) {
if (qemuDomainSecretAESSetup(conn, priv, secinfo,
srcalias, protocol, authdef) < 0)
return -1;
} else {
if (qemuDomainSecretPlainSetup(conn, secinfo, protocol, authdef) < 0)
return -1;
}
return 0;
}
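/* Example (an illustrative sketch, not driver code): for an RBD disk
* with <auth> configured, a caller such as qemuDomainSecretDiskPrepare()
* below funnels through this decision point:
*
*     qemuDomainSecretSetup(conn, priv, secinfo, disk->info.alias,
*                           VIR_STORAGE_NET_PROTOCOL_RBD, src->auth);
*
* With a qemu exposing QEMU_CAPS_OBJECT_SECRET and a crypto backend
* providing AES-256-CBC, this resolves to qemuDomainSecretAESSetup();
* otherwise it quietly falls back to qemuDomainSecretPlainSetup(). */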
/* qemuDomainSecretDiskDestroy:
* @disk: Pointer to a disk definition
*
* Clear and destroy memory associated with the secret
*/
void
qemuDomainSecretDiskDestroy(virDomainDiskDefPtr disk)
{
qemuDomainDiskPrivatePtr diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk);
if (!diskPriv || !diskPriv->secinfo)
return;
qemuDomainSecretInfoFree(&diskPriv->secinfo);
}
/* qemuDomainSecretDiskPrepare:
* @conn: Pointer to connection
* @priv: pointer to domain private object
* @disk: Pointer to a disk definition
*
* For the right disk, generate the qemuDomainSecretInfo structure.
*
* Returns 0 on success, -1 on failure
*/
int
qemuDomainSecretDiskPrepare(virConnectPtr conn,
qemuDomainObjPrivatePtr priv,
virDomainDiskDefPtr disk)
{
virStorageSourcePtr src = disk->src;
qemuDomainSecretInfoPtr secinfo = NULL;
if (conn && !virStorageSourceIsEmpty(src) &&
virStorageSourceGetActualType(src) == VIR_STORAGE_TYPE_NETWORK &&
src->auth &&
(src->protocol == VIR_STORAGE_NET_PROTOCOL_ISCSI ||
src->protocol == VIR_STORAGE_NET_PROTOCOL_RBD)) {
qemuDomainDiskPrivatePtr diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk);
if (VIR_ALLOC(secinfo) < 0)
return -1;
if (qemuDomainSecretSetup(conn, priv, secinfo, disk->info.alias,
src->protocol, src->auth) < 0)
goto error;
diskPriv->secinfo = secinfo;
}
return 0;
error:
qemuDomainSecretInfoFree(&secinfo);
return -1;
}
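/* For reference, a disk definition that passes the checks above and so
* gets a secinfo attached looks like this (values illustrative):
*
*   <disk type='network' device='disk'>
*     <source protocol='rbd' name='pool/image'>
*       <host name='mon1.example.org' port='6321'/>
*     </source>
*     <auth username='myname'>
*       <secret type='ceph' usage='mycluster_myname'/>
*     </auth>
*     <target dev='vda' bus='virtio'/>
*   </disk>
*/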
/* qemuDomainSecretHostdevDestroy:
* @hostdev: Pointer to a hostdev definition
*
* Clear and destroy memory associated with the secret
*/
void
qemuDomainSecretHostdevDestroy(virDomainHostdevDefPtr hostdev)
{
qemuDomainHostdevPrivatePtr hostdevPriv =
QEMU_DOMAIN_HOSTDEV_PRIVATE(hostdev);
if (!hostdevPriv || !hostdevPriv->secinfo)
return;
qemuDomainSecretInfoFree(&hostdevPriv->secinfo);
}
/* qemuDomainSecretHostdevPrepare:
* @conn: Pointer to connection
* @priv: pointer to domain private object
* @hostdev: Pointer to a hostdev definition
*
* For the right host device, generate the qemuDomainSecretInfo structure.
*
* Returns 0 on success, -1 on failure
*/
int
qemuDomainSecretHostdevPrepare(virConnectPtr conn,
qemuDomainObjPrivatePtr priv,
virDomainHostdevDefPtr hostdev)
{
virDomainHostdevSubsysPtr subsys = &hostdev->source.subsys;
qemuDomainSecretInfoPtr secinfo = NULL;
if (conn && hostdev->mode == VIR_DOMAIN_HOSTDEV_MODE_SUBSYS &&
subsys->type == VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_SCSI) {
virDomainHostdevSubsysSCSIPtr scsisrc = &hostdev->source.subsys.u.scsi;
virDomainHostdevSubsysSCSIiSCSIPtr iscsisrc = &scsisrc->u.iscsi;
if (scsisrc->protocol == VIR_DOMAIN_HOSTDEV_SCSI_PROTOCOL_TYPE_ISCSI &&
iscsisrc->auth) {
qemuDomainHostdevPrivatePtr hostdevPriv =
QEMU_DOMAIN_HOSTDEV_PRIVATE(hostdev);
if (VIR_ALLOC(secinfo) < 0)
return -1;
if (qemuDomainSecretSetup(conn, priv, secinfo, hostdev->info->alias,
VIR_STORAGE_NET_PROTOCOL_ISCSI,
iscsisrc->auth) < 0)
goto error;
hostdevPriv->secinfo = secinfo;
}
}
return 0;
error:
qemuDomainSecretInfoFree(&secinfo);
return -1;
}
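/* Likewise, the hostdev shape that qualifies here is iSCSI SCSI
* passthrough with authentication (abbreviated, values illustrative):
*
*   <hostdev mode='subsystem' type='scsi'>
*     <source protocol='iscsi' name='iqn.2016-04.com.example:iscsi/1'>
*       <host name='example.org' port='3260'/>
*       <auth username='myname'>
*         <secret type='iscsi' usage='libvirtiscsi'/>
*       </auth>
*     </source>
*   </hostdev>
*/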
/* qemuDomainSecretDestroy:
* @vm: Domain object
*
* Once the command line generation has completed, it is expected
* that the secrets will be removed
*/
void
qemuDomainSecretDestroy(virDomainObjPtr vm)
{
size_t i;
for (i = 0; i < vm->def->ndisks; i++)
qemuDomainSecretDiskDestroy(vm->def->disks[i]);
for (i = 0; i < vm->def->nhostdevs; i++)
qemuDomainSecretHostdevDestroy(vm->def->hostdevs[i]);
}
/* qemuDomainSecretPrepare:
* @conn: Pointer to connection
* @vm: Domain object
*
* For any objects that may require an auth/secret setup, create a
* qemuDomainSecretInfo and save it in the appropriate place within
* the private structures. This will be used by command line build
* code in order to pass the secret along to qemu in order to provide
* the necessary authentication data.
*
* Returns 0 on success, -1 on failure with error message set
*/
int
qemuDomainSecretPrepare(virConnectPtr conn,
virDomainObjPtr vm)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
size_t i;
for (i = 0; i < vm->def->ndisks; i++) {
if (qemuDomainSecretDiskPrepare(conn, priv, vm->def->disks[i]) < 0)
return -1;
}
for (i = 0; i < vm->def->nhostdevs; i++) {
if (qemuDomainSecretHostdevPrepare(conn, priv,
vm->def->hostdevs[i]) < 0)
return -1;
}
return 0;
}
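/* Intended usage (a sketch; the real call sites live in the process
* startup code): secret data is prepared just for the duration of
* command line generation and torn down again straight afterwards:
*
*     if (qemuDomainSecretPrepare(conn, vm) < 0)
*         goto error;
*     ... build the qemu command line ...
*     qemuDomainSecretDestroy(vm);
*
* so that plain-text secrets held in the private structures never
* outlive the build. */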
/* This is the old way of setting up per-domain directories */
static int
qemuDomainSetPrivatePathsOld(virQEMUDriverPtr driver,
virDomainObjPtr vm)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
int ret = -1;
if (!priv->libDir &&
virAsprintf(&priv->libDir, "%s/domain-%s",
cfg->libDir, vm->def->name) < 0)
goto cleanup;
if (!priv->channelTargetDir &&
virAsprintf(&priv->channelTargetDir, "%s/domain-%s",
cfg->channelTargetDir, vm->def->name) < 0)
goto cleanup;
ret = 0;
cleanup:
virObjectUnref(cfg);
return ret;
}
int
qemuDomainSetPrivatePaths(virQEMUDriverPtr driver,
virDomainObjPtr vm)
{
virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
qemuDomainObjPrivatePtr priv = vm->privateData;
char *domname = virDomainObjGetShortName(vm);
int ret = -1;
if (!domname)
goto cleanup;
if (!priv->libDir &&
virAsprintf(&priv->libDir, "%s/domain-%s", cfg->libDir, domname) < 0)
goto cleanup;
if (!priv->channelTargetDir &&
virAsprintf(&priv->channelTargetDir, "%s/domain-%s",
cfg->channelTargetDir, domname) < 0)
goto cleanup;
ret = 0;
cleanup:
virObjectUnref(cfg);
VIR_FREE(domname);
return ret;
}
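/* By way of example (assuming the default privileged configuration,
* where cfg->libDir is "/var/lib/libvirt/qemu"), a running domain
* "fedora" with id 7 ends up with paths along the lines of:
*
*     old scheme:  /var/lib/libvirt/qemu/domain-fedora
*     new scheme:  /var/lib/libvirt/qemu/domain-7-fedora
*
* The short name from virDomainObjGetShortName() embeds the domain id
* and truncates overly long names, keeping the path unique and safely
* below UNIX socket path length limits. */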
void
qemuDomainClearPrivatePaths(virDomainObjPtr vm)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
VIR_FREE(priv->libDir);
VIR_FREE(priv->channelTargetDir);
}
static void *
qemuDomainObjPrivateAlloc(void)
{
qemuDomainObjPrivatePtr priv;
if (VIR_ALLOC(priv) < 0)
return NULL;
if (qemuDomainObjInitJob(priv) < 0) {
virReportSystemError(errno, "%s",
_("Unable to init qemu driver mutexes"));
goto error;
}
if (!(priv->devs = virChrdevAlloc()))
goto error;
priv->migMaxBandwidth = QEMU_DOMAIN_MIG_BANDWIDTH_MAX;
return priv;
error:
VIR_FREE(priv);
return NULL;
}
static void
qemuDomainObjPrivateFree(void *data)
{
qemuDomainObjPrivatePtr priv = data;
virObjectUnref(priv->qemuCaps);
virCgroupFree(&priv->cgroup);
virDomainPCIAddressSetFree(priv->pciaddrs);
virDomainCCWAddressSetFree(priv->ccwaddrs);
virDomainVirtioSerialAddrSetFree(priv->vioserialaddrs);
virDomainChrSourceDefFree(priv->monConfig);
qemuDomainObjFreeJob(priv);
VIR_FREE(priv->vcpupids);
VIR_FREE(priv->lockState);
VIR_FREE(priv->origname);
virStringFreeList(priv->qemuDevices);
virChrdevFree(priv->devs);
/* This should never be non-NULL if we get here, but just in case... */
if (priv->mon) {
VIR_ERROR(_("Unexpected QEMU monitor still active during domain deletion"));
qemuMonitorClose(priv->mon);
}
if (priv->agent) {
VIR_ERROR(_("Unexpected QEMU agent still active during domain deletion"));
qemuAgentClose(priv->agent);
}
VIR_FREE(priv->cleanupCallbacks);
virBitmapFree(priv->autoNodeset);
virBitmapFree(priv->autoCpuset);
VIR_FREE(priv->libDir);
VIR_FREE(priv->channelTargetDir);
VIR_FREE(priv);
}
static int
qemuDomainObjPrivateXMLFormat(virBufferPtr buf,
virDomainObjPtr vm)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
const char *monitorpath;
qemuDomainJob job;
/* priv->monitor_chr is set only for qemu */
if (priv->monConfig) {
switch (priv->monConfig->type) {
case VIR_DOMAIN_CHR_TYPE_UNIX:
monitorpath = priv->monConfig->data.nix.path;
break;
default:
case VIR_DOMAIN_CHR_TYPE_PTY:
monitorpath = priv->monConfig->data.file.path;
break;
}
virBufferEscapeString(buf, "<monitor path='%s'", monitorpath);
if (priv->monJSON)
virBufferAddLit(buf, " json='1'");
virBufferAsprintf(buf, " type='%s'/>\n",
virDomainChrTypeToString(priv->monConfig->type));
}
if (priv->nvcpupids) {
size_t i;
virBufferAddLit(buf, "<vcpus>\n");
virBufferAdjustIndent(buf, 2);
for (i = 0; i < priv->nvcpupids; i++)
virBufferAsprintf(buf, "<vcpu pid='%d'/>\n", priv->vcpupids[i]);
virBufferAdjustIndent(buf, -2);
virBufferAddLit(buf, "</vcpus>\n");
}
if (priv->qemuCaps) {
size_t i;
virBufferAddLit(buf, "<qemuCaps>\n");
virBufferAdjustIndent(buf, 2);
for (i = 0; i < QEMU_CAPS_LAST; i++) {
if (virQEMUCapsGet(priv->qemuCaps, i)) {
virBufferAsprintf(buf, "<flag name='%s'/>\n",
virQEMUCapsTypeToString(i));
}
}
virBufferAdjustIndent(buf, -2);
virBufferAddLit(buf, "</qemuCaps>\n");
}
if (priv->lockState)
virBufferAsprintf(buf, "<lockstate>%s</lockstate>\n", priv->lockState);
job = priv->job.active;
if (!qemuDomainTrackJob(job))
priv->job.active = QEMU_JOB_NONE;
if (priv->job.active || priv->job.asyncJob) {
virBufferAsprintf(buf, "<job type='%s' async='%s'",
qemuDomainJobTypeToString(priv->job.active),
qemuDomainAsyncJobTypeToString(priv->job.asyncJob));
if (priv->job.phase) {
virBufferAsprintf(buf, " phase='%s'",
qemuDomainAsyncJobPhaseToString(
priv->job.asyncJob, priv->job.phase));
}
if (priv->job.asyncJob != QEMU_ASYNC_JOB_MIGRATION_OUT) {
virBufferAddLit(buf, "/>\n");
} else {
size_t i;
virDomainDiskDefPtr disk;
qemuDomainDiskPrivatePtr diskPriv;
virBufferAddLit(buf, ">\n");
virBufferAdjustIndent(buf, 2);
for (i = 0; i < vm->def->ndisks; i++) {
disk = vm->def->disks[i];
diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk);
virBufferAsprintf(buf, "<disk dev='%s' migrating='%s'/>\n",
disk->dst,
diskPriv->migrating ? "yes" : "no");
}
virBufferAdjustIndent(buf, -2);
virBufferAddLit(buf, "</job>\n");
}
}
priv->job.active = job;
if (priv->fakeReboot)
virBufferAddLit(buf, "<fakereboot/>\n");
if (priv->qemuDevices && *priv->qemuDevices) {
char **tmp = priv->qemuDevices;
virBufferAddLit(buf, "<devices>\n");
virBufferAdjustIndent(buf, 2);
while (*tmp) {
virBufferAsprintf(buf, "<device alias='%s'/>\n", *tmp);
tmp++;
}
virBufferAdjustIndent(buf, -2);
virBufferAddLit(buf, "</devices>\n");
}
if (priv->autoNodeset) {
char *nodeset = virBitmapFormat(priv->autoNodeset);
if (!nodeset)
return -1;
virBufferAsprintf(buf, "<numad nodeset='%s'/>\n", nodeset);
VIR_FREE(nodeset);
}
/* Various per-domain paths */
virBufferEscapeString(buf, "<libDir path='%s'/>\n", priv->libDir);
virBufferEscapeString(buf, "<channelTargetDir path='%s'/>\n",
priv->channelTargetDir);
return 0;
}
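/* The status XML fragment produced above looks roughly like this
* (values illustrative, optional elements omitted):
*
*   <monitor path='/var/lib/libvirt/qemu/domain-1-vm/monitor.sock'
*            json='1' type='unix'/>
*   <vcpus>
*     <vcpu pid='12345'/>
*   </vcpus>
*   <qemuCaps>
*     <flag name='kvm'/>
*   </qemuCaps>
*   <devices>
*     <device alias='virtio-disk0'/>
*   </devices>
*   <libDir path='/var/lib/libvirt/qemu/domain-1-vm'/>
*   <channelTargetDir path='/var/lib/libvirt/qemu/channel/target/domain-1-vm'/>
*
* qemuDomainObjPrivateXMLParse() below consumes the same shape. */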
static int
qemuDomainObjPrivateXMLParse(xmlXPathContextPtr ctxt,
virDomainObjPtr vm,
virDomainDefParserConfigPtr config)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
virQEMUDriverPtr driver = config->priv;
char *monitorpath;
char *tmp = NULL;
int n;
size_t i;
xmlNodePtr *nodes = NULL;
virQEMUCapsPtr qemuCaps = NULL;
virCapsPtr caps = NULL;
if (VIR_ALLOC(priv->monConfig) < 0)
goto error;
if (!(monitorpath =
virXPathString("string(./monitor[1]/@path)", ctxt))) {
virReportError(VIR_ERR_INTERNAL_ERROR,
"%s", _("no monitor path"));
goto error;
}
tmp = virXPathString("string(./monitor[1]/@type)", ctxt);
if (tmp)
priv->monConfig->type = virDomainChrTypeFromString(tmp);
else
priv->monConfig->type = VIR_DOMAIN_CHR_TYPE_PTY;
VIR_FREE(tmp);
priv->monJSON = virXPathBoolean("count(./monitor[@json = '1']) > 0",
ctxt) > 0;
switch (priv->monConfig->type) {
case VIR_DOMAIN_CHR_TYPE_PTY:
priv->monConfig->data.file.path = monitorpath;
break;
case VIR_DOMAIN_CHR_TYPE_UNIX:
priv->monConfig->data.nix.path = monitorpath;
break;
default:
VIR_FREE(monitorpath);
virReportError(VIR_ERR_INTERNAL_ERROR,
_("unsupported monitor type '%s'"),
virDomainChrTypeToString(priv->monConfig->type));
goto error;
}
n = virXPathNodeSet("./vcpus/vcpu", ctxt, &nodes);
if (n < 0)
goto error;
if (n) {
priv->nvcpupids = n;
if (VIR_REALLOC_N(priv->vcpupids, priv->nvcpupids) < 0)
goto error;
for (i = 0; i < n; i++) {
char *pidstr = virXMLPropString(nodes[i], "pid");
if (!pidstr)
goto error;
if (virStrToLong_i(pidstr, NULL, 10, &(priv->vcpupids[i])) < 0) {
VIR_FREE(pidstr);
goto error;
}
VIR_FREE(pidstr);
}
VIR_FREE(nodes);
}
if ((n = virXPathNodeSet("./qemuCaps/flag", ctxt, &nodes)) < 0) {
virReportError(VIR_ERR_INTERNAL_ERROR,
"%s", _("failed to parse qemu capabilities flags"));
goto error;
}
if (n > 0) {
if (!(qemuCaps = virQEMUCapsNew()))
goto error;
for (i = 0; i < n; i++) {
char *str = virXMLPropString(nodes[i], "name");
if (str) {
int flag = virQEMUCapsTypeFromString(str);
if (flag < 0) {
virReportError(VIR_ERR_INTERNAL_ERROR,
_("Unknown qemu capabilities flag %s"), str);
VIR_FREE(str);
goto error;
}
VIR_FREE(str);
virQEMUCapsSet(qemuCaps, flag);
}
}
priv->qemuCaps = qemuCaps;
qemuCaps = NULL;
}
VIR_FREE(nodes);
priv->lockState = virXPathString("string(./lockstate)", ctxt);
if ((tmp = virXPathString("string(./job[1]/@type)", ctxt))) {
int type;
if ((type = qemuDomainJobTypeFromString(tmp)) < 0) {
virReportError(VIR_ERR_INTERNAL_ERROR,
_("Unknown job type %s"), tmp);
VIR_FREE(tmp);
goto error;
}
VIR_FREE(tmp);
priv->job.active = type;
}
if ((tmp = virXPathString("string(./job[1]/@async)", ctxt))) {
int async;
if ((async = qemuDomainAsyncJobTypeFromString(tmp)) < 0) {
virReportError(VIR_ERR_INTERNAL_ERROR,
_("Unknown async job type %s"), tmp);
VIR_FREE(tmp);
goto error;
}
VIR_FREE(tmp);
priv->job.asyncJob = async;
if ((tmp = virXPathString("string(./job[1]/@phase)", ctxt))) {
priv->job.phase = qemuDomainAsyncJobPhaseFromString(async, tmp);
if (priv->job.phase < 0) {
virReportError(VIR_ERR_INTERNAL_ERROR,
_("Unknown job phase %s"), tmp);
VIR_FREE(tmp);
goto error;
}
VIR_FREE(tmp);
}
}
if ((n = virXPathNodeSet("./job[1]/disk[@migrating='yes']",
ctxt, &nodes)) < 0) {
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
_("failed to parse list of disks marked for migration"));
goto error;
}
if (n > 0) {
if (priv->job.asyncJob != QEMU_ASYNC_JOB_MIGRATION_OUT) {
VIR_WARN("Found disks marked for migration but we were not "
"migrating");
n = 0;
}
for (i = 0; i < n; i++) {
char *dst = virXMLPropString(nodes[i], "dev");
virDomainDiskDefPtr disk;
if (dst && (disk = virDomainDiskByName(vm->def, dst, false)))
QEMU_DOMAIN_DISK_PRIVATE(disk)->migrating = true;
VIR_FREE(dst);
}
}
VIR_FREE(nodes);
priv->fakeReboot = virXPathBoolean("boolean(./fakereboot)", ctxt) == 1;
if ((n = virXPathNodeSet("./devices/device", ctxt, &nodes)) < 0) {
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
_("failed to parse qemu device list"));
goto error;
}
if (n > 0) {
/* NULL-terminated list */
if (VIR_ALLOC_N(priv->qemuDevices, n + 1) < 0)
goto error;
for (i = 0; i < n; i++) {
priv->qemuDevices[i] = virXMLPropString(nodes[i], "alias");
if (!priv->qemuDevices[i]) {
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
_("failed to parse qemu device list"));
goto error;
}
}
}
VIR_FREE(nodes);
if (!(caps = virQEMUDriverGetCapabilities(driver, false)))
goto error;
if ((tmp = virXPathString("string(./numad/@nodeset)", ctxt))) {
if (virBitmapParse(tmp, 0, &priv->autoNodeset,
caps->host.nnumaCell_max) < 0)
goto error;
if (!(priv->autoCpuset = virCapabilitiesGetCpusForNodemask(caps,
priv->autoNodeset)))
goto error;
}
virObjectUnref(caps);
VIR_FREE(tmp);
if ((tmp = virXPathString("string(./libDir/@path)", ctxt)))
priv->libDir = tmp;
if ((tmp = virXPathString("string(./channelTargetDir/@path)", ctxt)))
priv->channelTargetDir = tmp;
tmp = NULL;
if (qemuDomainSetPrivatePathsOld(driver, vm) < 0)
goto error;
return 0;
error:
virDomainChrSourceDefFree(priv->monConfig);
priv->monConfig = NULL;
VIR_FREE(nodes);
VIR_FREE(tmp);
virStringFreeList(priv->qemuDevices);
priv->qemuDevices = NULL;
virObjectUnref(qemuCaps);
virObjectUnref(caps);
return -1;
}
virDomainXMLPrivateDataCallbacks virQEMUDriverPrivateDataCallbacks = {
.alloc = qemuDomainObjPrivateAlloc,
.free = qemuDomainObjPrivateFree,
.diskNew = qemuDomainDiskPrivateNew,
.hostdevNew = qemuDomainHostdevPrivateNew,
.parse = qemuDomainObjPrivateXMLParse,
.format = qemuDomainObjPrivateXMLFormat,
};
static void
qemuDomainDefNamespaceFree(void *nsdata)
{
qemuDomainCmdlineDefPtr cmd = nsdata;
qemuDomainCmdlineDefFree(cmd);
}
static int
qemuDomainDefNamespaceParse(xmlDocPtr xml ATTRIBUTE_UNUSED,
xmlNodePtr root ATTRIBUTE_UNUSED,
xmlXPathContextPtr ctxt,
void **data)
{
qemuDomainCmdlineDefPtr cmd = NULL;
bool uses_qemu_ns = false;
xmlNodePtr *nodes = NULL;
int n;
size_t i;
if (xmlXPathRegisterNs(ctxt, BAD_CAST "qemu", BAD_CAST QEMU_NAMESPACE_HREF) < 0) {
virReportError(VIR_ERR_INTERNAL_ERROR,
_("Failed to register xml namespace '%s'"),
QEMU_NAMESPACE_HREF);
return -1;
}
if (VIR_ALLOC(cmd) < 0)
return -1;
/* first handle the extra command-line arguments */
n = virXPathNodeSet("./qemu:commandline/qemu:arg", ctxt, &nodes);
if (n < 0)
goto error;
uses_qemu_ns |= n > 0;
if (n && VIR_ALLOC_N(cmd->args, n) < 0)
goto error;
for (i = 0; i < n; i++) {
cmd->args[cmd->num_args] = virXMLPropString(nodes[i], "value");
if (cmd->args[cmd->num_args] == NULL) {
virReportError(VIR_ERR_INTERNAL_ERROR,
"%s", _("No qemu command-line argument specified"));
goto error;
}
cmd->num_args++;
}
VIR_FREE(nodes);
/* now handle the extra environment variables */
n = virXPathNodeSet("./qemu:commandline/qemu:env", ctxt, &nodes);
if (n < 0)
goto error;
uses_qemu_ns |= n > 0;
if (n && VIR_ALLOC_N(cmd->env_name, n) < 0)
goto error;
if (n && VIR_ALLOC_N(cmd->env_value, n) < 0)
goto error;
for (i = 0; i < n; i++) {
char *tmp;
tmp = virXMLPropString(nodes[i], "name");
if (tmp == NULL) {
virReportError(VIR_ERR_INTERNAL_ERROR,
"%s", _("No qemu environment name specified"));
goto error;
}
if (tmp[0] == '\0') {
virReportError(VIR_ERR_INTERNAL_ERROR,
"%s", _("Empty qemu environment name specified"));
goto error;
}
if (!c_isalpha(tmp[0]) && tmp[0] != '_') {
virReportError(VIR_ERR_INTERNAL_ERROR,
"%s", _("Invalid environment name, it must begin with a letter or underscore"));
goto error;
}
if (strspn(tmp, "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_") != strlen(tmp)) {
virReportError(VIR_ERR_INTERNAL_ERROR,
"%s", _("Invalid environment name, it must contain only alphanumerics and underscore"));
goto error;
}
cmd->env_name[cmd->num_env] = tmp;
cmd->env_value[cmd->num_env] = virXMLPropString(nodes[i], "value");
/* a NULL env value is allowed, since it might be empty */
cmd->num_env++;
}
VIR_FREE(nodes);
if (uses_qemu_ns)
*data = cmd;
else
VIR_FREE(cmd);
return 0;
error:
VIR_FREE(nodes);
qemuDomainDefNamespaceFree(cmd);
return -1;
}
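/* The namespace parsed above corresponds to domain XML of this form;
* only definitions that actually contain qemu:commandline elements end
* up keeping the namespace data (uses_qemu_ns):
*
*   <domain type='kvm' xmlns:qemu='http://libvirt.org/schemas/domain/qemu/1.0'>
*     ...
*     <qemu:commandline>
*       <qemu:arg value='-newarg'/>
*       <qemu:env name='QEMU_ENV' value='VAL'/>
*     </qemu:commandline>
*   </domain>
*/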
static int
qemuDomainDefNamespaceFormatXML(virBufferPtr buf,
void *nsdata)
{
qemuDomainCmdlineDefPtr cmd = nsdata;
size_t i;
if (!cmd->num_args && !cmd->num_env)
return 0;
virBufferAddLit(buf, "<qemu:commandline>\n");
virBufferAdjustIndent(buf, 2);
for (i = 0; i < cmd->num_args; i++)
virBufferEscapeString(buf, "<qemu:arg value='%s'/>\n",
cmd->args[i]);
for (i = 0; i < cmd->num_env; i++) {
virBufferAsprintf(buf, "<qemu:env name='%s'", cmd->env_name[i]);
if (cmd->env_value[i])
virBufferEscapeString(buf, " value='%s'", cmd->env_value[i]);
virBufferAddLit(buf, "/>\n");
}
virBufferAdjustIndent(buf, -2);
virBufferAddLit(buf, "</qemu:commandline>\n");
return 0;
}
static const char *
qemuDomainDefNamespaceHref(void)
{
return "xmlns:qemu='" QEMU_NAMESPACE_HREF "'";
}
virDomainXMLNamespace virQEMUDriverDomainXMLNamespace = {
.parse = qemuDomainDefNamespaceParse,
.free = qemuDomainDefNamespaceFree,
.format = qemuDomainDefNamespaceFormatXML,
.href = qemuDomainDefNamespaceHref,
};
static int
qemuDomainDefAddImplicitInputDevice(virDomainDef *def)
{
if (ARCH_IS_X86(def->os.arch)) {
if (virDomainDefMaybeAddInput(def,
VIR_DOMAIN_INPUT_TYPE_MOUSE,
VIR_DOMAIN_INPUT_BUS_PS2) < 0)
return -1;
if (virDomainDefMaybeAddInput(def,
VIR_DOMAIN_INPUT_TYPE_KBD,
VIR_DOMAIN_INPUT_BUS_PS2) < 0)
return -1;
}
return 0;
}
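/* On x86 this is equivalent to the guest having been configured with:
*
*   <input type='mouse' bus='ps2'/>
*   <input type='keyboard' bus='ps2'/>
*
* virDomainDefMaybeAddInput() is a no-op when a matching device is
* already present, so explicit configuration always wins. */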
static int
qemuDomainDefAddDefaultDevices(virDomainDefPtr def,
virQEMUCapsPtr qemuCaps)
{
bool addDefaultUSB = true;
int usbModel = -1; /* "default for machinetype" */
int pciRoot; /* index within def->controllers */
bool addImplicitSATA = false;
bool addPCIRoot = false;
bool addPCIeRoot = false;
bool addDefaultMemballoon = true;
bool addDefaultUSBKBD = false;
bool addDefaultUSBMouse = false;
bool addPanicDevice = false;
int ret = -1;
/* add implicit input devices */
if (qemuDomainDefAddImplicitInputDevice(def) < 0)
goto cleanup;
/* Add implicit PCI root controller if the machine has one */
switch (def->os.arch) {
case VIR_ARCH_I686:
case VIR_ARCH_X86_64:
if (STREQ(def->os.machine, "isapc")) {
addDefaultUSB = false;
break;
}
if (qemuDomainMachineIsQ35(def)) {
addPCIeRoot = true;
addImplicitSATA = true;
/* add a USB2 controller set, but only if the
* ich9-usb-ehci1 device is supported
*/
if (virQEMUCapsGet(qemuCaps, QEMU_CAPS_ICH9_USB_EHCI1))
usbModel = VIR_DOMAIN_CONTROLLER_MODEL_USB_ICH9_EHCI1;
else
addDefaultUSB = false;
break;
}
if (qemuDomainMachineIsI440FX(def))
addPCIRoot = true;
break;
case VIR_ARCH_ARMV7L:
case VIR_ARCH_AARCH64:
addDefaultUSB = false;
addDefaultMemballoon = false;
if (qemuDomainMachineIsVirt(def))
addPCIeRoot = virQEMUCapsGet(qemuCaps, QEMU_CAPS_OBJECT_GPEX);
break;
case VIR_ARCH_PPC64:
case VIR_ARCH_PPC64LE:
addPCIRoot = true;
addDefaultUSBKBD = true;
addDefaultUSBMouse = true;
/* For pSeries guests, the firmware provides the same
* functionality as the pvpanic device, so automatically
* add the definition if not already present */
if (STRPREFIX(def->os.machine, "pseries"))
addPanicDevice = true;
break;
case VIR_ARCH_ALPHA:
case VIR_ARCH_PPC:
case VIR_ARCH_PPCEMB:
case VIR_ARCH_SH4:
case VIR_ARCH_SH4EB:
addPCIRoot = true;
break;
case VIR_ARCH_S390:
case VIR_ARCH_S390X:
addDefaultUSB = false;
addPanicDevice = true;
break;
case VIR_ARCH_SPARC:
case VIR_ARCH_SPARC64:
addPCIRoot = true;
break;
default:
break;
}
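/* Taking q35 as a worked example: the flags chosen above make the rest
* of this function materialize roughly these default controllers when
* none were configured explicitly:
*
*   <controller type='pci' index='0' model='pcie-root'/>
*   <controller type='sata' index='0'/>
*   <controller type='usb' index='0' model='ich9-ehci1'/>
*
* (the EHCI controller comes with its UHCI companions, and only when
* QEMU_CAPS_ICH9_USB_EHCI1 is available) */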
if (addDefaultUSB &&
virDomainControllerFind(def, VIR_DOMAIN_CONTROLLER_TYPE_USB, 0) < 0 &&
virDomainDefAddUSBController(def, 0, usbModel) < 0)
goto cleanup;
if (addImplicitSATA &&
virDomainDefMaybeAddController(
def, VIR_DOMAIN_CONTROLLER_TYPE_SATA, 0, -1) < 0)
goto cleanup;
pciRoot = virDomainControllerFind(def, VIR_DOMAIN_CONTROLLER_TYPE_PCI, 0);
/* NB: any machine that sets addPCIRoot to true must also return
* true from the function qemuDomainSupportsPCI().
*/
if (addPCIRoot) {
if (pciRoot >= 0) {
if (def->controllers[pciRoot]->model != VIR_DOMAIN_CONTROLLER_MODEL_PCI_ROOT) {
virReportError(VIR_ERR_XML_ERROR,
_("The PCI controller with index='0' must be "
"model='pci-root' for this machine type, "
"but model='%s' was found instead"),
virDomainControllerModelPCITypeToString(def->controllers[pciRoot]->model));
goto cleanup;
}
} else if (!virDomainDefAddController(def, VIR_DOMAIN_CONTROLLER_TYPE_PCI, 0,
VIR_DOMAIN_CONTROLLER_MODEL_PCI_ROOT)) {
goto cleanup;
}
}
/* When a machine has a pcie-root, make sure that there is always
* a dmi-to-pci-bridge controller added as bus 1, and a pci-bridge
* as bus 2, so that standard PCI devices can be connected
*
* NB: any machine that sets addPCIeRoot to true must also return
* true from the function qemuDomainSupportsPCI().
*/
if (addPCIeRoot) {
if (pciRoot >= 0) {
if (def->controllers[pciRoot]->model != VIR_DOMAIN_CONTROLLER_MODEL_PCIE_ROOT) {
virReportError(VIR_ERR_XML_ERROR,
_("The PCI controller with index='0' must be "
"model='pcie-root' for this machine type, "
"but model='%s' was found instead"),
virDomainControllerModelPCITypeToString(def->controllers[pciRoot]->model));
goto cleanup;
}
} else if (!virDomainDefAddController(def, VIR_DOMAIN_CONTROLLER_TYPE_PCI, 0,
VIR_DOMAIN_CONTROLLER_MODEL_PCIE_ROOT)) {
goto cleanup;
}
if (virDomainDefMaybeAddController(
def, VIR_DOMAIN_CONTROLLER_TYPE_PCI, 1,
VIR_DOMAIN_CONTROLLER_MODEL_DMI_TO_PCI_BRIDGE) < 0 ||
virDomainDefMaybeAddController(
def, VIR_DOMAIN_CONTROLLER_TYPE_PCI, 2,
VIR_DOMAIN_CONTROLLER_MODEL_PCI_BRIDGE) < 0) {
goto cleanup;
}
}
if (addDefaultMemballoon && !def->memballoon) {
virDomainMemballoonDefPtr memballoon;
if (VIR_ALLOC(memballoon) < 0)
goto cleanup;
memballoon->model = VIR_DOMAIN_MEMBALLOON_MODEL_VIRTIO;
def->memballoon = memballoon;
}
if (addDefaultUSBKBD &&
def->ngraphics > 0 &&
virDomainDefMaybeAddInput(def,
VIR_DOMAIN_INPUT_TYPE_KBD,
VIR_DOMAIN_INPUT_BUS_USB) < 0)
goto cleanup;
if (addDefaultUSBMouse &&
def->ngraphics > 0 &&
virDomainDefMaybeAddInput(def,
VIR_DOMAIN_INPUT_TYPE_MOUSE,
VIR_DOMAIN_INPUT_BUS_USB) < 0)
goto cleanup;
if (addPanicDevice) {
size_t j;
for (j = 0; j < def->npanics; j++) {
if (def->panics[j]->model == VIR_DOMAIN_PANIC_MODEL_DEFAULT ||
(ARCH_IS_PPC64(def->os.arch) &&
def->panics[j]->model == VIR_DOMAIN_PANIC_MODEL_PSERIES) ||
(ARCH_IS_S390(def->os.arch) &&
def->panics[j]->model == VIR_DOMAIN_PANIC_MODEL_S390))
break;
}
if (j == def->npanics) {
virDomainPanicDefPtr panic;
if (VIR_ALLOC(panic) < 0 ||
VIR_APPEND_ELEMENT_COPY(def->panics,
def->npanics, panic) < 0) {
VIR_FREE(panic);
goto cleanup;
}
}
}
ret = 0;
cleanup:
return ret;
}
/**
* qemuDomainDefEnableDefaultFeatures:
* @def: domain definition
* @qemuCaps: QEMU capabilities
*
* Make sure that features that should be enabled by default are actually
* enabled and configure default values related to those features.
*/
static void
qemuDomainDefEnableDefaultFeatures(virDomainDefPtr def,
virQEMUCapsPtr qemuCaps)
{
virGICVersion version;
/* The virt machine type always uses GIC: if the relevant element
* was not included in the domain XML, we need to choose a suitable
* GIC version ourselves */
if (def->features[VIR_DOMAIN_FEATURE_GIC] == VIR_TRISTATE_SWITCH_ABSENT &&
(def->os.arch == VIR_ARCH_ARMV7L || def->os.arch == VIR_ARCH_AARCH64) &&
qemuDomainMachineIsVirt(def)) {
VIR_DEBUG("Looking for usable GIC version in domain capabilities");
for (version = VIR_GIC_VERSION_LAST - 1;
version > VIR_GIC_VERSION_NONE;
version--) {
if (virQEMUCapsSupportsGICVersion(qemuCaps,
def->virtType,
version)) {
VIR_DEBUG("Using GIC version %s",
virGICVersionTypeToString(version));
def->gic_version = version;
break;
}
}
/* Even if we haven't found a usable GIC version in the domain
* capabilities, we still want to enable this */
def->features[VIR_DOMAIN_FEATURE_GIC] = VIR_TRISTATE_SWITCH_ON;
}
/* Use the default GIC version if no version was specified */
if (def->features[VIR_DOMAIN_FEATURE_GIC] == VIR_TRISTATE_SWITCH_ON &&
def->gic_version == VIR_GIC_VERSION_NONE)
def->gic_version = VIR_GIC_VERSION_DEFAULT;
}
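/* Replace a machine type alias (such as "pc" or "q35") with the
 * canonical machine name reported by the QEMU binary, so that the
 * stored definition stays stable across QEMU releases. */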
static int
qemuCanonicalizeMachine(virDomainDefPtr def, virQEMUCapsPtr qemuCaps)
{
const char *canon;
if (!(canon = virQEMUCapsGetCanonicalMachine(qemuCaps, def->os.machine)))
return 0;
if (STRNEQ(canon, def->os.machine)) {
char *tmp;
if (VIR_STRDUP(tmp, canon) < 0)
return -1;
VIR_FREE(def->os.machine);
def->os.machine = tmp;
}
return 0;
}
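/* Detect VNC listen sockets that were auto-generated under the
 * driver's lib directory by an older libvirt: for an inactive
 * definition the socket is dropped and the listen switched back to
 * an address, otherwise it is just marked as coming from the
 * qemu.conf configuration. */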
static int
qemuDomainRecheckInternalPaths(virDomainDefPtr def,
virQEMUDriverConfigPtr cfg,
unsigned int flags)
{
size_t i = 0;
size_t j = 0;
for (i = 0; i < def->ngraphics; ++i) {
virDomainGraphicsDefPtr graphics = def->graphics[i];
for (j = 0; j < graphics->nListens; ++j) {
virDomainGraphicsListenDefPtr glisten = &graphics->listens[j];
            /* This can happen only when parsing XML from old libvirts,
             * where a UNIX socket was available for VNC graphics only.
             * To preserve that behavior, if we remove the auto-generated
             * socket (based on a config option from qemu.conf) we need
             * to change the listen type to address. */
if (graphics->type == VIR_DOMAIN_GRAPHICS_TYPE_VNC &&
glisten->type == VIR_DOMAIN_GRAPHICS_LISTEN_TYPE_SOCKET &&
glisten->socket &&
STRPREFIX(glisten->socket, cfg->libDir)) {
if (flags & VIR_DOMAIN_DEF_PARSE_INACTIVE) {
VIR_FREE(glisten->socket);
glisten->type = VIR_DOMAIN_GRAPHICS_LISTEN_TYPE_ADDRESS;
} else {
glisten->fromConfig = true;
}
}
}
}
return 0;
}
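/**
 * qemuDomainDefPostParse:
 *
 * Driver-level post-parse fixup: rejects configurations QEMU cannot
 * support (external bootloaders, missing machine type), fills in an
 * NVRAM path for read-only pflash loaders, picks a default emulator,
 * adds the default devices and features, canonicalizes the machine
 * type and lets the security manager verify the definition.
 */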
static int
qemuDomainDefPostParse(virDomainDefPtr def,
virCapsPtr caps,
unsigned int parseFlags,
void *opaque)
{
virQEMUDriverPtr driver = opaque;
virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
virQEMUCapsPtr qemuCaps = NULL;
int ret = -1;
if (def->os.bootloader || def->os.bootloaderArgs) {
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
_("bootloader is not supported by QEMU"));
return ret;
}
if (!def->os.machine) {
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
_("missing machine type"));
return ret;
}
if (def->os.loader &&
def->os.loader->type == VIR_DOMAIN_LOADER_TYPE_PFLASH &&
def->os.loader->readonly == VIR_TRISTATE_SWITCH_ON &&
!def->os.loader->nvram) {
if (virAsprintf(&def->os.loader->nvram, "%s/%s_VARS.fd",
cfg->nvramDir, def->name) < 0)
goto cleanup;
}
/* check for emulator and create a default one if needed */
if (!def->emulator &&
!(def->emulator = virDomainDefGetDefaultEmulator(def, caps)))
return ret;
if (!(qemuCaps = virQEMUCapsCacheLookup(driver->qemuCapsCache,
def->emulator)))
goto cleanup;
if (qemuDomainDefAddDefaultDevices(def, qemuCaps) < 0)
goto cleanup;
if (qemuCanonicalizeMachine(def, qemuCaps) < 0)
goto cleanup;
qemuDomainDefEnableDefaultFeatures(def, qemuCaps);
if (qemuDomainRecheckInternalPaths(def, cfg, parseFlags) < 0)
goto cleanup;
if (virSecurityManagerVerify(driver->securityManager, def) < 0)
goto cleanup;
ret = 0;
cleanup:
virObjectUnref(qemuCaps);
virObjectUnref(cfg);
return ret;
}
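/* Validation callback run on every parsed definition; currently it
 * only rejects the 'min_guarantee' memory tuning parameter, which is
 * not supported by QEMU. */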
static int
qemuDomainDefValidate(const virDomainDef *def,
virCapsPtr caps ATTRIBUTE_UNUSED,
void *opaque ATTRIBUTE_UNUSED)
{
if (def->mem.min_guarantee) {
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
_("Parameter 'min_guarantee' not supported by QEMU."));
return -1;
}
return 0;
}
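/* Pick a default NIC model based on the guest architecture, the
 * machine type and the device models advertised by the QEMU binary. */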
static const char *
qemuDomainDefaultNetModel(const virDomainDef *def,
virQEMUCapsPtr qemuCaps)
{
if (ARCH_IS_S390(def->os.arch))
return "virtio";
if (def->os.arch == VIR_ARCH_ARMV7L ||
def->os.arch == VIR_ARCH_AARCH64) {
if (STREQ(def->os.machine, "versatilepb"))
return "smc91c111";
if (qemuDomainMachineIsVirt(def))
return "virtio";
        /* Incomplete: vexpress (and a few other) boards use this,
         * but not all ARM boards do */
return "lan9118";
}
    /* Try several network devices in turn; each of these devices is
     * less likely to be supported out-of-the-box by the guest
     * operating system than the previous one */
if (virQEMUCapsGet(qemuCaps, QEMU_CAPS_DEVICE_RTL8139))
return "rtl8139";
else if (virQEMUCapsGet(qemuCaps, QEMU_CAPS_DEVICE_E1000))
return "e1000";
else if (virQEMUCapsGet(qemuCaps, QEMU_CAPS_DEVICE_VIRTIO_NET))
return "virtio";
/* We've had no luck detecting support for any network device,
* but we have to return something: might as well be rtl8139 */
return "rtl8139";
}
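/**
 * qemuDomainDeviceDefPostParse:
 *
 * Per-device post-parse fixup: fills in driver-specific defaults
 * (NIC model, disk format and driver, console and controller models
 * on s390, QXL vgamem, panic device model) and rejects device
 * configurations QEMU cannot support, such as capabilities-mode
 * hostdevs or expander buses on the wrong machine type.
 */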
static int
qemuDomainDeviceDefPostParse(virDomainDeviceDefPtr dev,
const virDomainDef *def,
virCapsPtr caps ATTRIBUTE_UNUSED,
unsigned int parseFlags,
void *opaque)
{
virQEMUDriverPtr driver = opaque;
virQEMUCapsPtr qemuCaps = NULL;
virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
int ret = -1;
qemuCaps = virQEMUCapsCacheLookup(driver->qemuCapsCache, def->emulator);
if (dev->type == VIR_DOMAIN_DEVICE_NET &&
dev->data.net->type != VIR_DOMAIN_NET_TYPE_HOSTDEV &&
!dev->data.net->model) {
if (VIR_STRDUP(dev->data.net->model,
qemuDomainDefaultNetModel(def, qemuCaps)) < 0)
goto cleanup;
}
/* set default disk types and drivers */
if (dev->type == VIR_DOMAIN_DEVICE_DISK) {
virDomainDiskDefPtr disk = dev->data.disk;
/* assign default storage format and driver according to config */
if (cfg->allowDiskFormatProbing) {
/* default disk format for drives */
if (virDomainDiskGetFormat(disk) == VIR_STORAGE_FILE_NONE &&
(virDomainDiskGetType(disk) == VIR_STORAGE_TYPE_FILE ||
virDomainDiskGetType(disk) == VIR_STORAGE_TYPE_BLOCK))
virDomainDiskSetFormat(disk, VIR_STORAGE_FILE_AUTO);
/* default disk format for mirrored drive */
if (disk->mirror &&
disk->mirror->format == VIR_STORAGE_FILE_NONE)
disk->mirror->format = VIR_STORAGE_FILE_AUTO;
} else {
/* default driver if probing is forbidden */
if (!virDomainDiskGetDriver(disk) &&
virDomainDiskSetDriver(disk, "qemu") < 0)
goto cleanup;
/* default disk format for drives */
if (virDomainDiskGetFormat(disk) == VIR_STORAGE_FILE_NONE &&
(virDomainDiskGetType(disk) == VIR_STORAGE_TYPE_FILE ||
virDomainDiskGetType(disk) == VIR_STORAGE_TYPE_BLOCK))
virDomainDiskSetFormat(disk, VIR_STORAGE_FILE_RAW);
/* default disk format for mirrored drive */
if (disk->mirror &&
disk->mirror->format == VIR_STORAGE_FILE_NONE)
disk->mirror->format = VIR_STORAGE_FILE_RAW;
}
}
/* set the default console type for S390 arches */
if (dev->type == VIR_DOMAIN_DEVICE_CHR &&
dev->data.chr->deviceType == VIR_DOMAIN_CHR_DEVICE_TYPE_CONSOLE &&
dev->data.chr->targetType == VIR_DOMAIN_CHR_CONSOLE_TARGET_TYPE_NONE &&
ARCH_IS_S390(def->os.arch))
dev->data.chr->targetType = VIR_DOMAIN_CHR_CONSOLE_TARGET_TYPE_VIRTIO;
/* set the default USB model to none for s390 unless an address is found */
if (dev->type == VIR_DOMAIN_DEVICE_CONTROLLER &&
dev->data.controller->type == VIR_DOMAIN_CONTROLLER_TYPE_USB &&
dev->data.controller->model == -1 &&
dev->data.controller->info.type == VIR_DOMAIN_DEVICE_ADDRESS_TYPE_NONE &&
ARCH_IS_S390(def->os.arch))
dev->data.controller->model = VIR_DOMAIN_CONTROLLER_MODEL_USB_NONE;
/* set the default SCSI controller model for S390 arches */
if (dev->type == VIR_DOMAIN_DEVICE_CONTROLLER &&
dev->data.controller->type == VIR_DOMAIN_CONTROLLER_TYPE_SCSI &&
dev->data.controller->model == -1 &&
ARCH_IS_S390(def->os.arch))
dev->data.controller->model = VIR_DOMAIN_CONTROLLER_MODEL_SCSI_VIRTIO_SCSI;
/* clear auto generated unix socket path for inactive definitions */
if ((parseFlags & VIR_DOMAIN_DEF_PARSE_INACTIVE) &&
dev->type == VIR_DOMAIN_DEVICE_CHR &&
dev->data.chr->deviceType == VIR_DOMAIN_CHR_DEVICE_TYPE_CHANNEL &&
dev->data.chr->targetType == VIR_DOMAIN_CHR_CHANNEL_TARGET_TYPE_VIRTIO &&
dev->data.chr->source.type == VIR_DOMAIN_CHR_TYPE_UNIX &&
dev->data.chr->source.data.nix.path &&
STRPREFIX(dev->data.chr->source.data.nix.path, cfg->channelTargetDir)) {
        /*
         * If the path was generated by us (it starts with our
         * channel target dir), it should not be kept in the
         * persistent XML: users shouldn't care about a path that
         * libvirt generated, and if they do, they are expected
         * to set it themselves.
         */
VIR_FREE(dev->data.chr->source.data.nix.path);
}
/* forbid capabilities mode hostdev in this kind of hypervisor */
if (dev->type == VIR_DOMAIN_DEVICE_HOSTDEV &&
dev->data.hostdev->mode == VIR_DOMAIN_HOSTDEV_MODE_CAPABILITIES) {
virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
_("hostdev mode 'capabilities' is not "
"supported in %s"),
virDomainVirtTypeToString(def->virtType));
goto cleanup;
}
if (dev->type == VIR_DOMAIN_DEVICE_VIDEO &&
dev->data.video->type == VIR_DOMAIN_VIDEO_TYPE_QXL) {
if (dev->data.video->vgamem) {
if (dev->data.video->vgamem < 1024) {
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
_("value for 'vgamem' must be at least 1 MiB "
"(1024 KiB)"));
goto cleanup;
}
if (dev->data.video->vgamem != VIR_ROUND_UP_POWER_OF_TWO(dev->data.video->vgamem)) {
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
_("value for 'vgamem' must be power of two"));
goto cleanup;
}
} else {
dev->data.video->vgamem = QEMU_QXL_VGAMEM_DEFAULT;
}
}
if (dev->type == VIR_DOMAIN_DEVICE_PANIC &&
dev->data.panic->model == VIR_DOMAIN_PANIC_MODEL_DEFAULT) {
if (ARCH_IS_PPC64(def->os.arch) &&
STRPREFIX(def->os.machine, "pseries"))
dev->data.panic->model = VIR_DOMAIN_PANIC_MODEL_PSERIES;
else if (ARCH_IS_S390(def->os.arch))
dev->data.panic->model = VIR_DOMAIN_PANIC_MODEL_S390;
else
dev->data.panic->model = VIR_DOMAIN_PANIC_MODEL_ISA;
}
if (dev->type == VIR_DOMAIN_DEVICE_CONTROLLER) {
virDomainControllerDefPtr cont = dev->data.controller;
if (cont->type == VIR_DOMAIN_CONTROLLER_TYPE_PCI) {
if (cont->model == VIR_DOMAIN_CONTROLLER_MODEL_PCI_EXPANDER_BUS &&
!qemuDomainMachineIsI440FX(def)) {
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
_("pci-expander-bus controllers are only supported "
"on 440fx-based machinetypes"));
goto cleanup;
}
if (cont->model == VIR_DOMAIN_CONTROLLER_MODEL_PCIE_EXPANDER_BUS &&
!qemuDomainMachineIsQ35(def)) {
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
_("pcie-expander-bus controllers are only supported "
"on q35-based machinetypes"));
goto cleanup;
}
        /* if a PCI expander bus has a NUMA node set, make sure
         * that NUMA node is configured in the guest <cpu><numa>
         * array. NUMA cell IDs in this array are numbered
         * from 0 .. size-1.
         */
if ((cont->model == VIR_DOMAIN_CONTROLLER_MODEL_PCI_EXPANDER_BUS ||
cont->model == VIR_DOMAIN_CONTROLLER_MODEL_PCIE_EXPANDER_BUS) &&
(int) virDomainNumaGetNodeCount(def->numa)
<= cont->opts.pciopts.numaNode) {
virReportError(VIR_ERR_XML_ERROR,
_("%s with index %d is "
"configured for a NUMA node (%d) "
"not present in the domain's "
"<cpu><numa> array (%zu)"),
virDomainControllerModelPCITypeToString(cont->model),
cont->idx, cont->opts.pciopts.numaNode,
virDomainNumaGetNodeCount(def->numa));
goto cleanup;
}
}
}
ret = 0;
cleanup:
virObjectUnref(qemuCaps);
virObjectUnref(cfg);
return ret;
}
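/* Parser callback that assigns device addresses using the
 * capabilities of the configured emulator binary. */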
static int
qemuDomainDefAssignAddresses(virDomainDef *def,
virCapsPtr caps ATTRIBUTE_UNUSED,
unsigned int parseFlags ATTRIBUTE_UNUSED,
void *opaque)
{
virQEMUDriverPtr driver = opaque;
virQEMUCapsPtr qemuCaps = NULL;
int ret = -1;
if (!(qemuCaps = virQEMUCapsCacheLookup(driver->qemuCapsCache,
def->emulator)))
goto cleanup;
if (qemuDomainAssignAddresses(def, qemuCaps, NULL) < 0)
goto cleanup;
ret = 0;
cleanup:
virObjectUnref(qemuCaps);
return ret;
}
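/* Wire the QEMU-specific parsing callbacks defined above into the
 * generic domain XML parser configuration. */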
virDomainDefParserConfig virQEMUDriverDomainDefParserConfig = {
.devicesPostParseCallback = qemuDomainDeviceDefPostParse,
.domainPostParseCallback = qemuDomainDefPostParse,
.assignAddressesCallback = qemuDomainDefAssignAddresses,
.domainValidateCallback = qemuDomainDefValidate,
.features = VIR_DOMAIN_DEF_FEATURE_MEMORY_HOTPLUG |
VIR_DOMAIN_DEF_FEATURE_OFFLINE_VCPUPIN
};
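/* Persist the current job state in the domain status XML so that a
 * restarted libvirtd can recover the job; a failure to save is only
 * logged as a warning, not propagated. */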
static void
qemuDomainObjSaveJob(virQEMUDriverPtr driver, virDomainObjPtr obj)
{
virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
if (virDomainObjIsActive(obj)) {
if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, obj, driver->caps) < 0)
VIR_WARN("Failed to save status on vm %s", obj->def->name);
}
virObjectUnref(cfg);
}
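/* Record the phase an async job has entered and take over ownership
 * of the job for the calling thread; a warning is logged if another
 * thread still owns the job. */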
void
qemuDomainObjSetJobPhase(virQEMUDriverPtr driver,
virDomainObjPtr obj,
int phase)
{
qemuDomainObjPrivatePtr priv = obj->privateData;
unsigned long long me = virThreadSelfID();
if (!priv->job.asyncJob)
return;
VIR_DEBUG("Setting '%s' phase to '%s'",
qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
qemuDomainAsyncJobPhaseToString(priv->job.asyncJob, phase));
if (priv->job.asyncOwner && me != priv->job.asyncOwner) {
VIR_WARN("'%s' async job is owned by thread %llu",
qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
priv->job.asyncOwner);
}
priv->job.phase = phase;
priv->job.asyncOwner = me;
qemuDomainObjSaveJob(driver, obj);
}
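/* Restrict which synchronous job types may run concurrently with the
 * current async job; QEMU_JOB_DESTROY is always kept allowed. */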
void
qemuDomainObjSetAsyncJobMask(virDomainObjPtr obj,
unsigned long long allowedJobs)
{
qemuDomainObjPrivatePtr priv = obj->privateData;
if (!priv->job.asyncJob)
return;
priv->job.mask = allowedJobs | JOB_MASK(QEMU_JOB_DESTROY);
}
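/* Forget the current async job (including any nested job running on
 * its behalf) and record the new state in the domain status XML. */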
void
qemuDomainObjDiscardAsyncJob(virQEMUDriverPtr driver, virDomainObjPtr obj)
{
qemuDomainObjPrivatePtr priv = obj->privateData;
if (priv->job.active == QEMU_JOB_ASYNC_NESTED)
qemuDomainObjResetJob(priv);
qemuDomainObjResetAsyncJob(priv);
qemuDomainObjSaveJob(driver, obj);
}
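/* Give up the calling thread's ownership of the async job without
 * ending the job itself; a warning is logged if the job is owned by
 * a different thread. */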
void
qemuDomainObjReleaseAsyncJob(virDomainObjPtr obj)
{
qemuDomainObjPrivatePtr priv = obj->privateData;
VIR_DEBUG("Releasing ownership of '%s' async job",
qemuDomainAsyncJobTypeToString(priv->job.asyncJob));
if (priv->job.asyncOwner != virThreadSelfID()) {
VIR_WARN("'%s' async job is owned by thread %llu",
qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
priv->job.asyncOwner);
}
priv->job.asyncOwner = 0;
}
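/* A job fits into the currently running async job if either no async
 * job is active or the job is present in the async job's mask of
 * allowed jobs. */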
static bool
qemuDomainNestedJobAllowed(qemuDomainObjPrivatePtr priv, qemuDomainJob job)
{
return !priv->job.asyncJob || (priv->job.mask & JOB_MASK(job)) != 0;
}
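/* A new job may start only when no synchronous job is active and the
 * async job (if any) allows it. */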
bool
qemuDomainJobAllowed(qemuDomainObjPrivatePtr priv, qemuDomainJob job)
{
return !priv->job.active && qemuDomainNestedJobAllowed(priv, job);
}
/* Give up waiting for mutex after 30 seconds */
#define QEMU_JOB_WAIT_TIME (1000ull * 30)
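/* Job handling works with two levels of locking: synchronous jobs
 * (job.active, guarded by job.cond) are short-lived and exclusive,
 * while asynchronous jobs (job.asyncJob, guarded by job.asyncCond)
 * can span many monitor interactions and only block the synchronous
 * jobs excluded by their job mask. An async job that needs the
 * monitor itself enters a nested job, which acquires job.cond but
 * skips the async job check. */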
/*
* obj must be locked before calling
*/
static int ATTRIBUTE_NONNULL(1)
qemuDomainObjBeginJobInternal(virQEMUDriverPtr driver,
virDomainObjPtr obj,
qemuDomainJob job,
qemuDomainAsyncJob asyncJob)
{
qemuDomainObjPrivatePtr priv = obj->privateData;
unsigned long long now;
unsigned long long then;
bool nested = job == QEMU_JOB_ASYNC_NESTED;
bool async = job == QEMU_JOB_ASYNC;
virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
const char *blocker = NULL;
int ret = -1;
unsigned long long duration = 0;
unsigned long long asyncDuration = 0;
const char *jobStr;
if (async)
jobStr = qemuDomainAsyncJobTypeToString(asyncJob);
else
jobStr = qemuDomainJobTypeToString(job);
VIR_DEBUG("Starting %s: %s (vm=%p name=%s, current job=%s async=%s)",
async ? "async job" : "job", jobStr, obj, obj->def->name,
qemuDomainJobTypeToString(priv->job.active),
qemuDomainAsyncJobTypeToString(priv->job.asyncJob));
if (virTimeMillisNow(&now) < 0) {
virObjectUnref(cfg);
return -1;
}
priv->jobs_queued++;
then = now + QEMU_JOB_WAIT_TIME;
retry:
if (cfg->maxQueuedJobs &&
priv->jobs_queued > cfg->maxQueuedJobs) {
goto error;
}
while (!nested && !qemuDomainNestedJobAllowed(priv, job)) {
VIR_DEBUG("Waiting for async job (vm=%p name=%s)", obj, obj->def->name);
if (virCondWaitUntil(&priv->job.asyncCond, &obj->parent.lock, then) < 0)
goto error;
}
while (priv->job.active) {
VIR_DEBUG("Waiting for job (vm=%p name=%s)", obj, obj->def->name);
if (virCondWaitUntil(&priv->job.cond, &obj->parent.lock, then) < 0)
goto error;
}
/* No job is active but a new async job could have been started while obj
* was unlocked, so we need to recheck it. */
if (!nested && !qemuDomainNestedJobAllowed(priv, job))
goto retry;
qemuDomainObjResetJob(priv);
ignore_value(virTimeMillisNow(&now));
if (job != QEMU_JOB_ASYNC) {
VIR_DEBUG("Started job: %s (async=%s vm=%p name=%s)",
qemuDomainJobTypeToString(job),
qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
obj, obj->def->name);
priv->job.active = job;
priv->job.owner = virThreadSelfID();
priv->job.ownerAPI = virThreadJobGet();
priv->job.started = now;
} else {
VIR_DEBUG("Started async job: %s (vm=%p name=%s)",
qemuDomainAsyncJobTypeToString(asyncJob),
obj, obj->def->name);
qemuDomainObjResetAsyncJob(priv);
if (VIR_ALLOC(priv->job.current) < 0)
goto cleanup;
priv->job.asyncJob = asyncJob;
priv->job.asyncOwner = virThreadSelfID();
priv->job.asyncOwnerAPI = virThreadJobGet();
priv->job.asyncStarted = now;
priv->job.current->started = now;
}
if (qemuDomainTrackJob(job))
qemuDomainObjSaveJob(driver, obj);
virObjectUnref(cfg);
return 0;
error:
ignore_value(virTimeMillisNow(&now));
if (priv->job.active && priv->job.started)
duration = now - priv->job.started;
if (priv->job.asyncJob && priv->job.asyncStarted)
asyncDuration = now - priv->job.asyncStarted;
VIR_WARN("Cannot start job (%s, %s) for domain %s; "
"current job is (%s, %s) owned by (%llu %s, %llu %s) "
"for (%llus, %llus)",
qemuDomainJobTypeToString(job),
qemuDomainAsyncJobTypeToString(asyncJob),
obj->def->name,
qemuDomainJobTypeToString(priv->job.active),
qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
priv->job.owner, NULLSTR(priv->job.ownerAPI),
priv->job.asyncOwner, NULLSTR(priv->job.asyncOwnerAPI),
duration / 1000, asyncDuration / 1000);
if (nested || qemuDomainNestedJobAllowed(priv, job))
blocker = priv->job.ownerAPI;
else
blocker = priv->job.asyncOwnerAPI;
ret = -1;
if (errno == ETIMEDOUT) {
if (blocker) {
virReportError(VIR_ERR_OPERATION_TIMEOUT,
_("cannot acquire state change lock (held by %s)"),
blocker);
} else {
virReportError(VIR_ERR_OPERATION_TIMEOUT, "%s",
_("cannot acquire state change lock"));
}
ret = -2;
} else if (cfg->maxQueuedJobs &&
priv->jobs_queued > cfg->maxQueuedJobs) {
if (blocker) {
virReportError(VIR_ERR_OPERATION_FAILED,
_("cannot acquire state change lock (held by %s) "
"due to max_queued limit"),
blocker);
} else {
virReportError(VIR_ERR_OPERATION_FAILED, "%s",
_("cannot acquire state change lock "
"due to max_queued limit"));
}
ret = -2;
} else {
virReportSystemError(errno, "%s", _("cannot acquire job mutex"));
}
cleanup:
priv->jobs_queued--;
virObjectUnref(cfg);
return ret;
}
/*
* obj must be locked before calling
*
* This must be called by anything that will change the VM state
* in any way, or anything that will use the QEMU monitor.
*
* Successful calls must be followed by EndJob eventually
*/
int qemuDomainObjBeginJob(virQEMUDriverPtr driver,
virDomainObjPtr obj,
qemuDomainJob job)
{
if (qemuDomainObjBeginJobInternal(driver, obj, job,
QEMU_ASYNC_JOB_NONE) < 0)
return -1;
else
return 0;
}
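/*
 * Typical synchronous caller pattern (an illustrative sketch only; "vm"
 * and the QEMU_JOB_MODIFY choice are assumptions for the example, not
 * taken from a particular caller):
 *
 *     if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
 *         goto cleanup;
 *
 *     ...change the VM state and/or talk to the monitor...
 *
 *     qemuDomainObjEndJob(driver, vm);
 */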
int qemuDomainObjBeginAsyncJob(virQEMUDriverPtr driver,
virDomainObjPtr obj,
qemuDomainAsyncJob asyncJob)
{
if (qemuDomainObjBeginJobInternal(driver, obj, QEMU_JOB_ASYNC,
asyncJob) < 0)
return -1;
else
return 0;
}
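/*
 * Hedged sketch of an async job's lifetime (the QEMU_ASYNC_JOB_SAVE
 * choice is an assumption for illustration):
 *
 *     if (qemuDomainObjBeginAsyncJob(driver, vm, QEMU_ASYNC_JOB_SAVE) < 0)
 *         goto cleanup;
 *
 *     ...long-running work; each monitor access goes through
 *     qemuDomainObjEnterMonitorAsync(driver, vm, QEMU_ASYNC_JOB_SAVE),
 *     which internally starts a nested job...
 *
 *     qemuDomainObjEndAsyncJob(driver, vm);
 */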
int
qemuDomainObjBeginNestedJob(virQEMUDriverPtr driver,
virDomainObjPtr obj,
qemuDomainAsyncJob asyncJob)
{
qemuDomainObjPrivatePtr priv = obj->privateData;
if (asyncJob != priv->job.asyncJob) {
virReportError(VIR_ERR_INTERNAL_ERROR,
_("unexpected async job %d"), asyncJob);
return -1;
}
if (priv->job.asyncOwner != virThreadSelfID()) {
VIR_WARN("This thread doesn't seem to be the async job owner: %llu",
priv->job.asyncOwner);
}
return qemuDomainObjBeginJobInternal(driver, obj,
QEMU_JOB_ASYNC_NESTED,
QEMU_ASYNC_JOB_NONE);
}
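/*
 * Callers normally do not invoke qemuDomainObjBeginNestedJob() directly;
 * it is driven from qemuDomainObjEnterMonitorInternal() below when an
 * async job owner asks for monitor access, e.g. (illustrative sketch):
 *
 *     if (qemuDomainObjEnterMonitorAsync(driver, vm,
 *                                        QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
 *         goto error;
 *     ...monitor calls...
 *     if (qemuDomainObjExitMonitor(driver, vm) < 0)
 *         goto error;
 */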
/*
* obj must be locked and have a reference before calling
*
* To be called after completing the work associated with the
 * earlier qemuDomainObjBeginJob() call
*/
void
qemuDomainObjEndJob(virQEMUDriverPtr driver, virDomainObjPtr obj)
{
qemuDomainObjPrivatePtr priv = obj->privateData;
qemuDomainJob job = priv->job.active;
priv->jobs_queued--;
VIR_DEBUG("Stopping job: %s (async=%s vm=%p name=%s)",
qemuDomainJobTypeToString(job),
qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
obj, obj->def->name);
qemuDomainObjResetJob(priv);
if (qemuDomainTrackJob(job))
qemuDomainObjSaveJob(driver, obj);
virCondSignal(&priv->job.cond);
}
void
qemuDomainObjEndAsyncJob(virQEMUDriverPtr driver, virDomainObjPtr obj)
{
qemuDomainObjPrivatePtr priv = obj->privateData;
priv->jobs_queued--;
VIR_DEBUG("Stopping async job: %s (vm=%p name=%s)",
qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
obj, obj->def->name);
qemuDomainObjResetAsyncJob(priv);
qemuDomainObjSaveJob(driver, obj);
virCondBroadcast(&priv->job.asyncCond);
}
void
qemuDomainObjAbortAsyncJob(virDomainObjPtr obj)
{
qemuDomainObjPrivatePtr priv = obj->privateData;
VIR_DEBUG("Requesting abort of async job: %s (vm=%p name=%s)",
qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
obj, obj->def->name);
priv->job.abortJob = true;
virDomainObjBroadcast(obj);
}
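/*
 * The abort request is advisory: the thread running the async job is
 * expected to poll priv->job.abortJob and bail out, roughly (illustrative
 * sketch; the exact error-path shape is an assumption):
 *
 *     if (priv->job.abortJob) {
 *         virReportError(VIR_ERR_OPERATION_ABORTED, _("%s: %s"),
 *                        qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
 *                        _("canceled by client"));
 *         goto error;
 *     }
 */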
/*
* obj must be locked before calling
*
* To be called immediately before any QEMU monitor API call
 * Must have already called qemuDomainObjBeginJob() and checked
 * that the VM is still active; may not be used for nested async jobs.
*
* To be followed with qemuDomainObjExitMonitor() once complete
*/
static int
qemuDomainObjEnterMonitorInternal(virQEMUDriverPtr driver,
virDomainObjPtr obj,
qemuDomainAsyncJob asyncJob)
{
qemuDomainObjPrivatePtr priv = obj->privateData;
if (asyncJob != QEMU_ASYNC_JOB_NONE) {
int ret;
if ((ret = qemuDomainObjBeginNestedJob(driver, obj, asyncJob)) < 0)
return ret;
if (!virDomainObjIsActive(obj)) {
virReportError(VIR_ERR_OPERATION_FAILED, "%s",
_("domain is no longer running"));
qemuDomainObjEndJob(driver, obj);
return -1;
}
} else if (priv->job.asyncOwner == virThreadSelfID()) {
VIR_WARN("This thread seems to be the async job owner; entering"
" monitor without asking for a nested job is dangerous");
}
VIR_DEBUG("Entering monitor (mon=%p vm=%p name=%s)",
priv->mon, obj, obj->def->name);
virObjectLock(priv->mon);
virObjectRef(priv->mon);
ignore_value(virTimeMillisNow(&priv->monStart));
virObjectUnlock(obj);
return 0;
}
static void ATTRIBUTE_NONNULL(1)
qemuDomainObjExitMonitorInternal(virQEMUDriverPtr driver,
virDomainObjPtr obj)
{
qemuDomainObjPrivatePtr priv = obj->privateData;
bool hasRefs;
hasRefs = virObjectUnref(priv->mon);
if (hasRefs)
virObjectUnlock(priv->mon);
virObjectLock(obj);
VIR_DEBUG("Exited monitor (mon=%p vm=%p name=%s)",
priv->mon, obj, obj->def->name);
priv->monStart = 0;
if (!hasRefs)
priv->mon = NULL;
if (priv->job.active == QEMU_JOB_ASYNC_NESTED)
qemuDomainObjEndJob(driver, obj);
}
void qemuDomainObjEnterMonitor(virQEMUDriverPtr driver,
virDomainObjPtr obj)
{
ignore_value(qemuDomainObjEnterMonitorInternal(driver, obj,
QEMU_ASYNC_JOB_NONE));
}
/* obj must NOT be locked before calling
*
* Should be paired with an earlier qemuDomainObjEnterMonitor() call
*
* Returns -1 if the domain is no longer alive after exiting the monitor.
* In that case, the caller should be careful when using obj's data,
* e.g. the live definition in vm->def has been freed by qemuProcessStop
* and replaced by the persistent definition, so pointers stolen
* from the live definition could no longer be valid.
*/
int qemuDomainObjExitMonitor(virQEMUDriverPtr driver,
virDomainObjPtr obj)
{
qemuDomainObjExitMonitorInternal(driver, obj);
if (!virDomainObjIsActive(obj)) {
if (!virGetLastError())
virReportError(VIR_ERR_OPERATION_FAILED, "%s",
_("domain is no longer running"));
return -1;
}
return 0;
}
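/*
 * Illustrative enter/exit pairing (the balloon query in the middle is an
 * assumed example of a monitor call, not prescribed by this file):
 *
 *     qemuDomainObjPrivatePtr priv = vm->privateData;
 *     unsigned long long balloon;
 *     int rc;
 *
 *     qemuDomainObjEnterMonitor(driver, vm);
 *     rc = qemuMonitorGetBalloonInfo(priv->mon, &balloon);
 *     if (qemuDomainObjExitMonitor(driver, vm) < 0)
 *         rc = -1;
 */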
/*
* obj must be locked before calling
*
* To be called immediately before any QEMU monitor API call.
* Must have already either called qemuDomainObjBeginJob()
* and checked that the VM is still active, with asyncJob of
* QEMU_ASYNC_JOB_NONE; or already called qemuDomainObjBeginAsyncJob,
* with the same asyncJob.
*
* Returns 0 if job was started, in which case this must be followed with
* qemuDomainObjExitMonitor(); -2 if waiting for the nested job times out;
* or -1 if the job could not be started (probably because the vm exited
* in the meantime).
*/
int
qemuDomainObjEnterMonitorAsync(virQEMUDriverPtr driver,
virDomainObjPtr obj,
qemuDomainAsyncJob asyncJob)
{
return qemuDomainObjEnterMonitorInternal(driver, obj, asyncJob);
}
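/* Illustrative usage sketch (not part of the driver): a typical caller
 * pattern for an async job, assuming the async job was started earlier
 * with qemuDomainObjBeginAsyncJob() and using a hypothetical monitor
 * call qemuMonitorDoSomething():
 *
 *     int rc;
 *     if (qemuDomainObjEnterMonitorAsync(driver, vm, QEMU_ASYNC_JOB_SAVE) < 0)
 *         return -1;   // VM exited or the nested job timed out
 *     rc = qemuMonitorDoSomething(priv->mon);
 *     if (qemuDomainObjExitMonitor(driver, vm) < 0 || rc < 0)
 *         return -1;
 */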
/**
* qemuDomainGetAgent:
* @vm: domain object
*
 * Returns the agent pointer of @vm.
*/
qemuAgentPtr
qemuDomainGetAgent(virDomainObjPtr vm)
{
return (((qemuDomainObjPrivatePtr)(vm->privateData))->agent);
}
/*
* obj must be locked before calling
*
* To be called immediately before any QEMU agent API call.
* Must have already called qemuDomainObjBeginJob() and checked
* that the VM is still active.
*
* To be followed with qemuDomainObjExitAgent() once complete
*/
void
qemuDomainObjEnterAgent(virDomainObjPtr obj)
{
qemuDomainObjPrivatePtr priv = obj->privateData;
VIR_DEBUG("Entering agent (agent=%p vm=%p name=%s)",
priv->agent, obj, obj->def->name);
virObjectLock(priv->agent);
virObjectRef(priv->agent);
ignore_value(virTimeMillisNow(&priv->agentStart));
virObjectUnlock(obj);
}
/* obj must NOT be locked before calling
*
* Should be paired with an earlier qemuDomainObjEnterAgent() call
*/
void
qemuDomainObjExitAgent(virDomainObjPtr obj)
{
qemuDomainObjPrivatePtr priv = obj->privateData;
bool hasRefs;
hasRefs = virObjectUnref(priv->agent);
if (hasRefs)
virObjectUnlock(priv->agent);
virObjectLock(obj);
VIR_DEBUG("Exited agent (agent=%p vm=%p name=%s)",
priv->agent, obj, obj->def->name);
priv->agentStart = 0;
if (!hasRefs)
priv->agent = NULL;
}
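/* Illustrative usage sketch (assumptions: a job was begun with
 * qemuDomainObjBeginJob(), the VM was checked to be active, and
 * qemuAgentFSFreeze() stands in for any real agent call):
 *
 *     qemuDomainObjEnterAgent(vm);
 *     rc = qemuAgentFSFreeze(qemuDomainGetAgent(vm), NULL, 0);
 *     qemuDomainObjExitAgent(vm);
 */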
void qemuDomainObjEnterRemote(virDomainObjPtr obj)
{
VIR_DEBUG("Entering remote (vm=%p name=%s)",
obj, obj->def->name);
virObjectUnlock(obj);
}
void qemuDomainObjExitRemote(virDomainObjPtr obj)
{
virObjectLock(obj);
VIR_DEBUG("Exited remote (vm=%p name=%s)",
obj, obj->def->name);
}
virDomainDefPtr
qemuDomainDefCopy(virQEMUDriverPtr driver,
virDomainDefPtr src,
unsigned int flags)
{
virBuffer buf = VIR_BUFFER_INITIALIZER;
virDomainDefPtr ret = NULL;
virCapsPtr caps = NULL;
char *xml = NULL;
if (qemuDomainDefFormatBuf(driver, src, flags, &buf) < 0)
goto cleanup;
xml = virBufferContentAndReset(&buf);
if (!(caps = virQEMUDriverGetCapabilities(driver, false)))
goto cleanup;
if (!(ret = virDomainDefParseString(xml, caps, driver->xmlopt,
VIR_DOMAIN_DEF_PARSE_INACTIVE |
VIR_DOMAIN_DEF_PARSE_SKIP_VALIDATE)))
goto cleanup;
cleanup:
VIR_FREE(xml);
virObjectUnref(caps);
return ret;
}
int
qemuDomainDefFormatBuf(virQEMUDriverPtr driver,
virDomainDefPtr def,
unsigned int flags,
virBuffer *buf)
{
int ret = -1;
virCPUDefPtr cpu = NULL;
virCPUDefPtr def_cpu = def->cpu;
virDomainControllerDefPtr *controllers = NULL;
int ncontrollers = 0;
virCapsPtr caps = NULL;
if (!(caps = virQEMUDriverGetCapabilities(driver, false)))
goto cleanup;
/* Update guest CPU requirements according to host CPU */
if ((flags & VIR_DOMAIN_XML_UPDATE_CPU) &&
def_cpu &&
(def_cpu->mode != VIR_CPU_MODE_CUSTOM || def_cpu->model)) {
if (!caps->host.cpu ||
!caps->host.cpu->model) {
virReportError(VIR_ERR_OPERATION_FAILED,
"%s", _("cannot get host CPU capabilities"));
goto cleanup;
}
if (!(cpu = virCPUDefCopy(def_cpu)) ||
cpuUpdate(cpu, caps->host.cpu) < 0)
goto cleanup;
def->cpu = cpu;
}
if ((flags & VIR_DOMAIN_XML_MIGRATABLE)) {
size_t i;
int toremove = 0;
virDomainControllerDefPtr usb = NULL, pci = NULL;
/* If only the default USB controller is present, we can remove it
* and make the XML compatible with older versions of libvirt which
* didn't support USB controllers in the XML but always added the
* default one to qemu anyway.
*/
for (i = 0; i < def->ncontrollers; i++) {
if (def->controllers[i]->type == VIR_DOMAIN_CONTROLLER_TYPE_USB) {
if (usb) {
usb = NULL;
break;
}
usb = def->controllers[i];
}
}
/* The original purpose of this check was migration compatibility
 * with libvirt <= 0.9.4. The limitation doesn't apply to other
 * arches and can cause problems on PPC64.
*/
if (ARCH_IS_X86(def->os.arch) && qemuDomainMachineIsI440FX(def) &&
usb && usb->idx == 0 && usb->model == -1) {
VIR_DEBUG("Removing default USB controller from domain '%s'"
" for migration compatibility", def->name);
toremove++;
} else {
usb = NULL;
}
/* Remove the default PCI controller if there is only one present
* and its model is pci-root */
for (i = 0; i < def->ncontrollers; i++) {
if (def->controllers[i]->type == VIR_DOMAIN_CONTROLLER_TYPE_PCI) {
if (pci) {
pci = NULL;
break;
}
pci = def->controllers[i];
}
}
if (pci && pci->idx == 0 &&
pci->model == VIR_DOMAIN_CONTROLLER_MODEL_PCI_ROOT) {
VIR_DEBUG("Removing default pci-root from domain '%s'"
" for migration compatibility", def->name);
toremove++;
} else {
pci = NULL;
}
if (toremove) {
controllers = def->controllers;
ncontrollers = def->ncontrollers;
if (VIR_ALLOC_N(def->controllers, ncontrollers - toremove) < 0) {
controllers = NULL;
goto cleanup;
}
def->ncontrollers = 0;
for (i = 0; i < ncontrollers; i++) {
if (controllers[i] != usb && controllers[i] != pci)
def->controllers[def->ncontrollers++] = controllers[i];
}
}
}
ret = virDomainDefFormatInternal(def, driver->caps,
virDomainDefFormatConvertXMLFlags(flags),
buf);
cleanup:
def->cpu = def_cpu;
virCPUDefFree(cpu);
if (controllers) {
VIR_FREE(def->controllers);
def->controllers = controllers;
def->ncontrollers = ncontrollers;
}
virObjectUnref(caps);
return ret;
}
char *qemuDomainDefFormatXML(virQEMUDriverPtr driver,
virDomainDefPtr def,
unsigned int flags)
{
virBuffer buf = VIR_BUFFER_INITIALIZER;
if (qemuDomainDefFormatBuf(driver, def, flags, &buf) < 0) {
virBufferFreeAndReset(&buf);
return NULL;
}
if (virBufferError(&buf)) {
virReportOOMError();
virBufferFreeAndReset(&buf);
return NULL;
}
return virBufferContentAndReset(&buf);
}
char *qemuDomainFormatXML(virQEMUDriverPtr driver,
virDomainObjPtr vm,
unsigned int flags)
{
virDomainDefPtr def;
if ((flags & VIR_DOMAIN_XML_INACTIVE) && vm->newDef)
def = vm->newDef;
else
def = vm->def;
return qemuDomainDefFormatXML(driver, def, flags);
}
char *
qemuDomainDefFormatLive(virQEMUDriverPtr driver,
virDomainDefPtr def,
bool inactive,
bool compatible)
{
unsigned int flags = QEMU_DOMAIN_FORMAT_LIVE_FLAGS;
if (inactive)
flags |= VIR_DOMAIN_XML_INACTIVE;
if (compatible)
flags |= VIR_DOMAIN_XML_MIGRATABLE;
return qemuDomainDefFormatXML(driver, def, flags);
}
void qemuDomainObjTaint(virQEMUDriverPtr driver,
virDomainObjPtr obj,
virDomainTaintFlags taint,
qemuDomainLogContextPtr logCtxt)
{
virErrorPtr orig_err = NULL;
bool closeLog = false;
if (virDomainObjTaint(obj, taint)) {
char uuidstr[VIR_UUID_STRING_BUFLEN];
virUUIDFormat(obj->def->uuid, uuidstr);
VIR_WARN("Domain id=%d name='%s' uuid=%s is tainted: %s",
obj->def->id,
obj->def->name,
uuidstr,
virDomainTaintTypeToString(taint));
/* We don't care about errors logging taint info, so
* preserve original error, and clear any error that
* is raised */
orig_err = virSaveLastError();
if (logCtxt == NULL) {
logCtxt = qemuDomainLogContextNew(driver, obj,
QEMU_DOMAIN_LOG_CONTEXT_MODE_ATTACH);
if (!logCtxt) {
if (orig_err) {
virSetError(orig_err);
virFreeError(orig_err);
}
VIR_WARN("Unable to open domainlog");
return;
}
closeLog = true;
}
if (qemuDomainLogContextWrite(logCtxt,
"Domain id=%d is tainted: %s\n",
obj->def->id,
virDomainTaintTypeToString(taint)) < 0)
virResetLastError();
if (closeLog)
qemuDomainLogContextFree(logCtxt);
if (orig_err) {
virSetError(orig_err);
virFreeError(orig_err);
}
}
}
void qemuDomainObjCheckTaint(virQEMUDriverPtr driver,
virDomainObjPtr obj,
qemuDomainLogContextPtr logCtxt)
{
size_t i;
virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
qemuDomainObjPrivatePtr priv = obj->privateData;
if (virQEMUDriverIsPrivileged(driver) &&
(!cfg->clearEmulatorCapabilities ||
cfg->user == 0 ||
cfg->group == 0))
qemuDomainObjTaint(driver, obj, VIR_DOMAIN_TAINT_HIGH_PRIVILEGES, logCtxt);
if (priv->hookRun)
qemuDomainObjTaint(driver, obj, VIR_DOMAIN_TAINT_HOOK, logCtxt);
if (obj->def->namespaceData) {
qemuDomainCmdlineDefPtr qemucmd = obj->def->namespaceData;
if (qemucmd->num_args || qemucmd->num_env)
qemuDomainObjTaint(driver, obj, VIR_DOMAIN_TAINT_CUSTOM_ARGV, logCtxt);
}
if (obj->def->cpu && obj->def->cpu->mode == VIR_CPU_MODE_HOST_PASSTHROUGH)
qemuDomainObjTaint(driver, obj, VIR_DOMAIN_TAINT_HOST_CPU, logCtxt);
for (i = 0; i < obj->def->ndisks; i++)
qemuDomainObjCheckDiskTaint(driver, obj, obj->def->disks[i], logCtxt);
for (i = 0; i < obj->def->nhostdevs; i++)
qemuDomainObjCheckHostdevTaint(driver, obj, obj->def->hostdevs[i],
logCtxt);
for (i = 0; i < obj->def->nnets; i++)
qemuDomainObjCheckNetTaint(driver, obj, obj->def->nets[i], logCtxt);
if (obj->def->os.dtb)
qemuDomainObjTaint(driver, obj, VIR_DOMAIN_TAINT_CUSTOM_DTB, logCtxt);
virObjectUnref(cfg);
}
void qemuDomainObjCheckDiskTaint(virQEMUDriverPtr driver,
virDomainObjPtr obj,
virDomainDiskDefPtr disk,
qemuDomainLogContextPtr logCtxt)
{
virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
int format = virDomainDiskGetFormat(disk);
if ((!format || format == VIR_STORAGE_FILE_AUTO) &&
cfg->allowDiskFormatProbing)
qemuDomainObjTaint(driver, obj, VIR_DOMAIN_TAINT_DISK_PROBING, logCtxt);
if (disk->rawio == VIR_TRISTATE_BOOL_YES)
qemuDomainObjTaint(driver, obj, VIR_DOMAIN_TAINT_HIGH_PRIVILEGES,
logCtxt);
if (disk->device == VIR_DOMAIN_DISK_DEVICE_CDROM &&
virStorageSourceGetActualType(disk->src) == VIR_STORAGE_TYPE_BLOCK &&
disk->src->path)
qemuDomainObjTaint(driver, obj, VIR_DOMAIN_TAINT_CDROM_PASSTHROUGH,
logCtxt);
virObjectUnref(cfg);
}
void qemuDomainObjCheckHostdevTaint(virQEMUDriverPtr driver,
virDomainObjPtr obj,
virDomainHostdevDefPtr hostdev,
qemuDomainLogContextPtr logCtxt)
{
virDomainHostdevSubsysSCSIPtr scsisrc = &hostdev->source.subsys.u.scsi;
if (hostdev->source.subsys.type == VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_SCSI &&
scsisrc->rawio == VIR_TRISTATE_BOOL_YES)
qemuDomainObjTaint(driver, obj, VIR_DOMAIN_TAINT_HIGH_PRIVILEGES,
logCtxt);
}
void qemuDomainObjCheckNetTaint(virQEMUDriverPtr driver,
virDomainObjPtr obj,
virDomainNetDefPtr net,
qemuDomainLogContextPtr logCtxt)
{
/* script is only useful for NET_TYPE_ETHERNET (qemu) and
* NET_TYPE_BRIDGE (xen), but could be (incorrectly) specified for
* any interface type. In any case, it's adding user sauce into
* the soup, so it should taint the domain.
*/
if (net->script != NULL)
qemuDomainObjTaint(driver, obj, VIR_DOMAIN_TAINT_SHELL_SCRIPTS, logCtxt);
}
qemuDomainLogContextPtr qemuDomainLogContextNew(virQEMUDriverPtr driver,
virDomainObjPtr vm,
qemuDomainLogContextMode mode)
{
virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
qemuDomainLogContextPtr ctxt = NULL;
if (VIR_ALLOC(ctxt) < 0)
goto error;
VIR_DEBUG("Context new %p stdioLogD=%d", ctxt, cfg->stdioLogD);
ctxt->writefd = -1;
ctxt->readfd = -1;
virAtomicIntSet(&ctxt->refs, 1);
if (virAsprintf(&ctxt->path, "%s/%s.log", cfg->logDir, vm->def->name) < 0)
goto error;
if (cfg->stdioLogD) {
ctxt->manager = virLogManagerNew(virQEMUDriverIsPrivileged(driver));
if (!ctxt->manager)
goto error;
ctxt->writefd = virLogManagerDomainOpenLogFile(ctxt->manager,
"qemu",
vm->def->uuid,
vm->def->name,
ctxt->path,
0,
&ctxt->inode,
&ctxt->pos);
if (ctxt->writefd < 0)
goto error;
} else {
if ((ctxt->writefd = open(ctxt->path, O_WRONLY | O_CREAT | O_APPEND, S_IRUSR | S_IWUSR)) < 0) {
virReportSystemError(errno, _("failed to create logfile %s"),
ctxt->path);
goto error;
}
if (virSetCloseExec(ctxt->writefd) < 0) {
virReportSystemError(errno, _("failed to set close-on-exec flag on %s"),
ctxt->path);
goto error;
}
/* For unprivileged startup we must truncate the file since
* we can't rely on logrotate. We don't use O_TRUNC since
* it is better for SELinux policy if we truncate afterwards */
if (mode == QEMU_DOMAIN_LOG_CONTEXT_MODE_START &&
!virQEMUDriverIsPrivileged(driver) &&
ftruncate(ctxt->writefd, 0) < 0) {
virReportSystemError(errno, _("failed to truncate %s"),
ctxt->path);
goto error;
}
if (mode == QEMU_DOMAIN_LOG_CONTEXT_MODE_START) {
if ((ctxt->readfd = open(ctxt->path, O_RDONLY, S_IRUSR | S_IWUSR)) < 0) {
virReportSystemError(errno, _("failed to open logfile %s"),
ctxt->path);
goto error;
}
if (virSetCloseExec(ctxt->readfd) < 0) {
virReportSystemError(errno, _("failed to set close-on-exec flag on %s"),
ctxt->path);
goto error;
}
}
if ((ctxt->pos = lseek(ctxt->writefd, 0, SEEK_END)) < 0) {
virReportSystemError(errno, _("failed to seek in log file %s"),
ctxt->path);
goto error;
}
}
cleanup:
virObjectUnref(cfg);
return ctxt;
error:
qemuDomainLogContextFree(ctxt);
ctxt = NULL;
goto cleanup;
}
int qemuDomainLogContextWrite(qemuDomainLogContextPtr ctxt,
const char *fmt, ...)
{
va_list argptr;
char *message = NULL;
int ret = -1;
va_start(argptr, fmt);
if (virVasprintf(&message, fmt, argptr) < 0)
goto cleanup;
if (!ctxt->manager &&
lseek(ctxt->writefd, 0, SEEK_END) < 0) {
virReportSystemError(errno, "%s",
_("Unable to seek to end of domain logfile"));
goto cleanup;
}
if (safewrite(ctxt->writefd, message, strlen(message)) < 0) {
virReportSystemError(errno, "%s",
_("Unable to write to domain logfile"));
goto cleanup;
}
ret = 0;
cleanup:
va_end(argptr);
VIR_FREE(message);
return ret;
}
ssize_t qemuDomainLogContextRead(qemuDomainLogContextPtr ctxt,
char **msg)
{
VIR_DEBUG("Context read %p manager=%p inode=%llu pos=%llu",
ctxt, ctxt->manager,
(unsigned long long)ctxt->inode,
(unsigned long long)ctxt->pos);
char *buf;
size_t buflen;
if (ctxt->manager) {
buf = virLogManagerDomainReadLogFile(ctxt->manager,
ctxt->path,
ctxt->inode,
ctxt->pos,
1024 * 128,
0);
if (!buf)
return -1;
buflen = strlen(buf);
} else {
ssize_t got;
buflen = 1024 * 128;
/* Best effort jump to start of messages */
ignore_value(lseek(ctxt->readfd, ctxt->pos, SEEK_SET));
if (VIR_ALLOC_N(buf, buflen) < 0)
return -1;
got = saferead(ctxt->readfd, buf, buflen - 1);
if (got < 0) {
VIR_FREE(buf);
virReportSystemError(errno, "%s",
_("Unable to read from log file"));
return -1;
}
buf[got] = '\0';
ignore_value(VIR_REALLOC_N_QUIET(buf, got + 1));
buflen = got;
}
*msg = buf;
return buflen;
}
/**
* qemuDomainLogAppendMessage:
*
* This is a best-effort attempt to add a log message to the qemu log file
* either by using virtlogd or the legacy approach */
int
qemuDomainLogAppendMessage(virQEMUDriverPtr driver,
virDomainObjPtr vm,
const char *fmt,
...)
{
virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
virLogManagerPtr manager = NULL;
va_list ap;
char *path = NULL;
int writefd = -1;
char *message = NULL;
int ret = -1;
va_start(ap, fmt);
if (virVasprintf(&message, fmt, ap) < 0)
goto cleanup;
VIR_DEBUG("Append log message (vm='%s' message='%s) stdioLogD=%d",
vm->def->name, message, cfg->stdioLogD);
if (virAsprintf(&path, "%s/%s.log", cfg->logDir, vm->def->name) < 0)
goto cleanup;
if (cfg->stdioLogD) {
if (!(manager = virLogManagerNew(virQEMUDriverIsPrivileged(driver))))
goto cleanup;
if (virLogManagerDomainAppendMessage(manager, "qemu", vm->def->uuid,
vm->def->name, path, message, 0) < 0)
goto cleanup;
} else {
if ((writefd = open(path, O_WRONLY | O_CREAT | O_APPEND, S_IRUSR | S_IWUSR)) < 0) {
virReportSystemError(errno, _("failed to create logfile %s"),
path);
goto cleanup;
}
if (safewrite(writefd, message, strlen(message)) < 0)
goto cleanup;
}
ret = 0;
cleanup:
va_end(ap);
VIR_FREE(message);
VIR_FORCE_CLOSE(writefd);
virLogManagerFree(manager);
virObjectUnref(cfg);
VIR_FREE(path);
return ret;
}
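/* Example (illustrative): append a one-line note to the domain's log,
 * regardless of whether virtlogd or the legacy file path is in use
 * ("reason" is a hypothetical local variable):
 *
 *     qemuDomainLogAppendMessage(driver, vm, "shutting down: %s\n", reason);
 */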
int qemuDomainLogContextGetWriteFD(qemuDomainLogContextPtr ctxt)
{
return ctxt->writefd;
}
void qemuDomainLogContextMarkPosition(qemuDomainLogContextPtr ctxt)
{
if (ctxt->manager)
virLogManagerDomainGetLogFilePosition(ctxt->manager,
ctxt->path,
0,
&ctxt->inode,
&ctxt->pos);
else
ctxt->pos = lseek(ctxt->writefd, 0, SEEK_END);
}
void qemuDomainLogContextRef(qemuDomainLogContextPtr ctxt)
{
VIR_DEBUG("Context ref %p", ctxt);
virAtomicIntInc(&ctxt->refs);
}
virLogManagerPtr qemuDomainLogContextGetManager(qemuDomainLogContextPtr ctxt)
{
return ctxt->manager;
}
void qemuDomainLogContextFree(qemuDomainLogContextPtr ctxt)
{
bool lastRef;
if (!ctxt)
return;
lastRef = virAtomicIntDecAndTest(&ctxt->refs);
VIR_DEBUG("Context free %p lastref=%d", ctxt, lastRef);
if (!lastRef)
return;
virLogManagerFree(ctxt->manager);
VIR_FREE(ctxt->path);
VIR_FORCE_CLOSE(ctxt->writefd);
VIR_FORCE_CLOSE(ctxt->readfd);
VIR_FREE(ctxt);
}
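/* Illustrative lifecycle sketch for the log context API (error handling
 * elided; MODE_ATTACH is the mode used when appending to an existing log):
 *
 *     qemuDomainLogContextPtr ctxt =
 *         qemuDomainLogContextNew(driver, vm,
 *                                 QEMU_DOMAIN_LOG_CONTEXT_MODE_ATTACH);
 *     qemuDomainLogContextWrite(ctxt, "Domain id=%d: %s\n",
 *                               vm->def->id, msg);
 *     qemuDomainLogContextFree(ctxt);
 */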
/* Locate an appropriate 'qemu-img' binary. */
const char *
qemuFindQemuImgBinary(virQEMUDriverPtr driver)
{
if (!driver->qemuImgBinary)
virReportError(VIR_ERR_INTERNAL_ERROR,
"%s", _("unable to find qemu-img"));
return driver->qemuImgBinary;
}
int
qemuDomainSnapshotWriteMetadata(virDomainObjPtr vm,
virDomainSnapshotObjPtr snapshot,
virCapsPtr caps,
char *snapshotDir)
{
char *newxml = NULL;
int ret = -1;
char *snapDir = NULL;
char *snapFile = NULL;
char uuidstr[VIR_UUID_STRING_BUFLEN];
virUUIDFormat(vm->def->uuid, uuidstr);
newxml = virDomainSnapshotDefFormat(
uuidstr, snapshot->def, caps,
virDomainDefFormatConvertXMLFlags(QEMU_DOMAIN_FORMAT_LIVE_FLAGS),
1);
if (newxml == NULL)
return -1;
if (virAsprintf(&snapDir, "%s/%s", snapshotDir, vm->def->name) < 0)
goto cleanup;
if (virFileMakePath(snapDir) < 0) {
virReportSystemError(errno, _("cannot create snapshot directory '%s'"),
snapDir);
goto cleanup;
}
if (virAsprintf(&snapFile, "%s/%s.xml", snapDir, snapshot->def->name) < 0)
goto cleanup;
ret = virXMLSaveFile(snapFile, NULL, "snapshot-edit", newxml);
cleanup:
VIR_FREE(snapFile);
VIR_FREE(snapDir);
VIR_FREE(newxml);
return ret;
}
/* The domain is expected to be locked and inactive. Returns 0 on success,
 * -1 on failure, or 1 if a disk was skipped due to try_all. */
static int
qemuDomainSnapshotForEachQcow2Raw(virQEMUDriverPtr driver,
virDomainDefPtr def,
const char *name,
const char *op,
bool try_all,
int ndisks)
{
const char *qemuimgarg[] = { NULL, "snapshot", NULL, NULL, NULL, NULL };
size_t i;
bool skipped = false;
qemuimgarg[0] = qemuFindQemuImgBinary(driver);
if (qemuimgarg[0] == NULL) {
/* qemuFindQemuImgBinary set the error */
return -1;
}
qemuimgarg[2] = op;
qemuimgarg[3] = name;
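/* The argv built here is equivalent to running:
 *   qemu-img snapshot -c|-d <name> <disk source>
 * with the source filled in per disk in the loop below. */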
for (i = 0; i < ndisks; i++) {
/* FIXME: we also need to handle LVM here */
if (def->disks[i]->device == VIR_DOMAIN_DISK_DEVICE_DISK) {
int format = virDomainDiskGetFormat(def->disks[i]);
if (format > 0 && format != VIR_STORAGE_FILE_QCOW2) {
if (try_all) {
/* Continue on even in the face of error, since other
* disks in this VM may have the same snapshot name.
*/
VIR_WARN("skipping snapshot action on %s",
def->disks[i]->dst);
skipped = true;
continue;
} else if (STREQ(op, "-c") && i) {
/* We must roll back partial creation by deleting
* all earlier snapshots. */
qemuDomainSnapshotForEachQcow2Raw(driver, def, name,
"-d", false, i);
}
virReportError(VIR_ERR_OPERATION_INVALID,
_("Disk device '%s' does not support"
" snapshotting"),
def->disks[i]->dst);
return -1;
}
qemuimgarg[4] = virDomainDiskGetSource(def->disks[i]);
if (virRun(qemuimgarg, NULL) < 0) {
if (try_all) {
VIR_WARN("skipping snapshot action on %s",
def->disks[i]->dst);
skipped = true;
continue;
} else if (STREQ(op, "-c") && i) {
/* We must roll back partial creation by deleting
* all earlier snapshots. */
qemuDomainSnapshotForEachQcow2Raw(driver, def, name,
"-d", false, i);
}
return -1;
}
}
}
return skipped ? 1 : 0;
}
/* The domain is expected to be locked and inactive. Returns 0 on success,
 * -1 on failure, or 1 if a disk was skipped due to try_all. */
int
qemuDomainSnapshotForEachQcow2(virQEMUDriverPtr driver,
virDomainObjPtr vm,
virDomainSnapshotObjPtr snap,
const char *op,
bool try_all)
{
/* Prefer action on the disks in use at the time the snapshot was
* created; but fall back to current definition if dealing with a
* snapshot created prior to libvirt 0.9.5. */
virDomainDefPtr def = snap->def->dom;
if (!def)
def = vm->def;
return qemuDomainSnapshotForEachQcow2Raw(driver, def, snap->def->name,
op, try_all, def->ndisks);
}
/* Discard one snapshot (or its metadata), without reparenting any children. */
int
qemuDomainSnapshotDiscard(virQEMUDriverPtr driver,
virDomainObjPtr vm,
virDomainSnapshotObjPtr snap,
bool update_current,
bool metadata_only)
{
char *snapFile = NULL;
int ret = -1;
qemuDomainObjPrivatePtr priv;
virDomainSnapshotObjPtr parentsnap = NULL;
virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
if (!metadata_only) {
if (!virDomainObjIsActive(vm)) {
/* Ignore any skipped disks */
if (qemuDomainSnapshotForEachQcow2(driver, vm, snap, "-d",
true) < 0)
goto cleanup;
} else {
priv = vm->privateData;
qemuDomainObjEnterMonitor(driver, vm);
/* we continue on even in the face of error */
qemuMonitorDeleteSnapshot(priv->mon, snap->def->name);
ignore_value(qemuDomainObjExitMonitor(driver, vm));
}
}
if (virAsprintf(&snapFile, "%s/%s/%s.xml", cfg->snapshotDir,
vm->def->name, snap->def->name) < 0)
goto cleanup;
if (snap == vm->current_snapshot) {
if (update_current && snap->def->parent) {
parentsnap = virDomainSnapshotFindByName(vm->snapshots,
snap->def->parent);
if (!parentsnap) {
VIR_WARN("missing parent snapshot matching name '%s'",
snap->def->parent);
} else {
parentsnap->def->current = true;
if (qemuDomainSnapshotWriteMetadata(vm, parentsnap, driver->caps,
cfg->snapshotDir) < 0) {
VIR_WARN("failed to set parent snapshot '%s' as current",
snap->def->parent);
parentsnap->def->current = false;
parentsnap = NULL;
}
}
}
vm->current_snapshot = parentsnap;
}
if (unlink(snapFile) < 0)
VIR_WARN("Failed to unlink %s", snapFile);
virDomainSnapshotObjListRemove(vm->snapshots, snap);
ret = 0;
cleanup:
VIR_FREE(snapFile);
virObjectUnref(cfg);
return ret;
}
/* Hash iterator callback to discard multiple snapshots. */
int qemuDomainSnapshotDiscardAll(void *payload,
const void *name ATTRIBUTE_UNUSED,
void *data)
{
virDomainSnapshotObjPtr snap = payload;
virQEMUSnapRemovePtr curr = data;
int err;
if (snap->def->current)
curr->current = true;
err = qemuDomainSnapshotDiscard(curr->driver, curr->vm, snap, false,
curr->metadata_only);
if (err && !curr->err)
curr->err = err;
return 0;
}
int
qemuDomainSnapshotDiscardAllMetadata(virQEMUDriverPtr driver,
virDomainObjPtr vm)
{
virQEMUSnapRemove rem;
rem.driver = driver;
rem.vm = vm;
rem.metadata_only = true;
rem.err = 0;
virDomainSnapshotForEach(vm->snapshots, qemuDomainSnapshotDiscardAll,
&rem);
return rem.err;
}
/*
 * The caller must hold a lock on the vm.
*/
void
qemuDomainRemoveInactive(virQEMUDriverPtr driver,
virDomainObjPtr vm)
{
bool haveJob = true;
char *snapDir;
virQEMUDriverConfigPtr cfg;
if (vm->persistent) {
/* Short-circuit, we don't want to remove a persistent domain */
return;
}
cfg = virQEMUDriverGetConfig(driver);
if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
haveJob = false;
/* Remove any snapshot metadata prior to removing the domain */
if (qemuDomainSnapshotDiscardAllMetadata(driver, vm) < 0) {
VIR_WARN("unable to remove all snapshots for domain %s",
vm->def->name);
}
else if (virAsprintf(&snapDir, "%s/%s", cfg->snapshotDir,
vm->def->name) < 0) {
VIR_WARN("unable to remove snapshot directory %s/%s",
cfg->snapshotDir, vm->def->name);
} else {
if (rmdir(snapDir) < 0 && errno != ENOENT)
VIR_WARN("unable to remove snapshot directory %s", snapDir);
VIR_FREE(snapDir);
}
virObjectRef(vm);
virDomainObjListRemove(driver->domains, vm);
/*
* virDomainObjListRemove() leaves the domain unlocked so it can
* be unref'd for other drivers that depend on that, but we still
* need to reset a job and we have a reference from the API that
* called this function. So we need to lock it back. This is
* just a workaround for the qemu driver.
*
* XXX: Ideally, the global handling of domain objects and object
* lists would be refactored so we don't need hacks like
* this, but since that requires refactor of all drivers,
* it's a work for another day.
*/
virObjectLock(vm);
virObjectUnref(cfg);
if (haveJob)
qemuDomainObjEndJob(driver, vm);
virObjectUnref(vm);
}
void
qemuDomainSetFakeReboot(virQEMUDriverPtr driver,
virDomainObjPtr vm,
bool value)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
if (priv->fakeReboot == value)
goto cleanup;
priv->fakeReboot = value;
if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm, driver->caps) < 0)
VIR_WARN("Failed to save status on vm %s", vm->def->name);
cleanup:
virObjectUnref(cfg);
}
static void
qemuDomainCheckRemoveOptionalDisk(virQEMUDriverPtr driver,
virDomainObjPtr vm,
size_t diskIndex)
{
char uuid[VIR_UUID_STRING_BUFLEN];
virObjectEventPtr event = NULL;
virDomainDiskDefPtr disk = vm->def->disks[diskIndex];
const char *src = virDomainDiskGetSource(disk);
virUUIDFormat(vm->def->uuid, uuid);
VIR_DEBUG("Dropping disk '%s' on domain '%s' (UUID '%s') "
"due to inaccessible source '%s'",
disk->dst, vm->def->name, uuid, src);
if (disk->device == VIR_DOMAIN_DISK_DEVICE_CDROM ||
disk->device == VIR_DOMAIN_DISK_DEVICE_FLOPPY) {
event = virDomainEventDiskChangeNewFromObj(vm, src, NULL,
disk->info.alias,
VIR_DOMAIN_EVENT_DISK_CHANGE_MISSING_ON_START);
ignore_value(virDomainDiskSetSource(disk, NULL));
} else {
event = virDomainEventDiskChangeNewFromObj(vm, src, NULL,
disk->info.alias,
VIR_DOMAIN_EVENT_DISK_DROP_MISSING_ON_START);
virDomainDiskRemove(vm->def, diskIndex);
virDomainDiskDefFree(disk);
}
qemuDomainEventQueue(driver, event);
}
static int
qemuDomainCheckDiskStartupPolicy(virQEMUDriverPtr driver,
virDomainObjPtr vm,
size_t diskIndex,
bool cold_boot)
{
int startupPolicy = vm->def->disks[diskIndex]->startupPolicy;
int device = vm->def->disks[diskIndex]->device;
switch ((virDomainStartupPolicy) startupPolicy) {
case VIR_DOMAIN_STARTUP_POLICY_OPTIONAL:
/* Once started with an optional disk, qemu saves its section
* in the migration stream, so later, when restoring from it
* we must make sure the sections match. */
if (!cold_boot &&
device != VIR_DOMAIN_DISK_DEVICE_FLOPPY &&
device != VIR_DOMAIN_DISK_DEVICE_CDROM)
return -1;
break;
case VIR_DOMAIN_STARTUP_POLICY_MANDATORY:
return -1;
case VIR_DOMAIN_STARTUP_POLICY_REQUISITE:
if (cold_boot)
return -1;
break;
case VIR_DOMAIN_STARTUP_POLICY_DEFAULT:
case VIR_DOMAIN_STARTUP_POLICY_LAST:
/* this should never happen */
break;
}
qemuDomainCheckRemoveOptionalDisk(driver, vm, diskIndex);
return 0;
}
int
qemuDomainCheckDiskPresence(virQEMUDriverPtr driver,
virDomainObjPtr vm,
bool cold_boot)
{
int ret = -1;
size_t i;
VIR_DEBUG("Checking for disk presence");
for (i = vm->def->ndisks; i > 0; i--) {
size_t idx = i - 1;
virDomainDiskDefPtr disk = vm->def->disks[idx];
virStorageFileFormat format = virDomainDiskGetFormat(disk);
if (virStorageSourceIsEmpty(disk->src))
continue;
/* There is no need to check the backing chain for disks
* without backing support, the fact that the file exists is
* more than enough */
if (virStorageSourceIsLocalStorage(disk->src) &&
format > VIR_STORAGE_FILE_NONE &&
format < VIR_STORAGE_FILE_BACKING &&
virFileExists(virDomainDiskGetSource(disk)))
continue;
if (qemuDomainDetermineDiskChain(driver, vm, disk, true, true) >= 0)
continue;
if (disk->startupPolicy &&
qemuDomainCheckDiskStartupPolicy(driver, vm, idx,
cold_boot) >= 0) {
virResetLastError();
continue;
}
goto error;
}
ret = 0;
error:
return ret;
}
/*
* The vm must be locked when any of the following cleanup functions is
* called.
*/
int
qemuDomainCleanupAdd(virDomainObjPtr vm,
qemuDomainCleanupCallback cb)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
size_t i;
VIR_DEBUG("vm=%s, cb=%p", vm->def->name, cb);
for (i = 0; i < priv->ncleanupCallbacks; i++) {
if (priv->cleanupCallbacks[i] == cb)
return 0;
}
if (VIR_RESIZE_N(priv->cleanupCallbacks,
priv->ncleanupCallbacks_max,
priv->ncleanupCallbacks, 1) < 0)
return -1;
priv->cleanupCallbacks[priv->ncleanupCallbacks++] = cb;
return 0;
}
void
qemuDomainCleanupRemove(virDomainObjPtr vm,
qemuDomainCleanupCallback cb)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
size_t i;
VIR_DEBUG("vm=%s, cb=%p", vm->def->name, cb);
for (i = 0; i < priv->ncleanupCallbacks; i++) {
if (priv->cleanupCallbacks[i] == cb)
VIR_DELETE_ELEMENT_INPLACE(priv->cleanupCallbacks,
i, priv->ncleanupCallbacks);
}
VIR_SHRINK_N(priv->cleanupCallbacks,
priv->ncleanupCallbacks_max,
priv->ncleanupCallbacks_max - priv->ncleanupCallbacks);
}
void
qemuDomainCleanupRun(virQEMUDriverPtr driver,
virDomainObjPtr vm)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
size_t i;
VIR_DEBUG("driver=%p, vm=%s", driver, vm->def->name);
/* run cleanup callbacks in reverse order */
for (i = 0; i < priv->ncleanupCallbacks; i++) {
if (priv->cleanupCallbacks[priv->ncleanupCallbacks - (i + 1)])
priv->cleanupCallbacks[priv->ncleanupCallbacks - (i + 1)](driver, vm);
}
VIR_FREE(priv->cleanupCallbacks);
priv->ncleanupCallbacks = 0;
priv->ncleanupCallbacks_max = 0;
}
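/* Illustrative sketch: a hypothetical cleanup callback registered at
 * startup and dropped once the resource it guards is gone
 * (qemuProcessCleanupFoo is not a real function):
 *
 *     static void qemuProcessCleanupFoo(virQEMUDriverPtr driver,
 *                                       virDomainObjPtr vm);
 *
 *     qemuDomainCleanupAdd(vm, qemuProcessCleanupFoo);
 *     ...
 *     qemuDomainCleanupRemove(vm, qemuProcessCleanupFoo);
 */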
static void
qemuDomainGetImageIds(virQEMUDriverConfigPtr cfg,
virDomainObjPtr vm,
virStorageSourcePtr src,
uid_t *uid, gid_t *gid)
{
virSecurityLabelDefPtr vmlabel;
virSecurityDeviceLabelDefPtr disklabel;
if (uid)
*uid = -1;
if (gid)
*gid = -1;
if (cfg) {
if (uid)
*uid = cfg->user;
if (gid)
*gid = cfg->group;
}
if (vm && (vmlabel = virDomainDefGetSecurityLabelDef(vm->def, "dac")) &&
vmlabel->label)
virParseOwnershipIds(vmlabel->label, uid, gid);
if ((disklabel = virStorageSourceGetSecurityLabelDef(src, "dac")) &&
disklabel->label)
virParseOwnershipIds(disklabel->label, uid, gid);
}
int
qemuDomainStorageFileInit(virQEMUDriverPtr driver,
virDomainObjPtr vm,
virStorageSourcePtr src)
{
virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
uid_t uid;
gid_t gid;
int ret = -1;
qemuDomainGetImageIds(cfg, vm, src, &uid, &gid);
if (virStorageFileInitAs(src, uid, gid) < 0)
goto cleanup;
ret = 0;
cleanup:
virObjectUnref(cfg);
return ret;
}
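/* Example (illustrative): for device "drive-virtio-disk0" the
 * QEMU_DRIVE_HOST_PREFIX ("drive-") is stripped, so depth 0 yields
 * "virtio-disk0" and depth 2 yields "virtio-disk0.2". */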
char *
qemuDomainStorageAlias(const char *device, int depth)
{
char *alias;
if (STRPREFIX(device, QEMU_DRIVE_HOST_PREFIX))
device += strlen(QEMU_DRIVE_HOST_PREFIX);
if (!depth)
ignore_value(VIR_STRDUP(alias, device));
else
ignore_value(virAsprintf(&alias, "%s.%d", device, depth));
return alias;
}
int
qemuDomainDetermineDiskChain(virQEMUDriverPtr driver,
virDomainObjPtr vm,
virDomainDiskDefPtr disk,
bool force_probe,
bool report_broken)
{
virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
int ret = 0;
uid_t uid;
gid_t gid;
if (virStorageSourceIsEmpty(disk->src))
goto cleanup;
if (disk->src->backingStore) {
if (force_probe)
virStorageSourceBackingStoreClear(disk->src);
else
goto cleanup;
}
qemuDomainGetImageIds(cfg, vm, disk->src, &uid, &gid);
if (virStorageFileGetMetadata(disk->src,
uid, gid,
cfg->allowDiskFormatProbing,
report_broken) < 0)
ret = -1;
cleanup:
virObjectUnref(cfg);
return ret;
}
/**
* qemuDomainDiskChainElementRevoke:
*
* Revoke access to a single backing chain element. This restores the labels,
* removes cgroup ACLs for devices and removes locks.
*/
void
qemuDomainDiskChainElementRevoke(virQEMUDriverPtr driver,
virDomainObjPtr vm,
virStorageSourcePtr elem)
{
if (virSecurityManagerRestoreImageLabel(driver->securityManager,
vm->def, elem) < 0)
VIR_WARN("Unable to restore security label on %s", NULLSTR(elem->path));
if (qemuTeardownImageCgroup(vm, elem) < 0)
VIR_WARN("Failed to teardown cgroup for disk path %s",
NULLSTR(elem->path));
if (virDomainLockImageDetach(driver->lockManager, vm, elem) < 0)
VIR_WARN("Unable to release lock on %s", NULLSTR(elem->path));
}
/**
* qemuDomainDiskChainElementPrepare:
*
* Allow a VM access to a single element of a disk backing chain; this helper
* ensures that the lock manager, cgroup device controller, and security manager
* labelling are all aware of each new file before it is added to a chain */
int
qemuDomainDiskChainElementPrepare(virQEMUDriverPtr driver,
virDomainObjPtr vm,
virStorageSourcePtr elem,
bool readonly)
{
bool was_readonly = elem->readonly;
virQEMUDriverConfigPtr cfg = NULL;
int ret = -1;
cfg = virQEMUDriverGetConfig(driver);
elem->readonly = readonly;
if (virDomainLockImageAttach(driver->lockManager, cfg->uri, vm, elem) < 0)
goto cleanup;
if (qemuSetupImageCgroup(vm, elem) < 0)
goto cleanup;
if (virSecurityManagerSetImageLabel(driver->securityManager, vm->def,
elem) < 0)
goto cleanup;
ret = 0;
cleanup:
elem->readonly = was_readonly;
virObjectUnref(cfg);
return ret;
}
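/* Illustrative pairing sketch (error handling elided): grant access to a
 * new chain element before QEMU touches it, and revoke it again if the
 * operation that needed it fails (doSomethingWithElem is hypothetical):
 *
 *     if (qemuDomainDiskChainElementPrepare(driver, vm, elem, true) < 0)
 *         goto cleanup;
 *     if (doSomethingWithElem() < 0)
 *         qemuDomainDiskChainElementRevoke(driver, vm, elem);
 */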
bool
qemuDomainDiskSourceDiffers(virDomainDiskDefPtr disk,
virDomainDiskDefPtr origDisk)
{
char *diskSrc = NULL, *origDiskSrc = NULL;
bool diskEmpty, origDiskEmpty;
bool ret = true;
diskEmpty = virStorageSourceIsEmpty(disk->src);
origDiskEmpty = virStorageSourceIsEmpty(origDisk->src);
if (diskEmpty && origDiskEmpty)
return false;
if (diskEmpty ^ origDiskEmpty)
return true;
/* This won't be network storage, so there is no need to get the
 * diskPriv in order to fetch the secret, thus NULL for param2 */
if (qemuGetDriveSourceString(disk->src, NULL, &diskSrc) < 0 ||
qemuGetDriveSourceString(origDisk->src, NULL, &origDiskSrc) < 0)
goto cleanup;
/* So far, qemu disk sources are considered different
 * if either the path to the disk or its format changes. */
ret = virDomainDiskGetFormat(disk) != virDomainDiskGetFormat(origDisk) ||
STRNEQ_NULLABLE(diskSrc, origDiskSrc);
cleanup:
VIR_FREE(diskSrc);
VIR_FREE(origDiskSrc);
return ret;
}
/*
 * Makes sure the @disk differs from @orig_disk only by the source
 * path and nothing else. The list of fields checked, and whether
 * each is nullable (may be left unspecified), is taken from the
 * virDomainDiskDefFormat() code.
*/
bool
qemuDomainDiskChangeSupported(virDomainDiskDefPtr disk,
virDomainDiskDefPtr orig_disk)
{
#define CHECK_EQ(field, field_name, nullable) \
do { \
if (nullable && !disk->field) \
break; \
if (disk->field != orig_disk->field) { \
virReportError(VIR_ERR_OPERATION_UNSUPPORTED, \
_("cannot modify field '%s' of the disk"), \
field_name); \
return false; \
} \
} while (0)
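    /* For instance, CHECK_EQ(bus, "bus", false) expands to a comparison of
     * disk->bus against orig_disk->bus, reporting a
     * VIR_ERR_OPERATION_UNSUPPORTED error and returning false on mismatch. */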
CHECK_EQ(device, "device", false);
CHECK_EQ(bus, "bus", false);
if (STRNEQ(disk->dst, orig_disk->dst)) {
virReportError(VIR_ERR_OPERATION_UNSUPPORTED,
_("cannot modify field '%s' of the disk"),
"target");
return false;
}
CHECK_EQ(tray_status, "tray", true);
CHECK_EQ(removable, "removable", true);
if (disk->geometry.cylinders &&
disk->geometry.heads &&
disk->geometry.sectors) {
CHECK_EQ(geometry.cylinders, "geometry cylinders", false);
CHECK_EQ(geometry.heads, "geometry heads", false);
CHECK_EQ(geometry.sectors, "geometry sectors", false);
CHECK_EQ(geometry.trans, "BIOS-translation-modus", true);
}
CHECK_EQ(blockio.logical_block_size,
"blockio logical_block_size", false);
CHECK_EQ(blockio.physical_block_size,
"blockio physical_block_size", false);
CHECK_EQ(blkdeviotune.total_bytes_sec,
"blkdeviotune total_bytes_sec",
true);
CHECK_EQ(blkdeviotune.read_bytes_sec,
"blkdeviotune read_bytes_sec",
true);
CHECK_EQ(blkdeviotune.write_bytes_sec,
"blkdeviotune write_bytes_sec",
true);
CHECK_EQ(blkdeviotune.total_iops_sec,
"blkdeviotune total_iops_sec",
true);
CHECK_EQ(blkdeviotune.read_iops_sec,
"blkdeviotune read_iops_sec",
true);
CHECK_EQ(blkdeviotune.write_iops_sec,
"blkdeviotune write_iops_sec",
true);
CHECK_EQ(blkdeviotune.total_bytes_sec_max,
"blkdeviotune total_bytes_sec_max",
true);
CHECK_EQ(blkdeviotune.read_bytes_sec_max,
"blkdeviotune read_bytes_sec_max",
true);
CHECK_EQ(blkdeviotune.write_bytes_sec_max,
"blkdeviotune write_bytes_sec_max",
true);
CHECK_EQ(blkdeviotune.total_iops_sec_max,
"blkdeviotune total_iops_sec_max",
true);
CHECK_EQ(blkdeviotune.read_iops_sec_max,
"blkdeviotune read_iops_sec_max",
true);
CHECK_EQ(blkdeviotune.write_iops_sec_max,
"blkdeviotune write_iops_sec_max",
true);
CHECK_EQ(blkdeviotune.size_iops_sec,
"blkdeviotune size_iops_sec",
true);
if (disk->serial && STRNEQ_NULLABLE(disk->serial, orig_disk->serial)) {
virReportError(VIR_ERR_OPERATION_UNSUPPORTED,
_("cannot modify field '%s' of the disk"),
"serial");
return false;
}
if (disk->wwn && STRNEQ_NULLABLE(disk->wwn, orig_disk->wwn)) {
virReportError(VIR_ERR_OPERATION_UNSUPPORTED,
_("cannot modify field '%s' of the disk"),
"wwn");
return false;
}
if (disk->vendor && STRNEQ_NULLABLE(disk->vendor, orig_disk->vendor)) {
virReportError(VIR_ERR_OPERATION_UNSUPPORTED,
_("cannot modify field '%s' of the disk"),
"vendor");
return false;
}
if (disk->product && STRNEQ_NULLABLE(disk->product, orig_disk->product)) {
virReportError(VIR_ERR_OPERATION_UNSUPPORTED,
_("cannot modify field '%s' of the disk"),
"product");
return false;
}
CHECK_EQ(cachemode, "cache", true);
CHECK_EQ(error_policy, "error_policy", true);
CHECK_EQ(rerror_policy, "rerror_policy", true);
CHECK_EQ(iomode, "io", true);
CHECK_EQ(ioeventfd, "ioeventfd", true);
CHECK_EQ(event_idx, "event_idx", true);
CHECK_EQ(copy_on_read, "copy_on_read", true);
/* "snapshot" is a libvirt internal field and thus can be changed */
/* startupPolicy is allowed to be updated. Therefore not checked here. */
CHECK_EQ(transient, "transient", true);
CHECK_EQ(info.bootIndex, "boot order", true);
CHECK_EQ(rawio, "rawio", true);
CHECK_EQ(sgio, "sgio", true);
CHECK_EQ(discard, "discard", true);
CHECK_EQ(iothread, "iothread", true);
if (disk->domain_name &&
STRNEQ_NULLABLE(disk->domain_name, orig_disk->domain_name)) {
virReportError(VIR_ERR_OPERATION_UNSUPPORTED,
_("cannot modify field '%s' of the disk"),
"backenddomain");
return false;
}
/* checks for fields stored in disk->src */
/* unfortunately 'readonly' and 'shared' can't be converted to tristate
* values thus we need to ignore the check if the new value is 'false' */
CHECK_EQ(src->readonly, "readonly", true);
CHECK_EQ(src->shared, "shared", true);
#undef CHECK_EQ
return true;
}
bool
qemuDomainDiskBlockJobIsActive(virDomainDiskDefPtr disk)
{
qemuDomainDiskPrivatePtr diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk);
if (disk->mirror) {
virReportError(VIR_ERR_BLOCK_COPY_ACTIVE,
_("disk '%s' already in active block job"),
disk->dst);
return true;
}
if (diskPriv->blockjob) {
virReportError(VIR_ERR_OPERATION_UNSUPPORTED,
_("disk '%s' already in active block job"),
disk->dst);
return true;
}
return false;
}
/**
* qemuDomainHasBlockjob:
* @vm: domain object
* @copy_only: Reject only block copy job
*
* Return true if @vm has at least one disk involved in a current block
* copy/commit/pull job. If @copy_only is true this returns true only if the
* disk is involved in a block copy.
* */
bool
qemuDomainHasBlockjob(virDomainObjPtr vm,
bool copy_only)
{
size_t i;
for (i = 0; i < vm->def->ndisks; i++) {
virDomainDiskDefPtr disk = vm->def->disks[i];
qemuDomainDiskPrivatePtr diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk);
if (!copy_only && diskPriv->blockjob)
return true;
if (disk->mirror && disk->mirrorJob == VIR_DOMAIN_BLOCK_JOB_TYPE_COPY)
return true;
}
return false;
}
int
qemuDomainUpdateDeviceList(virQEMUDriverPtr driver,
virDomainObjPtr vm,
int asyncJob)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
char **aliases;
int rc;
if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_DEVICE_DEL_EVENT))
return 0;
if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
return -1;
rc = qemuMonitorGetDeviceAliases(priv->mon, &aliases);
if (qemuDomainObjExitMonitor(driver, vm) < 0)
return -1;
if (rc < 0)
return -1;
virStringFreeList(priv->qemuDevices);
priv->qemuDevices = aliases;
return 0;
}
int
qemuDomainUpdateMemoryDeviceInfo(virQEMUDriverPtr driver,
virDomainObjPtr vm,
int asyncJob)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
virHashTablePtr meminfo = NULL;
int rc;
size_t i;
if (vm->def->nmems == 0)
return 0;
if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
return -1;
rc = qemuMonitorGetMemoryDeviceInfo(priv->mon, &meminfo);
if (qemuDomainObjExitMonitor(driver, vm) < 0)
return -1;
/* if qemu doesn't support the info request, just carry on */
if (rc == -2)
return 0;
if (rc < 0)
return -1;
for (i = 0; i < vm->def->nmems; i++) {
virDomainMemoryDefPtr mem = vm->def->mems[i];
qemuMonitorMemoryDeviceInfoPtr dimm;
if (!mem->info.alias)
continue;
if (!(dimm = virHashLookup(meminfo, mem->info.alias)))
continue;
mem->info.type = VIR_DOMAIN_DEVICE_ADDRESS_TYPE_DIMM;
mem->info.addr.dimm.slot = dimm->slot;
mem->info.addr.dimm.base = dimm->address;
}
virHashFree(meminfo);
return 0;
}
bool
qemuDomainDefCheckABIStability(virQEMUDriverPtr driver,
virDomainDefPtr src,
virDomainDefPtr dst)
{
virDomainDefPtr migratableDefSrc = NULL;
virDomainDefPtr migratableDefDst = NULL;
const int flags = VIR_DOMAIN_XML_SECURE |
                  VIR_DOMAIN_XML_UPDATE_CPU |
                  VIR_DOMAIN_XML_MIGRATABLE;
bool ret = false;
if (!(migratableDefSrc = qemuDomainDefCopy(driver, src, flags)) ||
!(migratableDefDst = qemuDomainDefCopy(driver, dst, flags)))
goto cleanup;
ret = virDomainDefCheckABIStability(migratableDefSrc, migratableDefDst);
cleanup:
virDomainDefFree(migratableDefSrc);
virDomainDefFree(migratableDefDst);
return ret;
}
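/*
 * Illustrative sketch (assumed caller shape, not taken from this file):
 * validating a redefined domain against the live definition before
 * accepting it, relying on the helper above to report the error:
 *
 *     if (!qemuDomainDefCheckABIStability(driver, vm->def, newDef))
 *         goto cleanup;
 */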
bool
qemuDomainAgentAvailable(virDomainObjPtr vm,
bool reportError)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
if (virDomainObjGetState(vm, NULL) != VIR_DOMAIN_RUNNING) {
if (reportError) {
virReportError(VIR_ERR_OPERATION_INVALID, "%s",
_("domain is not running"));
}
return false;
}
if (priv->agentError) {
if (reportError) {
virReportError(VIR_ERR_AGENT_UNRESPONSIVE, "%s",
_("QEMU guest agent is not "
"available due to an error"));
}
return false;
}
if (!priv->agent) {
if (qemuFindAgentConfig(vm->def)) {
if (reportError) {
virReportError(VIR_ERR_AGENT_UNRESPONSIVE, "%s",
_("QEMU guest agent is not connected"));
}
return false;
} else {
if (reportError) {
virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
_("QEMU guest agent is not configured"));
}
return false;
}
}
return true;
}
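/*
 * Illustrative sketch (hypothetical guest agent API entry point): callers
 * are expected to check availability before entering the agent, e.g.:
 *
 *     if (!qemuDomainAgentAvailable(vm, true))
 *         goto endjob;
 *     qemuDomainObjEnterAgent(vm);
 *     ...
 *
 * qemuDomainObjEnterAgent() stands here for the usual agent entry helper;
 * the exact sequence depends on the caller.
 */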
static unsigned long long
qemuDomainGetMemorySizeAlignment(virDomainDefPtr def)
{
/* PPC requires the memory sizes to be rounded to 256MiB increments, so
 * always round them up to that size. */
if (ARCH_IS_PPC64(def->os.arch))
return 256 * 1024;
/* Align the memory size. QEMU requires rounding to the next 4KiB block;
 * we'll take the "traditional" path and round it to 1 MiB instead. */
return 1024;
}
static unsigned long long
qemuDomainGetMemoryModuleSizeAlignment(const virDomainDef *def,
const virDomainMemoryDef *mem ATTRIBUTE_UNUSED)
{
/* PPC requires the memory sizes to be rounded to 256MiB increments, so
 * always round them up to that size. */
if (ARCH_IS_PPC64(def->os.arch))
return 256 * 1024;
/* dimm memory modules require 2MiB alignment rather than the 1MiB we are
* using elsewhere. */
return 2048;
}
int
qemuDomainAlignMemorySizes(virDomainDefPtr def)
{
unsigned long long maxmemkb = virMemoryMaxValue(false) >> 10;
unsigned long long maxmemcapped = virMemoryMaxValue(true) >> 10;
unsigned long long initialmem = 0;
unsigned long long mem;
unsigned long long align = qemuDomainGetMemorySizeAlignment(def);
size_t ncells = virDomainNumaGetNodeCount(def->numa);
size_t i;
/* align NUMA cell sizes if relevant */
for (i = 0; i < ncells; i++) {
mem = VIR_ROUND_UP(virDomainNumaGetNodeMemorySize(def->numa, i), align);
initialmem += mem;
if (mem > maxmemkb) {
virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
_("memory size of NUMA node '%zu' overflowed after "
"alignment"), i);
return -1;
}
virDomainNumaSetNodeMemorySize(def->numa, i, mem);
}
/* align initial memory size, if NUMA is present calculate it as total of
* individual aligned NUMA node sizes */
if (initialmem == 0)
initialmem = VIR_ROUND_UP(virDomainDefGetMemoryInitial(def), align);
if (initialmem > maxmemcapped) {
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
_("initial memory size overflowed after alignment"));
return -1;
}
virDomainDefSetMemoryInitial(def, initialmem);
def->mem.max_memory = VIR_ROUND_UP(def->mem.max_memory, align);
if (def->mem.max_memory > maxmemkb) {
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
_("maximum memory size overflowed after alignment"));
return -1;
}
/* Align memory module sizes */
for (i = 0; i < def->nmems; i++) {
align = qemuDomainGetMemoryModuleSizeAlignment(def, def->mems[i]);
def->mems[i]->size = VIR_ROUND_UP(def->mems[i]->size, align);
if (def->mems[i]->size > maxmemkb) {
virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
_("size of memory module '%zu' overflowed after "
"alignment"), i);
return -1;
}
}
return 0;
}
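/*
 * Worked example for the alignment above (illustrative numbers): on x86 an
 * initial memory size of 1048577 KiB is rounded up to 1049600 KiB (the next
 * 1024 KiB boundary), while on ppc64 the same value is rounded up to
 * 1310720 KiB (the next 262144 KiB, i.e. 256 MiB, boundary).
 */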
/**
* qemuDomainMemoryDeviceAlignSize:
* @mem: memory device definition object
*
 * Aligns the size of the memory module as enforced by qemu. The size is
 * updated in place. Default rounding is to 1 MiB (qemu requires rounding to
 * the page size, so this should be safe).
*/
void
qemuDomainMemoryDeviceAlignSize(virDomainDefPtr def,
virDomainMemoryDefPtr mem)
{
mem->size = VIR_ROUND_UP(mem->size, qemuDomainGetMemorySizeAlignment(def));
}
/**
* qemuDomainGetMonitor:
* @vm: domain object
*
* Returns the monitor pointer corresponding to the domain object @vm.
*/
qemuMonitorPtr
qemuDomainGetMonitor(virDomainObjPtr vm)
{
return ((qemuDomainObjPrivatePtr) vm->privateData)->mon;
}
/**
* qemuDomainSupportsBlockJobs:
* @vm: domain object
* @modern: pointer to bool that returns whether modern block jobs are supported
*
 * Returns -1 when qemu does not support block jobs at all. Otherwise
* returns 0 and optionally fills @modern to denote that modern (async) block
* jobs are supported.
*/
int
qemuDomainSupportsBlockJobs(virDomainObjPtr vm,
bool *modern)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
bool asynchronous = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKJOB_ASYNC);
bool synchronous = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKJOB_SYNC);
if (!synchronous && !asynchronous) {
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
_("block jobs not supported with this QEMU binary"));
return -1;
}
if (modern)
*modern = asynchronous;
return 0;
}
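/*
 * Illustrative sketch (hypothetical caller): picking the block job variant
 * based on the capability reported above:
 *
 *     bool modern;
 *
 *     if (qemuDomainSupportsBlockJobs(vm, &modern) < 0)
 *         goto cleanup;
 *     if (modern)
 *         ... use the asynchronous (event-based) block job commands ...
 *     else
 *         ... fall back to the synchronous variants ...
 */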
/**
* qemuFindAgentConfig:
* @def: domain definition
*
* Returns the pointer to the channel definition that is used to access the
* guest agent if the agent is configured or NULL otherwise.
*/
virDomainChrDefPtr
qemuFindAgentConfig(virDomainDefPtr def)
{
size_t i;
for (i = 0; i < def->nchannels; i++) {
virDomainChrDefPtr channel = def->channels[i];
if (channel->targetType != VIR_DOMAIN_CHR_CHANNEL_TARGET_TYPE_VIRTIO)
continue;
if (STREQ_NULLABLE(channel->target.name, "org.qemu.guest_agent.0"))
return channel;
}
return NULL;
}
bool
qemuDomainMachineIsQ35(const virDomainDef *def)
{
return (STRPREFIX(def->os.machine, "pc-q35") ||
STREQ(def->os.machine, "q35"));
}
bool
qemuDomainMachineIsI440FX(const virDomainDef *def)
{
return (STREQ(def->os.machine, "pc") ||
STRPREFIX(def->os.machine, "pc-0.") ||
STRPREFIX(def->os.machine, "pc-1.") ||
STRPREFIX(def->os.machine, "pc-i440") ||
STRPREFIX(def->os.machine, "rhel"));
}
bool
qemuDomainMachineNeedsFDC(const virDomainDef *def)
{
char *p = STRSKIP(def->os.machine, "pc-q35-");
if (p) {
if (STRPREFIX(p, "1.") ||
STRPREFIX(p, "2.0") ||
STRPREFIX(p, "2.1") ||
STRPREFIX(p, "2.2") ||
STRPREFIX(p, "2.3"))
return false;
return true;
}
return false;
}
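/*
 * Examples derived from the checks above: "pc-q35-1.7" and "pc-q35-2.3"
 * return false (the FDC is implicitly present on those machine types),
 * "pc-q35-2.4" and later return true, i.e. an FDC must be added explicitly,
 * and machine types without the "pc-q35-" prefix, such as "q35" or
 * "pc-i440fx-2.4", always return false.
 */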
bool
qemuDomainMachineIsS390CCW(const virDomainDef *def)
{
return STRPREFIX(def->os.machine, "s390-ccw");
}
bool
qemuDomainMachineIsVirt(const virDomainDef *def)
{
return STREQ(def->os.machine, "virt") ||
STRPREFIX(def->os.machine, "virt-");
}
static bool
qemuCheckMemoryDimmConflict(const virDomainDef *def,
const virDomainMemoryDef *mem)
{
size_t i;
for (i = 0; i < def->nmems; i++) {
virDomainMemoryDefPtr tmp = def->mems[i];
if (tmp == mem ||
tmp->info.type != VIR_DOMAIN_DEVICE_ADDRESS_TYPE_DIMM)
continue;
if (mem->info.addr.dimm.slot == tmp->info.addr.dimm.slot) {
virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
_("memory device slot '%u' is already being "
"used by another memory device"),
mem->info.addr.dimm.slot);
return true;
}
if (mem->info.addr.dimm.base != 0 &&
mem->info.addr.dimm.base == tmp->info.addr.dimm.base) {
virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
_("memory device base '0x%llx' is already being "
"used by another memory device"),
mem->info.addr.dimm.base);
return true;
}
}
return false;
}
static int
qemuDomainDefValidateMemoryHotplugDevice(const virDomainMemoryDef *mem,
const virDomainDef *def)
{
switch ((virDomainMemoryModel) mem->model) {
case VIR_DOMAIN_MEMORY_MODEL_DIMM:
if (mem->info.type != VIR_DOMAIN_DEVICE_ADDRESS_TYPE_DIMM &&
mem->info.type != VIR_DOMAIN_DEVICE_ADDRESS_TYPE_NONE) {
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
_("only 'dimm' addresses are supported for the "
"pc-dimm device"));
return -1;
}
if (virDomainNumaGetNodeCount(def->numa) != 0) {
if (mem->targetNode == -1) {
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
_("target NUMA node needs to be specified for "
"memory device"));
return -1;
}
}
if (mem->info.type == VIR_DOMAIN_DEVICE_ADDRESS_TYPE_DIMM) {
if (mem->info.addr.dimm.slot >= def->mem.memory_slots) {
virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
_("memory device slot '%u' exceeds slots "
"count '%u'"),
mem->info.addr.dimm.slot, def->mem.memory_slots);
return -1;
}
if (qemuCheckMemoryDimmConflict(def, mem))
return -1;
}
break;
case VIR_DOMAIN_MEMORY_MODEL_NONE:
case VIR_DOMAIN_MEMORY_MODEL_LAST:
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
_("invalid memory device type"));
return -1;
}
return 0;
}
/**
* qemuDomainDefValidateMemoryHotplug:
* @def: domain definition
* @qemuCaps: qemu capabilities object
* @mem: definition of memory device that is to be added to @def with hotplug,
* NULL in case of regular VM startup
*
* Validates that the domain definition and memory modules have valid
* configuration and are possibly able to accept @mem via hotplug if it's
* non-NULL.
*
* Returns 0 on success; -1 and a libvirt error on error.
*/
int
qemuDomainDefValidateMemoryHotplug(const virDomainDef *def,
virQEMUCapsPtr qemuCaps,
const virDomainMemoryDef *mem)
{
unsigned int nmems = def->nmems;
unsigned long long hotplugSpace;
unsigned long long hotplugMemory = 0;
size_t i;
hotplugSpace = def->mem.max_memory - virDomainDefGetMemoryInitial(def);
if (mem) {
nmems++;
hotplugMemory = mem->size;
if (qemuDomainDefValidateMemoryHotplugDevice(mem, def) < 0)
return -1;
}
if (!virDomainDefHasMemoryHotplug(def)) {
if (nmems) {
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
_("cannot use/hotplug a memory device when domain "
"'maxMemory' is not defined"));
return -1;
}
return 0;
}
if (!virQEMUCapsGet(qemuCaps, QEMU_CAPS_DEVICE_PC_DIMM)) {
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
_("memory hotplug isn't supported by this QEMU binary"));
return -1;
}
if (!ARCH_IS_PPC64(def->os.arch)) {
/* due to guest support constraints, qemu would silently enable NUMA with
 * one node once the memory hotplug backend is enabled. To avoid possible
 * confusion we enforce a user-originated NUMA configuration along with
 * memory hotplug. */
if (virDomainNumaGetNodeCount(def->numa) == 0) {
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
_("At least one numa node has to be configured when "
"enabling memory hotplug"));
return -1;
}
}
if (nmems > def->mem.memory_slots) {
virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
_("memory device count '%u' exceeds slots count '%u'"),
nmems, def->mem.memory_slots);
return -1;
}
for (i = 0; i < def->nmems; i++) {
hotplugMemory += def->mems[i]->size;
/* already existing devices don't need to be checked on hotplug */
if (!mem &&
qemuDomainDefValidateMemoryHotplugDevice(def->mems[i], def) < 0)
return -1;
}
if (hotplugMemory > hotplugSpace) {
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
_("memory device total size exceeds hotplug space"));
return -1;
}
return 0;
}
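/*
 * Worked example for the space check above (illustrative numbers): with
 * maxMemory = 16 GiB and initial memory = 4 GiB, hotplugSpace is 12 GiB.
 * Hotplugging an 8 GiB module into a domain that already carries a 4 GiB
 * module gives hotplugMemory = 12 GiB, which still fits; any additional
 * module would exceed the hotplug space and be rejected.
 */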
bool
qemuDomainMachineHasBuiltinIDE(const virDomainDef *def)
{
return qemuDomainMachineIsI440FX(def) ||
STREQ(def->os.machine, "malta") ||
STREQ(def->os.machine, "sun4u") ||
STREQ(def->os.machine, "g3beige");
}
/**
* qemuDomainUpdateCurrentMemorySize:
*
 * Updates the current balloon size from the monitor if necessary. In case
* the balloon is not present for the domain, the function recalculates the
* maximum size to reflect possible changes.
*
* Returns 0 on success and updates vm->def->mem.cur_balloon if necessary, -1 on
* error and reports libvirt error.
*/
int
qemuDomainUpdateCurrentMemorySize(virQEMUDriverPtr driver,
virDomainObjPtr vm)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
unsigned long long balloon;
int ret = -1;
/* inactive domain doesn't need size update */
if (!virDomainObjIsActive(vm))
return 0;
/* if no ballooning is available, the current size equals the current
 * full memory size */
if (!virDomainDefHasMemballoon(vm->def)) {
vm->def->mem.cur_balloon = virDomainDefGetMemoryActual(vm->def);
return 0;
}
/* current size is always automagically updated via the event */
if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BALLOON_EVENT))
return 0;
/* here we need to ask the monitor */
/* Don't delay if someone's using the monitor; just use the existing most
 * recent data instead */
if (qemuDomainJobAllowed(priv, QEMU_JOB_QUERY)) {
if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
return -1;
if (!virDomainObjIsActive(vm)) {
virReportError(VIR_ERR_OPERATION_INVALID, "%s",
_("domain is not running"));
goto endjob;
}
qemuDomainObjEnterMonitor(driver, vm);
ret = qemuMonitorGetBalloonInfo(priv->mon, &balloon);
if (qemuDomainObjExitMonitor(driver, vm) < 0)
ret = -1;
endjob:
qemuDomainObjEndJob(driver, vm);
if (ret < 0)
return -1;
vm->def->mem.cur_balloon = balloon;
}
return 0;
}
/**
* qemuDomainGetMemLockLimitBytes:
*
* @def: domain definition
*
* Returns the size of the memory in bytes that needs to be set as
* RLIMIT_MEMLOCK for the QEMU process.
* If a mem.hard_limit is set, then that value is preferred; otherwise, the
* value returned may depend upon the architecture or devices present.
*/
unsigned long long
qemuDomainGetMemLockLimitBytes(virDomainDefPtr def)
{
unsigned long long memKB;
/* prefer the hard limit */
if (virMemoryLimitIsSet(def->mem.hard_limit)) {
memKB = def->mem.hard_limit;
goto done;
}
if (ARCH_IS_PPC64(def->os.arch)) {
unsigned long long maxMemory;
unsigned long long memory;
unsigned long long baseLimit;
unsigned long long passthroughLimit;
size_t nPCIHostBridges;
size_t i;
bool usesVFIO = false;
/* TODO: Detect at runtime once we start using more than just
* the default PCI Host Bridge */
nPCIHostBridges = 1;
for (i = 0; i < def->nhostdevs; i++) {
virDomainHostdevDefPtr dev = def->hostdevs[i];
if (dev->mode == VIR_DOMAIN_HOSTDEV_MODE_SUBSYS &&
dev->source.subsys.type == VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_PCI &&
dev->source.subsys.u.pci.backend == VIR_DOMAIN_HOSTDEV_PCI_BACKEND_VFIO) {
usesVFIO = true;
break;
}
}
memory = virDomainDefGetMemoryActual(def);
if (def->mem.max_memory)
maxMemory = def->mem.max_memory;
else
maxMemory = memory;
/* baseLimit := maxMemory / 128 (a)
* + 4 MiB * #PHBs + 8 MiB (b)
*
* (a) is the hash table
*
* (b) is accounting for the 32-bit DMA window - it could be either the
* KVM accelerated TCE tables for emulated devices, or the VFIO
* userspace view. The 4 MiB per-PHB (including the default one) covers
* a 2GiB DMA window: default is 1GiB, but it's possible it'll be
* increased to help performance. The 8 MiB extra should be plenty for
* the TCE table index for any reasonable number of PHBs and several
* spapr-vlan or spapr-vscsi devices (512kB + a tiny bit each) */
baseLimit = maxMemory / 128 +
4096 * nPCIHostBridges +
8192;
/* passthroughLimit := max( 2 GiB * #PHBs, (c)
* memory (d)
* + memory * 1/512 * #PHBs + 8 MiB ) (e)
*
* (c) is the pre-DDW VFIO DMA window accounting. We're allowing 2 GiB
* rather than 1 GiB
*
* (d) is the with-DDW (and memory pre-registration and related
* features) DMA window accounting - assuming that we only account RAM
* once, even if mapped to multiple PHBs
*
* (e) is the with-DDW userspace view and overhead for the 64-bit DMA
* window. This is based a bit on expected guest behaviour, but there
* really isn't a way to completely avoid that. We assume the guest
* requests a 64-bit DMA window (per PHB) just big enough to map all
* its RAM. 4 kiB page size gives the 1/512; it will be less with 64
* kiB pages, less still if the guest is mapped with hugepages (unlike
* the default 32-bit DMA window, DDW windows can use large IOMMU
* pages). 8 MiB is for second and further level overheads, like (b) */
passthroughLimit = MAX(2 * 1024 * 1024 * nPCIHostBridges,
memory +
memory / 512 * nPCIHostBridges + 8192);
if (usesVFIO)
memKB = baseLimit + passthroughLimit;
else
memKB = baseLimit;
goto done;
}
/* For device passthrough using VFIO the guest memory and MMIO memory
* regions need to be locked persistent in order to allow DMA.
*
* Currently the below limit is based on assumptions about the x86 platform.
*
* The chosen value of 1GiB below originates from x86 systems where it was
* used as space reserved for the MMIO region for the whole system.
*
* On x86_64 systems the MMIO regions of the IOMMU mapped devices don't
* count towards the locked memory limit since the memory is owned by the
* device. Emulated devices though do count, but the regions are usually
* small. Although it's not guaranteed that the limit will be enough for all
 * configurations, it hasn't posed a problem so far.
*
* http://www.redhat.com/archives/libvir-list/2015-November/msg00329.html
*
* Note that this may not be valid for all platforms.
*/
memKB = virDomainDefGetMemoryActual(def) + 1024 * 1024;
done:
return memKB << 10;
}
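/*
 * Worked example for the ppc64 branch above (illustrative numbers): a
 * 16 GiB guest (16777216 KiB) with a single PHB and a VFIO hostdev gets
 * baseLimit = 16777216 / 128 + 4096 + 8192 = 143360 KiB (140 MiB) and
 * passthroughLimit = MAX(2097152, 16777216 + 32768 + 8192) = 16818176 KiB,
 * so the limit is 16961536 KiB, roughly 16.2 GiB. Without VFIO only the
 * 140 MiB baseLimit applies.
 */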
/**
 * qemuDomainRequiresMemLock:
 * @def: domain definition
*
* Returns true if the locked memory limit needs to be set or updated because
* of domain configuration, VFIO passthrough devices or architecture-specific
* requirements.
 */
bool
qemuDomainRequiresMemLock(virDomainDefPtr def)
{
size_t i;
if (def->mem.locked)
return true;
/* ppc64 domains need to lock some memory even when VFIO is not used */
if (ARCH_IS_PPC64(def->os.arch))
return true;
for (i = 0; i < def->nhostdevs; i++) {
virDomainHostdevDefPtr dev = def->hostdevs[i];
if (dev->mode == VIR_DOMAIN_HOSTDEV_MODE_SUBSYS &&
dev->source.subsys.type == VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_PCI &&
dev->source.subsys.u.pci.backend == VIR_DOMAIN_HOSTDEV_PCI_BACKEND_VFIO)
return true;
}
return false;
}
/**
* qemuDomainAdjustMaxMemLock:
* @vm: domain
*
* Adjust the memory locking limit for the QEMU process associated to @vm, in
* order to comply with VFIO or architecture requirements.
*
* The limit will not be changed unless doing so is needed; the first time
* the limit is changed, the original (default) limit is stored in @vm and
* that value will be restored if qemuDomainAdjustMaxMemLock() is called once
* memory locking is no longer required.
*
* Returns: 0 on success, <0 on failure
*/
int
qemuDomainAdjustMaxMemLock(virDomainObjPtr vm)
{
unsigned long long bytes = 0;
int ret = -1;
if (qemuDomainRequiresMemLock(vm->def)) {
/* If this is the first time adjusting the limit, save the current
* value so that we can restore it once memory locking is no longer
* required. Failing to obtain the current limit is not a critical
* failure, it just means we'll be unable to lower it later */
if (!vm->original_memlock) {
if (virProcessGetMaxMemLock(vm->pid, &(vm->original_memlock)) < 0)
vm->original_memlock = 0;
}
bytes = qemuDomainGetMemLockLimitBytes(vm->def);
} else {
/* Once memory locking is no longer required, we can restore the
* original, usually very low, limit */
bytes = vm->original_memlock;
vm->original_memlock = 0;
}
/* Trying to set the memory locking limit to zero is a no-op */
if (virProcessSetMaxMemLock(vm->pid, bytes) < 0)
goto out;
ret = 0;
out:
return ret;
}
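/*
 * Illustrative sketch (hypothetical hotplug path): the adjustment is meant
 * to be invoked whenever the locking requirements may have changed, e.g.
 * before attaching or after detaching a VFIO hostdev:
 *
 *     if (qemuDomainAdjustMaxMemLock(vm) < 0)
 *         goto cleanup;
 *     ... attach or detach the device ...
 */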
/**
* qemuDomainHasVcpuPids:
* @vm: Domain object
*
* Returns true if we were able to successfully detect vCPU pids for the VM.
*/
bool
qemuDomainHasVcpuPids(virDomainObjPtr vm)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
return priv->nvcpupids > 0;
}
/**
* qemuDomainGetVcpuPid:
* @vm: domain object
* @vcpu: cpu id
*
 * Returns the vCPU pid. If @vcpu is offline or out of range, 0 is returned.
*/
pid_t
qemuDomainGetVcpuPid(virDomainObjPtr vm,
unsigned int vcpu)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
if (vcpu >= priv->nvcpupids)
return 0;
return priv->vcpupids[vcpu];
}
/**
* qemuDomainDetectVcpuPids:
* @driver: qemu driver data
* @vm: domain object
* @asyncJob: current asynchronous job type
*
* Updates vCPU thread ids in the private data of @vm.
*
* Returns number of detected vCPU threads on success, -1 on error and reports
* an appropriate error, -2 if the domain doesn't exist any more.
*/
int
qemuDomainDetectVcpuPids(virQEMUDriverPtr driver,
virDomainObjPtr vm,
int asyncJob)
{
pid_t *cpupids = NULL;
int ncpupids = 0;
qemuDomainObjPrivatePtr priv = vm->privateData;
/*
* Current QEMU *can* report info about host threads mapped
 * to vCPUs, but not in a manner we can correctly
* deal with. The TCG CPU emulation does have a separate vCPU
* thread, but it runs every vCPU in that same thread. So it
 * is impossible to set up different affinity per thread.
*
 * What's more, the 'query-cpus' command returns bizarre
 * data for the threads. It gives the TCG thread for
 * vCPU 0, but for vCPUs 1..N it actually replies with
* the main process thread ID.
*
* The result is that when we try to set affinity for
* vCPU 1, it will actually change the affinity of the
* emulator thread :-( When you try to set affinity for
* vCPUs 2, 3.... it will fail if the affinity was
* different from vCPU 1.
*
* We *could* allow vcpu pinning with TCG, if we made the
* restriction that all vCPUs had the same mask. This would
* at least let us separate emulator from vCPUs threads, as
* we do for KVM. It would need some changes to our cgroups
* CPU layout though, and error reporting for the config
* restrictions.
*
* Just disable CPU pinning with TCG until someone wants
* to try to do this hard work.
*/
if (vm->def->virtType == VIR_DOMAIN_VIRT_QEMU)
goto done;
if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
return -1;
ncpupids = qemuMonitorGetCPUInfo(priv->mon, &cpupids);
if (qemuDomainObjExitMonitor(driver, vm) < 0) {
VIR_FREE(cpupids);
return -2;
}
/* failure to get the VCPU <-> PID mapping or to execute the query
 * command will not be treated as fatal, as some versions of qemu don't
* support this command */
if (ncpupids <= 0) {
virResetLastError();
ncpupids = 0;
goto done;
}
if (ncpupids != virDomainDefGetVcpus(vm->def)) {
virReportError(VIR_ERR_INTERNAL_ERROR,
_("got wrong number of vCPU pids from QEMU monitor. "
"got %d, wanted %d"),
ncpupids, virDomainDefGetVcpus(vm->def));
VIR_FREE(cpupids);
return -1;
}
done:
VIR_FREE(priv->vcpupids);
priv->nvcpupids = ncpupids;
priv->vcpupids = cpupids;
return ncpupids;
}
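/*
 * Illustrative sketch (hypothetical caller) of handling the three return
 * cases documented above:
 *
 *     if ((ncpupids = qemuDomainDetectVcpuPids(driver, vm, asyncJob)) < 0) {
 *         if (ncpupids == -2)
 *             ... the domain is gone, no error to report ...
 *         goto cleanup;
 *     }
 */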
bool
qemuDomainSupportsNicdev(virDomainDefPtr def,
virDomainNetDefPtr net)
{
/* non-virtio ARM nics require legacy -net nic */
if (((def->os.arch == VIR_ARCH_ARMV7L) ||
(def->os.arch == VIR_ARCH_AARCH64)) &&
net->info.type != VIR_DOMAIN_DEVICE_ADDRESS_TYPE_VIRTIO_MMIO &&
net->info.type != VIR_DOMAIN_DEVICE_ADDRESS_TYPE_PCI)
return false;
return true;
}
bool
qemuDomainSupportsNetdev(virDomainDefPtr def,
virQEMUCapsPtr qemuCaps,
virDomainNetDefPtr net)
{
if (!qemuDomainSupportsNicdev(def, net))
return false;
return virQEMUCapsGet(qemuCaps, QEMU_CAPS_NETDEV);
}
int
qemuDomainNetVLAN(virDomainNetDefPtr def)
{
return qemuDomainDeviceAliasIndex(&def->info, "net");
}
virDomainDiskDefPtr
qemuDomainDiskByName(virDomainDefPtr def,
const char *name)
{
virDomainDiskDefPtr ret;
if (!(ret = virDomainDiskByName(def, name, true))) {
virReportError(VIR_ERR_INVALID_ARG, "%s",
_("No device found for specified path"));
return NULL;
}
return ret;
}
/**
* qemuDomainDefValidateDiskLunSource:
* @src: disk source struct
*
* Validate whether the disk source is valid for disk device='lun'.
*
 * Returns 0 if the configuration is valid; -1 and a libvirt error if the
 * source is invalid.
*/
int
qemuDomainDefValidateDiskLunSource(const virStorageSource *src)
{
if (virStorageSourceGetActualType(src) == VIR_STORAGE_TYPE_NETWORK) {
if (src->protocol != VIR_STORAGE_NET_PROTOCOL_ISCSI) {
virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
_("disk device='lun' is not supported "
"for protocol='%s'"),
virStorageNetProtocolTypeToString(src->protocol));
return -1;
}
} else if (!virStorageSourceIsBlockLocal(src)) {
virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
_("disk device='lun' is only valid for block "
"type disk source"));
return -1;
}
return 0;
}
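/*
 * Examples of what the validation above accepts and rejects (derived from
 * the checks, not an exhaustive list): an iSCSI network source
 * (<source protocol='iscsi' .../>) and a local block device source pass,
 * while a plain file-backed source or a non-iSCSI network source such as
 * NBD or gluster is rejected for device='lun'.
 */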
int
qemuDomainPrepareChannel(virDomainChrDefPtr channel,
const char *domainChannelTargetDir)
{
if (channel->targetType == VIR_DOMAIN_CHR_CHANNEL_TARGET_TYPE_VIRTIO &&
channel->source.type == VIR_DOMAIN_CHR_TYPE_UNIX &&
!channel->source.data.nix.path) {
if (virAsprintf(&channel->source.data.nix.path,
"%s/%s", domainChannelTargetDir,
channel->target.name ? channel->target.name
: "unknown.sock") < 0)
return -1;
channel->source.data.nix.listen = true;
}
return 0;
}
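/*
 * Example of the helper's effect (the path shape is an assumption based on
 * the arguments; the actual directory is supplied by the caller): a virtio
 * channel named "org.qemu.guest_agent.0" with a UNIX source but no explicit
 * path ends up with <domainChannelTargetDir>/org.qemu.guest_agent.0 in
 * listen mode.
 */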