qemu: use generalized virDomainJobData instead of qemuDomainJobInfo

This patch includes:
* introducing new files: src/hypervisor/domain_job.c and src/hypervisor/domain_job.h
* new struct virDomainJobData, which is almost the same as
  qemuDomainJobInfo - the only differences are that the qemu-specific
  job stats move into qemuDomainJobDataPrivate and a jobType member
  is added (possibly more attributes in the future if needed).
* moving qemuDomainJobStatus into domain_job.h and renaming it to
  virDomainJobStatus
* moving and renaming qemuDomainJobStatusToType
* adding the callback struct virDomainJobDataPrivateDataCallbacks,
  which takes care of allocating, copying and freeing the private
  data of virDomainJobData
* adding the virDomainJobDataPrivateDataCallbacks functions for the
  qemu hypervisor
* adding 'public' functions (shared between the different hypervisors)
  that take care of init, copy and free of virDomainJobData (a usage
  sketch follows below)
* renaming every occurrence of qemuDomainJobInfo *info to
  virDomainJobData *data

Signed-off-by: Kristina Hanicova <khanicov@redhat.com>
Reviewed-by: Jiri Denemark <jdenemar@redhat.com>
Author:  Kristina Hanicova <khanicov@redhat.com>
Date:    2022-02-11 14:49:05 +01:00 (committed by Jiri Denemark)
Parent:  79c4e4e5c4
Commit:  f304de0df6

15 changed files with 520 additions and 362 deletions
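
To illustrate how a hypervisor driver plugs into the new interface, here is a
minimal sketch (not part of the patch; the foo* names are hypothetical, while
the vir* identifiers are the ones introduced below): the driver supplies the
three private-data callbacks once and then handles virDomainJobData
generically.

/* Sketch only: assumes "domain_job.h", <string.h> and glib are available. */
typedef struct _fooDomainJobDataPrivate {
    unsigned long long bytesProcessed; /* hypothetical driver-private stat */
} fooDomainJobDataPrivate;

static void *
fooJobDataAllocPrivateData(void)
{
    return g_new0(fooDomainJobDataPrivate, 1);
}

static void *
fooJobDataCopyPrivateData(void *data)
{
    fooDomainJobDataPrivate *ret = g_new0(fooDomainJobDataPrivate, 1);

    memcpy(ret, data, sizeof(*ret));
    return ret;
}

static void
fooJobDataFreePrivateData(void *data)
{
    g_free(data);
}

static virDomainJobDataPrivateDataCallbacks fooJobDataPrivateDataCallbacks = {
    .allocPrivateData = fooJobDataAllocPrivateData,
    .copyPrivateData = fooJobDataCopyPrivateData,
    .freePrivateData = fooJobDataFreePrivateData,
};

static void
fooJobExample(void)
{
    /* Allocates the generic struct plus the driver-private payload. */
    virDomainJobData *job = virDomainJobDataInit(&fooJobDataPrivateDataCallbacks);
    fooDomainJobDataPrivate *priv = job->privateData;

    job->status = VIR_DOMAIN_JOB_STATUS_ACTIVE;
    priv->bytesProcessed = 42;

    /* The copy goes through copyPrivateData, freeing through freePrivateData. */
    virDomainJobDataFree(virDomainJobDataCopy(job));
    virDomainJobDataFree(job);
}

The real qemu implementation of these callbacks is in the qemu_domainjob.c
hunk further down; the qemu-specific stats and mirrorStats simply move behind
the privateData pointer.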

src/hypervisor/domain_job.c (new file)

@@ -0,0 +1,78 @@
/*
 * Copyright (C) 2022 Red Hat, Inc.
 * SPDX-License-Identifier: LGPL-2.1-or-later
 */

#include <config.h>
#include <string.h>

#include "domain_job.h"


virDomainJobData *
virDomainJobDataInit(virDomainJobDataPrivateDataCallbacks *cb)
{
    virDomainJobData *ret = g_new0(virDomainJobData, 1);

    ret->privateDataCb = cb;

    if (ret->privateDataCb)
        ret->privateData = ret->privateDataCb->allocPrivateData();

    return ret;
}

virDomainJobData *
virDomainJobDataCopy(virDomainJobData *data)
{
    virDomainJobData *ret = g_new0(virDomainJobData, 1);

    memcpy(ret, data, sizeof(*data));

    if (ret->privateDataCb)
        ret->privateData = data->privateDataCb->copyPrivateData(data->privateData);

    ret->errmsg = g_strdup(data->errmsg);

    return ret;
}

void
virDomainJobDataFree(virDomainJobData *data)
{
    if (!data)
        return;

    if (data->privateDataCb)
        data->privateDataCb->freePrivateData(data->privateData);

    g_free(data->errmsg);
    g_free(data);
}

virDomainJobType
virDomainJobStatusToType(virDomainJobStatus status)
{
    switch (status) {
    case VIR_DOMAIN_JOB_STATUS_NONE:
        break;

    case VIR_DOMAIN_JOB_STATUS_ACTIVE:
    case VIR_DOMAIN_JOB_STATUS_MIGRATING:
    case VIR_DOMAIN_JOB_STATUS_HYPERVISOR_COMPLETED:
    case VIR_DOMAIN_JOB_STATUS_POSTCOPY:
    case VIR_DOMAIN_JOB_STATUS_PAUSED:
        return VIR_DOMAIN_JOB_UNBOUNDED;

    case VIR_DOMAIN_JOB_STATUS_COMPLETED:
        return VIR_DOMAIN_JOB_COMPLETED;

    case VIR_DOMAIN_JOB_STATUS_FAILED:
        return VIR_DOMAIN_JOB_FAILED;

    case VIR_DOMAIN_JOB_STATUS_CANCELED:
        return VIR_DOMAIN_JOB_CANCELLED;
    }

    return VIR_DOMAIN_JOB_NONE;
}

src/hypervisor/domain_job.h (new file)

@@ -0,0 +1,72 @@
/*
 * Copyright (C) 2022 Red Hat, Inc.
 * SPDX-License-Identifier: LGPL-2.1-or-later
 */

#pragma once

#include "internal.h"

typedef enum {
    VIR_DOMAIN_JOB_STATUS_NONE = 0,
    VIR_DOMAIN_JOB_STATUS_ACTIVE,
    VIR_DOMAIN_JOB_STATUS_MIGRATING,
    VIR_DOMAIN_JOB_STATUS_HYPERVISOR_COMPLETED,
    VIR_DOMAIN_JOB_STATUS_PAUSED,
    VIR_DOMAIN_JOB_STATUS_POSTCOPY,
    VIR_DOMAIN_JOB_STATUS_COMPLETED,
    VIR_DOMAIN_JOB_STATUS_FAILED,
    VIR_DOMAIN_JOB_STATUS_CANCELED,
} virDomainJobStatus;

typedef void *(*virDomainJobDataPrivateDataAlloc) (void);
typedef void *(*virDomainJobDataPrivateDataCopy) (void *);
typedef void (*virDomainJobDataPrivateDataFree) (void *);

typedef struct _virDomainJobDataPrivateDataCallbacks virDomainJobDataPrivateDataCallbacks;
struct _virDomainJobDataPrivateDataCallbacks {
    virDomainJobDataPrivateDataAlloc allocPrivateData;
    virDomainJobDataPrivateDataCopy copyPrivateData;
    virDomainJobDataPrivateDataFree freePrivateData;
};

typedef struct _virDomainJobData virDomainJobData;
struct _virDomainJobData {
    virDomainJobType jobType;

    virDomainJobStatus status;
    virDomainJobOperation operation;
    unsigned long long started; /* When the async job started */
    unsigned long long stopped; /* When the domain's CPUs were stopped */
    unsigned long long sent; /* When the source sent status info to the
                                destination (only for migrations). */
    unsigned long long received; /* When the destination host received status
                                    info from the source (migrations only). */

    /* Computed values */
    unsigned long long timeElapsed;
    long long timeDelta; /* delta = received - sent, i.e., the difference between
                            the source and the destination time plus the time
                            between the end of Perform phase on the source and
                            the beginning of Finish phase on the destination. */
    bool timeDeltaSet;

    char *errmsg; /* optional error message for failed completed jobs */

    void *privateData; /* private data of hypervisors */
    virDomainJobDataPrivateDataCallbacks *privateDataCb; /* callbacks of private data, hypervisor based */
};

virDomainJobData *
virDomainJobDataInit(virDomainJobDataPrivateDataCallbacks *cb);

void
virDomainJobDataFree(virDomainJobData *data);

G_DEFINE_AUTOPTR_CLEANUP_FUNC(virDomainJobData, virDomainJobDataFree);

virDomainJobData *
virDomainJobDataCopy(virDomainJobData *data);

virDomainJobType
virDomainJobStatusToType(virDomainJobStatus status);
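
Because the header defines G_DEFINE_AUTOPTR_CLEANUP_FUNC for virDomainJobData,
callers can keep using automatic cleanup after switching away from
qemuDomainJobInfo, as the qemu driver does later in this patch. A minimal
sketch (the foo* name is hypothetical):

static int
fooGetCompletedStats(virDomainJobData *current)
{
    /* Freed automatically via virDomainJobDataFree when it goes out of scope. */
    g_autoptr(virDomainJobData) jobData = virDomainJobDataCopy(current);

    if (jobData->status != VIR_DOMAIN_JOB_STATUS_COMPLETED)
        return -1;

    return 0;
}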

src/hypervisor/meson.build

@ -3,6 +3,7 @@ hypervisor_sources = [
'domain_driver.c',
'virclosecallbacks.c',
'virhostdev.c',
'domain_job.c',
]
stateful_driver_source_files += files(hypervisor_sources)

src/libvirt_private.syms

@ -1577,6 +1577,13 @@ virDomainDriverParseBlkioDeviceStr;
virDomainDriverSetupPersistentDefBlkioParams;
# hypervisor/domain_job.h
virDomainJobDataCopy;
virDomainJobDataFree;
virDomainJobDataInit;
virDomainJobStatusToType;
# hypervisor/virclosecallbacks.h
virCloseCallbacksGet;
virCloseCallbacksGetConn;

src/qemu/qemu_backup.c

@ -555,7 +555,7 @@ qemuBackupBeginPullExportDisks(virDomainObj *vm,
void
qemuBackupJobTerminate(virDomainObj *vm,
qemuDomainJobStatus jobstatus)
virDomainJobStatus jobstatus)
{
qemuDomainObjPrivate *priv = vm->privateData;
@ -583,7 +583,7 @@ qemuBackupJobTerminate(virDomainObj *vm,
!(priv->backup->apiFlags & VIR_DOMAIN_BACKUP_BEGIN_REUSE_EXTERNAL) &&
(priv->backup->type == VIR_DOMAIN_BACKUP_TYPE_PULL ||
(priv->backup->type == VIR_DOMAIN_BACKUP_TYPE_PUSH &&
jobstatus != QEMU_DOMAIN_JOB_STATUS_COMPLETED))) {
jobstatus != VIR_DOMAIN_JOB_STATUS_COMPLETED))) {
uid_t uid;
gid_t gid;
@ -600,15 +600,19 @@ qemuBackupJobTerminate(virDomainObj *vm,
}
if (priv->job.current) {
qemuDomainJobInfoUpdateTime(priv->job.current);
qemuDomainJobDataPrivate *privData = NULL;
g_clear_pointer(&priv->job.completed, qemuDomainJobInfoFree);
priv->job.completed = qemuDomainJobInfoCopy(priv->job.current);
qemuDomainJobDataUpdateTime(priv->job.current);
priv->job.completed->stats.backup.total = priv->backup->push_total;
priv->job.completed->stats.backup.transferred = priv->backup->push_transferred;
priv->job.completed->stats.backup.tmp_used = priv->backup->pull_tmp_used;
priv->job.completed->stats.backup.tmp_total = priv->backup->pull_tmp_total;
g_clear_pointer(&priv->job.completed, virDomainJobDataFree);
priv->job.completed = virDomainJobDataCopy(priv->job.current);
privData = priv->job.completed->privateData;
privData->stats.backup.total = priv->backup->push_total;
privData->stats.backup.transferred = priv->backup->push_transferred;
privData->stats.backup.tmp_used = priv->backup->pull_tmp_used;
privData->stats.backup.tmp_total = priv->backup->pull_tmp_total;
priv->job.completed->status = jobstatus;
priv->job.completed->errmsg = g_strdup(priv->backup->errmsg);
@ -686,7 +690,7 @@ qemuBackupJobCancelBlockjobs(virDomainObj *vm,
}
if (terminatebackup && !has_active)
qemuBackupJobTerminate(vm, QEMU_DOMAIN_JOB_STATUS_CANCELED);
qemuBackupJobTerminate(vm, VIR_DOMAIN_JOB_STATUS_CANCELED);
}
@ -741,6 +745,7 @@ qemuBackupBegin(virDomainObj *vm,
unsigned int flags)
{
qemuDomainObjPrivate *priv = vm->privateData;
qemuDomainJobDataPrivate *privData = priv->job.current->privateData;
g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(priv->driver);
g_autoptr(virDomainBackupDef) def = NULL;
g_autofree char *suffix = NULL;
@ -794,7 +799,7 @@ qemuBackupBegin(virDomainObj *vm,
qemuDomainObjSetAsyncJobMask(vm, (QEMU_JOB_DEFAULT_MASK |
JOB_MASK(QEMU_JOB_SUSPEND) |
JOB_MASK(QEMU_JOB_MODIFY)));
priv->job.current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP;
privData->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP;
if (!virDomainObjIsActive(vm)) {
virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
@ -984,7 +989,7 @@ qemuBackupNotifyBlockjobEnd(virDomainObj *vm,
bool has_cancelling = false;
bool has_cancelled = false;
bool has_failed = false;
qemuDomainJobStatus jobstatus = QEMU_DOMAIN_JOB_STATUS_COMPLETED;
virDomainJobStatus jobstatus = VIR_DOMAIN_JOB_STATUS_COMPLETED;
virDomainBackupDef *backup = priv->backup;
size_t i;
@ -1081,9 +1086,9 @@ qemuBackupNotifyBlockjobEnd(virDomainObj *vm,
/* all sub-jobs have stopped */
if (has_failed)
jobstatus = QEMU_DOMAIN_JOB_STATUS_FAILED;
jobstatus = VIR_DOMAIN_JOB_STATUS_FAILED;
else if (has_cancelled && backup->type == VIR_DOMAIN_BACKUP_TYPE_PUSH)
jobstatus = QEMU_DOMAIN_JOB_STATUS_CANCELED;
jobstatus = VIR_DOMAIN_JOB_STATUS_CANCELED;
qemuBackupJobTerminate(vm, jobstatus);
}
@ -1134,9 +1139,10 @@ qemuBackupGetJobInfoStatsUpdateOne(virDomainObj *vm,
int
qemuBackupGetJobInfoStats(virQEMUDriver *driver,
virDomainObj *vm,
qemuDomainJobInfo *jobInfo)
virDomainJobData *jobData)
{
qemuDomainBackupStats *stats = &jobInfo->stats.backup;
qemuDomainJobDataPrivate *privJob = jobData->privateData;
qemuDomainBackupStats *stats = &privJob->stats.backup;
qemuDomainObjPrivate *priv = vm->privateData;
qemuMonitorJobInfo **blockjobs = NULL;
size_t nblockjobs = 0;
@ -1150,10 +1156,10 @@ qemuBackupGetJobInfoStats(virQEMUDriver *driver,
return -1;
}
if (qemuDomainJobInfoUpdateTime(jobInfo) < 0)
if (qemuDomainJobDataUpdateTime(jobData) < 0)
return -1;
jobInfo->status = QEMU_DOMAIN_JOB_STATUS_ACTIVE;
jobData->status = VIR_DOMAIN_JOB_STATUS_ACTIVE;
qemuDomainObjEnterMonitor(driver, vm);

src/qemu/qemu_backup.h

@ -45,12 +45,12 @@ qemuBackupNotifyBlockjobEnd(virDomainObj *vm,
void
qemuBackupJobTerminate(virDomainObj *vm,
qemuDomainJobStatus jobstatus);
virDomainJobStatus jobstatus);
int
qemuBackupGetJobInfoStats(virQEMUDriver *driver,
virDomainObj *vm,
qemuDomainJobInfo *jobInfo);
virDomainJobData *jobData);
/* exported for testing */
int

src/qemu/qemu_domainjob.c

@ -63,6 +63,38 @@ VIR_ENUM_IMPL(qemuDomainAsyncJob,
"backup",
);
static void *
qemuJobDataAllocPrivateData(void)
{
return g_new0(qemuDomainJobDataPrivate, 1);
}
static void *
qemuJobDataCopyPrivateData(void *data)
{
qemuDomainJobDataPrivate *ret = g_new0(qemuDomainJobDataPrivate, 1);
memcpy(ret, data, sizeof(qemuDomainJobDataPrivate));
return ret;
}
static void
qemuJobDataFreePrivateData(void *data)
{
g_free(data);
}
virDomainJobDataPrivateDataCallbacks qemuJobDataPrivateDataCallbacks = {
.allocPrivateData = qemuJobDataAllocPrivateData,
.copyPrivateData = qemuJobDataCopyPrivateData,
.freePrivateData = qemuJobDataFreePrivateData,
};
const char *
qemuDomainAsyncJobPhaseToString(qemuDomainAsyncJob job,
int phase G_GNUC_UNUSED)
@ -116,26 +148,6 @@ qemuDomainAsyncJobPhaseFromString(qemuDomainAsyncJob job,
}
void
qemuDomainJobInfoFree(qemuDomainJobInfo *info)
{
g_free(info->errmsg);
g_free(info);
}
qemuDomainJobInfo *
qemuDomainJobInfoCopy(qemuDomainJobInfo *info)
{
qemuDomainJobInfo *ret = g_new0(qemuDomainJobInfo, 1);
memcpy(ret, info, sizeof(*info));
ret->errmsg = g_strdup(info->errmsg);
return ret;
}
void
qemuDomainEventEmitJobCompleted(virQEMUDriver *driver,
virDomainObj *vm)
@ -149,7 +161,7 @@ qemuDomainEventEmitJobCompleted(virQEMUDriver *driver,
if (!priv->job.completed)
return;
if (qemuDomainJobInfoToParams(priv->job.completed, &type,
if (qemuDomainJobDataToParams(priv->job.completed, &type,
&params, &nparams) < 0) {
VIR_WARN("Could not get stats for completed job; domain %s",
vm->def->name);
@ -216,7 +228,7 @@ qemuDomainObjResetAsyncJob(qemuDomainJobObj *job)
job->mask = QEMU_JOB_DEFAULT_MASK;
job->abortJob = false;
VIR_FREE(job->error);
g_clear_pointer(&job->current, qemuDomainJobInfoFree);
g_clear_pointer(&job->current, virDomainJobDataFree);
job->cb->resetJobPrivate(job->privateData);
job->apiFlags = 0;
}
@ -254,8 +266,8 @@ qemuDomainObjClearJob(qemuDomainJobObj *job)
qemuDomainObjResetJob(job);
qemuDomainObjResetAsyncJob(job);
g_clear_pointer(&job->privateData, job->cb->freeJobPrivate);
g_clear_pointer(&job->current, qemuDomainJobInfoFree);
g_clear_pointer(&job->completed, qemuDomainJobInfoFree);
g_clear_pointer(&job->current, virDomainJobDataFree);
g_clear_pointer(&job->completed, virDomainJobDataFree);
virCondDestroy(&job->cond);
virCondDestroy(&job->asyncCond);
}
@ -268,111 +280,87 @@ qemuDomainTrackJob(qemuDomainJob job)
int
qemuDomainJobInfoUpdateTime(qemuDomainJobInfo *jobInfo)
qemuDomainJobDataUpdateTime(virDomainJobData *jobData)
{
unsigned long long now;
if (!jobInfo->started)
if (!jobData->started)
return 0;
if (virTimeMillisNow(&now) < 0)
return -1;
if (now < jobInfo->started) {
if (now < jobData->started) {
VIR_WARN("Async job starts in the future");
jobInfo->started = 0;
jobData->started = 0;
return 0;
}
jobInfo->timeElapsed = now - jobInfo->started;
jobData->timeElapsed = now - jobData->started;
return 0;
}
int
qemuDomainJobInfoUpdateDowntime(qemuDomainJobInfo *jobInfo)
qemuDomainJobDataUpdateDowntime(virDomainJobData *jobData)
{
unsigned long long now;
qemuDomainJobDataPrivate *priv = jobData->privateData;
if (!jobInfo->stopped)
if (!jobData->stopped)
return 0;
if (virTimeMillisNow(&now) < 0)
return -1;
if (now < jobInfo->stopped) {
if (now < jobData->stopped) {
VIR_WARN("Guest's CPUs stopped in the future");
jobInfo->stopped = 0;
jobData->stopped = 0;
return 0;
}
jobInfo->stats.mig.downtime = now - jobInfo->stopped;
jobInfo->stats.mig.downtime_set = true;
priv->stats.mig.downtime = now - jobData->stopped;
priv->stats.mig.downtime_set = true;
return 0;
}
static virDomainJobType
qemuDomainJobStatusToType(qemuDomainJobStatus status)
{
switch (status) {
case QEMU_DOMAIN_JOB_STATUS_NONE:
break;
case QEMU_DOMAIN_JOB_STATUS_ACTIVE:
case QEMU_DOMAIN_JOB_STATUS_MIGRATING:
case QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED:
case QEMU_DOMAIN_JOB_STATUS_POSTCOPY:
case QEMU_DOMAIN_JOB_STATUS_PAUSED:
return VIR_DOMAIN_JOB_UNBOUNDED;
case QEMU_DOMAIN_JOB_STATUS_COMPLETED:
return VIR_DOMAIN_JOB_COMPLETED;
case QEMU_DOMAIN_JOB_STATUS_FAILED:
return VIR_DOMAIN_JOB_FAILED;
case QEMU_DOMAIN_JOB_STATUS_CANCELED:
return VIR_DOMAIN_JOB_CANCELLED;
}
return VIR_DOMAIN_JOB_NONE;
}
int
qemuDomainJobInfoToInfo(qemuDomainJobInfo *jobInfo,
qemuDomainJobDataToInfo(virDomainJobData *jobData,
virDomainJobInfoPtr info)
{
info->type = qemuDomainJobStatusToType(jobInfo->status);
info->timeElapsed = jobInfo->timeElapsed;
qemuDomainJobDataPrivate *priv = jobData->privateData;
info->type = virDomainJobStatusToType(jobData->status);
info->timeElapsed = jobData->timeElapsed;
switch (jobInfo->statsType) {
switch (priv->statsType) {
case QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION:
info->memTotal = jobInfo->stats.mig.ram_total;
info->memRemaining = jobInfo->stats.mig.ram_remaining;
info->memProcessed = jobInfo->stats.mig.ram_transferred;
info->fileTotal = jobInfo->stats.mig.disk_total +
jobInfo->mirrorStats.total;
info->fileRemaining = jobInfo->stats.mig.disk_remaining +
(jobInfo->mirrorStats.total -
jobInfo->mirrorStats.transferred);
info->fileProcessed = jobInfo->stats.mig.disk_transferred +
jobInfo->mirrorStats.transferred;
info->memTotal = priv->stats.mig.ram_total;
info->memRemaining = priv->stats.mig.ram_remaining;
info->memProcessed = priv->stats.mig.ram_transferred;
info->fileTotal = priv->stats.mig.disk_total +
priv->mirrorStats.total;
info->fileRemaining = priv->stats.mig.disk_remaining +
(priv->mirrorStats.total -
priv->mirrorStats.transferred);
info->fileProcessed = priv->stats.mig.disk_transferred +
priv->mirrorStats.transferred;
break;
case QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP:
info->memTotal = jobInfo->stats.mig.ram_total;
info->memRemaining = jobInfo->stats.mig.ram_remaining;
info->memProcessed = jobInfo->stats.mig.ram_transferred;
info->memTotal = priv->stats.mig.ram_total;
info->memRemaining = priv->stats.mig.ram_remaining;
info->memProcessed = priv->stats.mig.ram_transferred;
break;
case QEMU_DOMAIN_JOB_STATS_TYPE_MEMDUMP:
info->memTotal = jobInfo->stats.dump.total;
info->memProcessed = jobInfo->stats.dump.completed;
info->memTotal = priv->stats.dump.total;
info->memProcessed = priv->stats.dump.completed;
info->memRemaining = info->memTotal - info->memProcessed;
break;
case QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP:
info->fileTotal = jobInfo->stats.backup.total;
info->fileProcessed = jobInfo->stats.backup.transferred;
info->fileTotal = priv->stats.backup.total;
info->fileProcessed = priv->stats.backup.transferred;
info->fileRemaining = info->fileTotal - info->fileProcessed;
break;
@ -389,13 +377,14 @@ qemuDomainJobInfoToInfo(qemuDomainJobInfo *jobInfo,
static int
qemuDomainMigrationJobInfoToParams(qemuDomainJobInfo *jobInfo,
qemuDomainMigrationJobDataToParams(virDomainJobData *jobData,
int *type,
virTypedParameterPtr *params,
int *nparams)
{
qemuMonitorMigrationStats *stats = &jobInfo->stats.mig;
qemuDomainMirrorStats *mirrorStats = &jobInfo->mirrorStats;
qemuDomainJobDataPrivate *priv = jobData->privateData;
qemuMonitorMigrationStats *stats = &priv->stats.mig;
qemuDomainMirrorStats *mirrorStats = &priv->mirrorStats;
virTypedParameterPtr par = NULL;
int maxpar = 0;
int npar = 0;
@ -404,19 +393,19 @@ qemuDomainMigrationJobInfoToParams(qemuDomainJobInfo *jobInfo,
if (virTypedParamsAddInt(&par, &npar, &maxpar,
VIR_DOMAIN_JOB_OPERATION,
jobInfo->operation) < 0)
jobData->operation) < 0)
goto error;
if (virTypedParamsAddULLong(&par, &npar, &maxpar,
VIR_DOMAIN_JOB_TIME_ELAPSED,
jobInfo->timeElapsed) < 0)
jobData->timeElapsed) < 0)
goto error;
if (jobInfo->timeDeltaSet &&
jobInfo->timeElapsed > jobInfo->timeDelta &&
if (jobData->timeDeltaSet &&
jobData->timeElapsed > jobData->timeDelta &&
virTypedParamsAddULLong(&par, &npar, &maxpar,
VIR_DOMAIN_JOB_TIME_ELAPSED_NET,
jobInfo->timeElapsed - jobInfo->timeDelta) < 0)
jobData->timeElapsed - jobData->timeDelta) < 0)
goto error;
if (stats->downtime_set &&
@ -426,11 +415,11 @@ qemuDomainMigrationJobInfoToParams(qemuDomainJobInfo *jobInfo,
goto error;
if (stats->downtime_set &&
jobInfo->timeDeltaSet &&
stats->downtime > jobInfo->timeDelta &&
jobData->timeDeltaSet &&
stats->downtime > jobData->timeDelta &&
virTypedParamsAddULLong(&par, &npar, &maxpar,
VIR_DOMAIN_JOB_DOWNTIME_NET,
stats->downtime - jobInfo->timeDelta) < 0)
stats->downtime - jobData->timeDelta) < 0)
goto error;
if (stats->setup_time_set &&
@ -505,7 +494,7 @@ qemuDomainMigrationJobInfoToParams(qemuDomainJobInfo *jobInfo,
/* The remaining stats are disk, mirror, or migration specific
* so if this is a SAVEDUMP, we can just skip them */
if (jobInfo->statsType == QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP)
if (priv->statsType == QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP)
goto done;
if (virTypedParamsAddULLong(&par, &npar, &maxpar,
@ -554,7 +543,7 @@ qemuDomainMigrationJobInfoToParams(qemuDomainJobInfo *jobInfo,
goto error;
done:
*type = qemuDomainJobStatusToType(jobInfo->status);
*type = virDomainJobStatusToType(jobData->status);
*params = par;
*nparams = npar;
return 0;
@ -566,24 +555,25 @@ qemuDomainMigrationJobInfoToParams(qemuDomainJobInfo *jobInfo,
static int
qemuDomainDumpJobInfoToParams(qemuDomainJobInfo *jobInfo,
qemuDomainDumpJobDataToParams(virDomainJobData *jobData,
int *type,
virTypedParameterPtr *params,
int *nparams)
{
qemuMonitorDumpStats *stats = &jobInfo->stats.dump;
qemuDomainJobDataPrivate *priv = jobData->privateData;
qemuMonitorDumpStats *stats = &priv->stats.dump;
virTypedParameterPtr par = NULL;
int maxpar = 0;
int npar = 0;
if (virTypedParamsAddInt(&par, &npar, &maxpar,
VIR_DOMAIN_JOB_OPERATION,
jobInfo->operation) < 0)
jobData->operation) < 0)
goto error;
if (virTypedParamsAddULLong(&par, &npar, &maxpar,
VIR_DOMAIN_JOB_TIME_ELAPSED,
jobInfo->timeElapsed) < 0)
jobData->timeElapsed) < 0)
goto error;
if (virTypedParamsAddULLong(&par, &npar, &maxpar,
@ -597,7 +587,7 @@ qemuDomainDumpJobInfoToParams(qemuDomainJobInfo *jobInfo,
stats->total - stats->completed) < 0)
goto error;
*type = qemuDomainJobStatusToType(jobInfo->status);
*type = virDomainJobStatusToType(jobData->status);
*params = par;
*nparams = npar;
return 0;
@ -609,19 +599,20 @@ qemuDomainDumpJobInfoToParams(qemuDomainJobInfo *jobInfo,
static int
qemuDomainBackupJobInfoToParams(qemuDomainJobInfo *jobInfo,
qemuDomainBackupJobDataToParams(virDomainJobData *jobData,
int *type,
virTypedParameterPtr *params,
int *nparams)
{
qemuDomainBackupStats *stats = &jobInfo->stats.backup;
qemuDomainJobDataPrivate *priv = jobData->privateData;
qemuDomainBackupStats *stats = &priv->stats.backup;
g_autoptr(virTypedParamList) par = g_new0(virTypedParamList, 1);
if (virTypedParamListAddInt(par, jobInfo->operation,
if (virTypedParamListAddInt(par, jobData->operation,
VIR_DOMAIN_JOB_OPERATION) < 0)
return -1;
if (virTypedParamListAddULLong(par, jobInfo->timeElapsed,
if (virTypedParamListAddULLong(par, jobData->timeElapsed,
VIR_DOMAIN_JOB_TIME_ELAPSED) < 0)
return -1;
@ -649,38 +640,40 @@ qemuDomainBackupJobInfoToParams(qemuDomainJobInfo *jobInfo,
return -1;
}
if (jobInfo->status != QEMU_DOMAIN_JOB_STATUS_ACTIVE &&
if (jobData->status != VIR_DOMAIN_JOB_STATUS_ACTIVE &&
virTypedParamListAddBoolean(par,
jobInfo->status == QEMU_DOMAIN_JOB_STATUS_COMPLETED,
jobData->status == VIR_DOMAIN_JOB_STATUS_COMPLETED,
VIR_DOMAIN_JOB_SUCCESS) < 0)
return -1;
if (jobInfo->errmsg &&
virTypedParamListAddString(par, jobInfo->errmsg, VIR_DOMAIN_JOB_ERRMSG) < 0)
if (jobData->errmsg &&
virTypedParamListAddString(par, jobData->errmsg, VIR_DOMAIN_JOB_ERRMSG) < 0)
return -1;
*nparams = virTypedParamListStealParams(par, params);
*type = qemuDomainJobStatusToType(jobInfo->status);
*type = virDomainJobStatusToType(jobData->status);
return 0;
}
int
qemuDomainJobInfoToParams(qemuDomainJobInfo *jobInfo,
qemuDomainJobDataToParams(virDomainJobData *jobData,
int *type,
virTypedParameterPtr *params,
int *nparams)
{
switch (jobInfo->statsType) {
qemuDomainJobDataPrivate *priv = jobData->privateData;
switch (priv->statsType) {
case QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION:
case QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP:
return qemuDomainMigrationJobInfoToParams(jobInfo, type, params, nparams);
return qemuDomainMigrationJobDataToParams(jobData, type, params, nparams);
case QEMU_DOMAIN_JOB_STATS_TYPE_MEMDUMP:
return qemuDomainDumpJobInfoToParams(jobInfo, type, params, nparams);
return qemuDomainDumpJobDataToParams(jobData, type, params, nparams);
case QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP:
return qemuDomainBackupJobInfoToParams(jobInfo, type, params, nparams);
return qemuDomainBackupJobDataToParams(jobData, type, params, nparams);
case QEMU_DOMAIN_JOB_STATS_TYPE_NONE:
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
@ -688,7 +681,7 @@ qemuDomainJobInfoToParams(qemuDomainJobInfo *jobInfo,
break;
default:
virReportEnumRangeError(qemuDomainJobStatsType, jobInfo->statsType);
virReportEnumRangeError(qemuDomainJobStatsType, priv->statsType);
break;
}
@ -895,8 +888,8 @@ qemuDomainObjBeginJobInternal(virQEMUDriver *driver,
qemuDomainAsyncJobTypeToString(asyncJob),
obj, obj->def->name);
qemuDomainObjResetAsyncJob(&priv->job);
priv->job.current = g_new0(qemuDomainJobInfo, 1);
priv->job.current->status = QEMU_DOMAIN_JOB_STATUS_ACTIVE;
priv->job.current = virDomainJobDataInit(&qemuJobDataPrivateDataCallbacks);
priv->job.current->status = VIR_DOMAIN_JOB_STATUS_ACTIVE;
priv->job.asyncJob = asyncJob;
priv->job.asyncOwner = virThreadSelfID();
priv->job.asyncOwnerAPI = g_strdup(virThreadJobGet());

src/qemu/qemu_domainjob.h

@ -20,6 +20,7 @@
#include <glib-object.h>
#include "qemu_monitor.h"
#include "domain_job.h"
#define JOB_MASK(job) (job == 0 ? 0 : 1 << (job - 1))
#define QEMU_JOB_DEFAULT_MASK \
@ -79,17 +80,6 @@ typedef enum {
} qemuDomainAsyncJob;
VIR_ENUM_DECL(qemuDomainAsyncJob);
typedef enum {
QEMU_DOMAIN_JOB_STATUS_NONE = 0,
QEMU_DOMAIN_JOB_STATUS_ACTIVE,
QEMU_DOMAIN_JOB_STATUS_MIGRATING,
QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED,
QEMU_DOMAIN_JOB_STATUS_PAUSED,
QEMU_DOMAIN_JOB_STATUS_POSTCOPY,
QEMU_DOMAIN_JOB_STATUS_COMPLETED,
QEMU_DOMAIN_JOB_STATUS_FAILED,
QEMU_DOMAIN_JOB_STATUS_CANCELED,
} qemuDomainJobStatus;
typedef enum {
QEMU_DOMAIN_JOB_STATS_TYPE_NONE = 0,
@ -114,24 +104,8 @@ struct _qemuDomainBackupStats {
unsigned long long tmp_total;
};
typedef struct _qemuDomainJobInfo qemuDomainJobInfo;
struct _qemuDomainJobInfo {
qemuDomainJobStatus status;
virDomainJobOperation operation;
unsigned long long started; /* When the async job started */
unsigned long long stopped; /* When the domain's CPUs were stopped */
unsigned long long sent; /* When the source sent status info to the
destination (only for migrations). */
unsigned long long received; /* When the destination host received status
info from the source (migrations only). */
/* Computed values */
unsigned long long timeElapsed;
long long timeDelta; /* delta = received - sent, i.e., the difference
between the source and the destination time plus
the time between the end of Perform phase on the
source and the beginning of Finish phase on the
destination. */
bool timeDeltaSet;
typedef struct _qemuDomainJobDataPrivate qemuDomainJobDataPrivate;
struct _qemuDomainJobDataPrivate {
/* Raw values from QEMU */
qemuDomainJobStatsType statsType;
union {
@ -140,17 +114,9 @@ struct _qemuDomainJobInfo {
qemuDomainBackupStats backup;
} stats;
qemuDomainMirrorStats mirrorStats;
char *errmsg; /* optional error message for failed completed jobs */
};
void
qemuDomainJobInfoFree(qemuDomainJobInfo *info);
G_DEFINE_AUTOPTR_CLEANUP_FUNC(qemuDomainJobInfo, qemuDomainJobInfoFree);
qemuDomainJobInfo *
qemuDomainJobInfoCopy(qemuDomainJobInfo *info);
extern virDomainJobDataPrivateDataCallbacks qemuJobDataPrivateDataCallbacks;
typedef struct _qemuDomainJobObj qemuDomainJobObj;
@ -198,8 +164,8 @@ struct _qemuDomainJobObj {
unsigned long long asyncStarted; /* When the current async job started */
int phase; /* Job phase (mainly for migrations) */
unsigned long long mask; /* Jobs allowed during async job */
qemuDomainJobInfo *current; /* async job progress data */
qemuDomainJobInfo *completed; /* statistics data of a recently completed job */
virDomainJobData *current; /* async job progress data */
virDomainJobData *completed; /* statistics data of a recently completed job */
bool abortJob; /* abort of the job requested */
char *error; /* job event completion error */
unsigned long apiFlags; /* flags passed to the API which started the async job */
@ -256,14 +222,14 @@ void qemuDomainObjDiscardAsyncJob(virQEMUDriver *driver,
virDomainObj *obj);
void qemuDomainObjReleaseAsyncJob(virDomainObj *obj);
int qemuDomainJobInfoUpdateTime(qemuDomainJobInfo *jobInfo)
int qemuDomainJobDataUpdateTime(virDomainJobData *jobData)
ATTRIBUTE_NONNULL(1);
int qemuDomainJobInfoUpdateDowntime(qemuDomainJobInfo *jobInfo)
int qemuDomainJobDataUpdateDowntime(virDomainJobData *jobData)
ATTRIBUTE_NONNULL(1);
int qemuDomainJobInfoToInfo(qemuDomainJobInfo *jobInfo,
int qemuDomainJobDataToInfo(virDomainJobData *jobData,
virDomainJobInfoPtr info)
ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2);
int qemuDomainJobInfoToParams(qemuDomainJobInfo *jobInfo,
int qemuDomainJobDataToParams(virDomainJobData *jobData,
int *type,
virTypedParameterPtr *params,
int *nparams)

src/qemu/qemu_driver.c

@ -2637,6 +2637,7 @@ qemuDomainSaveInternal(virQEMUDriver *driver,
int ret = -1;
virObjectEvent *event = NULL;
qemuDomainObjPrivate *priv = vm->privateData;
qemuDomainJobDataPrivate *privJobCurrent = priv->job.current->privateData;
virQEMUSaveData *data = NULL;
g_autoptr(qemuDomainSaveCookie) cookie = NULL;
@ -2653,7 +2654,7 @@ qemuDomainSaveInternal(virQEMUDriver *driver,
goto endjob;
}
priv->job.current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP;
privJobCurrent->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP;
/* Pause */
if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
@ -2946,6 +2947,7 @@ qemuDumpWaitForCompletion(virDomainObj *vm)
{
qemuDomainObjPrivate *priv = vm->privateData;
qemuDomainJobPrivate *jobPriv = priv->job.privateData;
qemuDomainJobDataPrivate *privJobCurrent = priv->job.current->privateData;
VIR_DEBUG("Waiting for dump completion");
while (!jobPriv->dumpCompleted && !priv->job.abortJob) {
@ -2953,7 +2955,7 @@ qemuDumpWaitForCompletion(virDomainObj *vm)
return -1;
}
if (priv->job.current->stats.dump.status == QEMU_MONITOR_DUMP_STATUS_FAILED) {
if (privJobCurrent->stats.dump.status == QEMU_MONITOR_DUMP_STATUS_FAILED) {
if (priv->job.error)
virReportError(VIR_ERR_OPERATION_FAILED,
_("memory-only dump failed: %s"),
@ -2964,7 +2966,7 @@ qemuDumpWaitForCompletion(virDomainObj *vm)
return -1;
}
qemuDomainJobInfoUpdateTime(priv->job.current);
qemuDomainJobDataUpdateTime(priv->job.current);
return 0;
}
@ -2992,10 +2994,13 @@ qemuDumpToFd(virQEMUDriver *driver,
if (qemuSecuritySetImageFDLabel(driver->securityManager, vm->def, fd) < 0)
return -1;
if (detach)
priv->job.current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_MEMDUMP;
else
g_clear_pointer(&priv->job.current, qemuDomainJobInfoFree);
if (detach) {
qemuDomainJobDataPrivate *privStats = priv->job.current->privateData;
privStats->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_MEMDUMP;
} else {
g_clear_pointer(&priv->job.current, virDomainJobDataFree);
}
if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
return -1;
@ -3130,6 +3135,7 @@ qemuDomainCoreDumpWithFormat(virDomainPtr dom,
virQEMUDriver *driver = dom->conn->privateData;
virDomainObj *vm;
qemuDomainObjPrivate *priv = NULL;
qemuDomainJobDataPrivate *privJobCurrent = NULL;
bool resume = false, paused = false;
int ret = -1;
virObjectEvent *event = NULL;
@ -3154,7 +3160,8 @@ qemuDomainCoreDumpWithFormat(virDomainPtr dom,
goto endjob;
priv = vm->privateData;
priv->job.current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP;
privJobCurrent = priv->job.current->privateData;
privJobCurrent->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP;
/* Migrate will always stop the VM, so the resume condition is
independent of whether the stop command is issued. */
@ -12422,28 +12429,30 @@ qemuConnectBaselineHypervisorCPU(virConnectPtr conn,
static int
qemuDomainGetJobInfoMigrationStats(virQEMUDriver *driver,
virDomainObj *vm,
qemuDomainJobInfo *jobInfo)
virDomainJobData *jobData)
{
qemuDomainObjPrivate *priv = vm->privateData;
qemuDomainJobDataPrivate *privStats = jobData->privateData;
bool events = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_EVENT);
if (jobInfo->status == QEMU_DOMAIN_JOB_STATUS_ACTIVE ||
jobInfo->status == QEMU_DOMAIN_JOB_STATUS_MIGRATING ||
jobInfo->status == QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED ||
jobInfo->status == QEMU_DOMAIN_JOB_STATUS_POSTCOPY) {
if (jobData->status == VIR_DOMAIN_JOB_STATUS_ACTIVE ||
jobData->status == VIR_DOMAIN_JOB_STATUS_MIGRATING ||
jobData->status == VIR_DOMAIN_JOB_STATUS_HYPERVISOR_COMPLETED ||
jobData->status == VIR_DOMAIN_JOB_STATUS_POSTCOPY) {
if (events &&
jobInfo->status != QEMU_DOMAIN_JOB_STATUS_ACTIVE &&
jobData->status != VIR_DOMAIN_JOB_STATUS_ACTIVE &&
qemuMigrationAnyFetchStats(driver, vm, QEMU_ASYNC_JOB_NONE,
jobInfo, NULL) < 0)
jobData, NULL) < 0)
return -1;
if (jobInfo->status == QEMU_DOMAIN_JOB_STATUS_ACTIVE &&
jobInfo->statsType == QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION &&
if (jobData->status == VIR_DOMAIN_JOB_STATUS_ACTIVE &&
privStats->statsType == QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION &&
qemuMigrationSrcFetchMirrorStats(driver, vm, QEMU_ASYNC_JOB_NONE,
jobInfo) < 0)
jobData) < 0)
return -1;
if (qemuDomainJobInfoUpdateTime(jobInfo) < 0)
if (qemuDomainJobDataUpdateTime(jobData) < 0)
return -1;
}
@ -12454,9 +12463,10 @@ qemuDomainGetJobInfoMigrationStats(virQEMUDriver *driver,
static int
qemuDomainGetJobInfoDumpStats(virQEMUDriver *driver,
virDomainObj *vm,
qemuDomainJobInfo *jobInfo)
virDomainJobData *jobData)
{
qemuDomainObjPrivate *priv = vm->privateData;
qemuDomainJobDataPrivate *privJob = jobData->privateData;
qemuMonitorDumpStats stats = { 0 };
int rc;
@ -12469,33 +12479,33 @@ qemuDomainGetJobInfoDumpStats(virQEMUDriver *driver,
if (rc < 0)
return -1;
jobInfo->stats.dump = stats;
privJob->stats.dump = stats;
if (qemuDomainJobInfoUpdateTime(jobInfo) < 0)
if (qemuDomainJobDataUpdateTime(jobData) < 0)
return -1;
switch (jobInfo->stats.dump.status) {
switch (privJob->stats.dump.status) {
case QEMU_MONITOR_DUMP_STATUS_NONE:
case QEMU_MONITOR_DUMP_STATUS_FAILED:
case QEMU_MONITOR_DUMP_STATUS_LAST:
virReportError(VIR_ERR_OPERATION_FAILED,
_("dump query failed, status=%d"),
jobInfo->stats.dump.status);
privJob->stats.dump.status);
return -1;
break;
case QEMU_MONITOR_DUMP_STATUS_ACTIVE:
jobInfo->status = QEMU_DOMAIN_JOB_STATUS_ACTIVE;
jobData->status = VIR_DOMAIN_JOB_STATUS_ACTIVE;
VIR_DEBUG("dump active, bytes written='%llu' remaining='%llu'",
jobInfo->stats.dump.completed,
jobInfo->stats.dump.total -
jobInfo->stats.dump.completed);
privJob->stats.dump.completed,
privJob->stats.dump.total -
privJob->stats.dump.completed);
break;
case QEMU_MONITOR_DUMP_STATUS_COMPLETED:
jobInfo->status = QEMU_DOMAIN_JOB_STATUS_COMPLETED;
jobData->status = VIR_DOMAIN_JOB_STATUS_COMPLETED;
VIR_DEBUG("dump completed, bytes written='%llu'",
jobInfo->stats.dump.completed);
privJob->stats.dump.completed);
break;
}
@ -12507,16 +12517,17 @@ static int
qemuDomainGetJobStatsInternal(virQEMUDriver *driver,
virDomainObj *vm,
bool completed,
qemuDomainJobInfo **jobInfo)
virDomainJobData **jobData)
{
qemuDomainObjPrivate *priv = vm->privateData;
qemuDomainJobDataPrivate *privStats = NULL;
int ret = -1;
*jobInfo = NULL;
*jobData = NULL;
if (completed) {
if (priv->job.completed && !priv->job.current)
*jobInfo = qemuDomainJobInfoCopy(priv->job.completed);
*jobData = virDomainJobDataCopy(priv->job.completed);
return 0;
}
@ -12538,22 +12549,24 @@ qemuDomainGetJobStatsInternal(virQEMUDriver *driver,
ret = 0;
goto cleanup;
}
*jobInfo = qemuDomainJobInfoCopy(priv->job.current);
*jobData = virDomainJobDataCopy(priv->job.current);
switch ((*jobInfo)->statsType) {
privStats = (*jobData)->privateData;
switch (privStats->statsType) {
case QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION:
case QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP:
if (qemuDomainGetJobInfoMigrationStats(driver, vm, *jobInfo) < 0)
if (qemuDomainGetJobInfoMigrationStats(driver, vm, *jobData) < 0)
goto cleanup;
break;
case QEMU_DOMAIN_JOB_STATS_TYPE_MEMDUMP:
if (qemuDomainGetJobInfoDumpStats(driver, vm, *jobInfo) < 0)
if (qemuDomainGetJobInfoDumpStats(driver, vm, *jobData) < 0)
goto cleanup;
break;
case QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP:
if (qemuBackupGetJobInfoStats(driver, vm, *jobInfo) < 0)
if (qemuBackupGetJobInfoStats(driver, vm, *jobData) < 0)
goto cleanup;
break;
@ -12574,7 +12587,7 @@ qemuDomainGetJobInfo(virDomainPtr dom,
virDomainJobInfoPtr info)
{
virQEMUDriver *driver = dom->conn->privateData;
g_autoptr(qemuDomainJobInfo) jobInfo = NULL;
g_autoptr(virDomainJobData) jobData = NULL;
virDomainObj *vm;
int ret = -1;
@ -12586,16 +12599,16 @@ qemuDomainGetJobInfo(virDomainPtr dom,
if (virDomainGetJobInfoEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
if (qemuDomainGetJobStatsInternal(driver, vm, false, &jobInfo) < 0)
if (qemuDomainGetJobStatsInternal(driver, vm, false, &jobData) < 0)
goto cleanup;
if (!jobInfo ||
jobInfo->status == QEMU_DOMAIN_JOB_STATUS_NONE) {
if (!jobData ||
jobData->status == VIR_DOMAIN_JOB_STATUS_NONE) {
ret = 0;
goto cleanup;
}
ret = qemuDomainJobInfoToInfo(jobInfo, info);
ret = qemuDomainJobDataToInfo(jobData, info);
cleanup:
virDomainObjEndAPI(&vm);
@ -12613,7 +12626,7 @@ qemuDomainGetJobStats(virDomainPtr dom,
virQEMUDriver *driver = dom->conn->privateData;
virDomainObj *vm;
qemuDomainObjPrivate *priv;
g_autoptr(qemuDomainJobInfo) jobInfo = NULL;
g_autoptr(virDomainJobData) jobData = NULL;
bool completed = !!(flags & VIR_DOMAIN_JOB_STATS_COMPLETED);
int ret = -1;
@ -12627,11 +12640,11 @@ qemuDomainGetJobStats(virDomainPtr dom,
goto cleanup;
priv = vm->privateData;
if (qemuDomainGetJobStatsInternal(driver, vm, completed, &jobInfo) < 0)
if (qemuDomainGetJobStatsInternal(driver, vm, completed, &jobData) < 0)
goto cleanup;
if (!jobInfo ||
jobInfo->status == QEMU_DOMAIN_JOB_STATUS_NONE) {
if (!jobData ||
jobData->status == VIR_DOMAIN_JOB_STATUS_NONE) {
*type = VIR_DOMAIN_JOB_NONE;
*params = NULL;
*nparams = 0;
@ -12639,10 +12652,10 @@ qemuDomainGetJobStats(virDomainPtr dom,
goto cleanup;
}
ret = qemuDomainJobInfoToParams(jobInfo, type, params, nparams);
ret = qemuDomainJobDataToParams(jobData, type, params, nparams);
if (completed && ret == 0 && !(flags & VIR_DOMAIN_JOB_STATS_KEEP_COMPLETED))
g_clear_pointer(&priv->job.completed, qemuDomainJobInfoFree);
g_clear_pointer(&priv->job.completed, virDomainJobDataFree);
cleanup:
virDomainObjEndAPI(&vm);
@ -12708,7 +12721,7 @@ static int qemuDomainAbortJob(virDomainPtr dom)
break;
case QEMU_ASYNC_JOB_MIGRATION_OUT:
if ((priv->job.current->status == QEMU_DOMAIN_JOB_STATUS_POSTCOPY ||
if ((priv->job.current->status == VIR_DOMAIN_JOB_STATUS_POSTCOPY ||
(virDomainObjGetState(vm, &reason) == VIR_DOMAIN_PAUSED &&
reason == VIR_DOMAIN_PAUSED_POSTCOPY))) {
virReportError(VIR_ERR_OPERATION_INVALID, "%s",

src/qemu/qemu_migration.c

@ -1199,7 +1199,7 @@ qemuMigrationSrcNBDStorageCopy(virQEMUDriver *driver,
return -1;
if (priv->job.abortJob) {
priv->job.current->status = QEMU_DOMAIN_JOB_STATUS_CANCELED;
priv->job.current->status = VIR_DOMAIN_JOB_STATUS_CANCELED;
virReportError(VIR_ERR_OPERATION_ABORTED, _("%s: %s"),
qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
_("canceled by client"));
@ -1622,35 +1622,37 @@ qemuMigrationSrcWaitForSpice(virDomainObj *vm)
static void
qemuMigrationUpdateJobType(qemuDomainJobInfo *jobInfo)
qemuMigrationUpdateJobType(virDomainJobData *jobData)
{
switch ((qemuMonitorMigrationStatus) jobInfo->stats.mig.status) {
qemuDomainJobDataPrivate *priv = jobData->privateData;
switch ((qemuMonitorMigrationStatus) priv->stats.mig.status) {
case QEMU_MONITOR_MIGRATION_STATUS_POSTCOPY:
jobInfo->status = QEMU_DOMAIN_JOB_STATUS_POSTCOPY;
jobData->status = VIR_DOMAIN_JOB_STATUS_POSTCOPY;
break;
case QEMU_MONITOR_MIGRATION_STATUS_COMPLETED:
jobInfo->status = QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED;
jobData->status = VIR_DOMAIN_JOB_STATUS_HYPERVISOR_COMPLETED;
break;
case QEMU_MONITOR_MIGRATION_STATUS_INACTIVE:
jobInfo->status = QEMU_DOMAIN_JOB_STATUS_NONE;
jobData->status = VIR_DOMAIN_JOB_STATUS_NONE;
break;
case QEMU_MONITOR_MIGRATION_STATUS_ERROR:
jobInfo->status = QEMU_DOMAIN_JOB_STATUS_FAILED;
jobData->status = VIR_DOMAIN_JOB_STATUS_FAILED;
break;
case QEMU_MONITOR_MIGRATION_STATUS_CANCELLED:
jobInfo->status = QEMU_DOMAIN_JOB_STATUS_CANCELED;
jobData->status = VIR_DOMAIN_JOB_STATUS_CANCELED;
break;
case QEMU_MONITOR_MIGRATION_STATUS_PRE_SWITCHOVER:
jobInfo->status = QEMU_DOMAIN_JOB_STATUS_PAUSED;
jobData->status = VIR_DOMAIN_JOB_STATUS_PAUSED;
break;
case QEMU_MONITOR_MIGRATION_STATUS_DEVICE:
jobInfo->status = QEMU_DOMAIN_JOB_STATUS_MIGRATING;
jobData->status = VIR_DOMAIN_JOB_STATUS_MIGRATING;
break;
case QEMU_MONITOR_MIGRATION_STATUS_SETUP:
@ -1667,11 +1669,12 @@ int
qemuMigrationAnyFetchStats(virQEMUDriver *driver,
virDomainObj *vm,
qemuDomainAsyncJob asyncJob,
qemuDomainJobInfo *jobInfo,
virDomainJobData *jobData,
char **error)
{
qemuDomainObjPrivate *priv = vm->privateData;
qemuMonitorMigrationStats stats;
qemuDomainJobDataPrivate *privJob = jobData->privateData;
int rv;
if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
@ -1683,7 +1686,7 @@ qemuMigrationAnyFetchStats(virQEMUDriver *driver,
if (rv < 0)
return -1;
jobInfo->stats.mig = stats;
privJob->stats.mig = stats;
return 0;
}
@ -1724,41 +1727,42 @@ qemuMigrationJobCheckStatus(virQEMUDriver *driver,
qemuDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
qemuDomainJobInfo *jobInfo = priv->job.current;
virDomainJobData *jobData = priv->job.current;
qemuDomainJobDataPrivate *privJob = jobData->privateData;
g_autofree char *error = NULL;
bool events = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_EVENT);
if (!events ||
jobInfo->stats.mig.status == QEMU_MONITOR_MIGRATION_STATUS_ERROR) {
if (qemuMigrationAnyFetchStats(driver, vm, asyncJob, jobInfo, &error) < 0)
privJob->stats.mig.status == QEMU_MONITOR_MIGRATION_STATUS_ERROR) {
if (qemuMigrationAnyFetchStats(driver, vm, asyncJob, jobData, &error) < 0)
return -1;
}
qemuMigrationUpdateJobType(jobInfo);
qemuMigrationUpdateJobType(jobData);
switch (jobInfo->status) {
case QEMU_DOMAIN_JOB_STATUS_NONE:
switch (jobData->status) {
case VIR_DOMAIN_JOB_STATUS_NONE:
virReportError(VIR_ERR_OPERATION_FAILED, _("%s: %s"),
qemuMigrationJobName(vm), _("is not active"));
return -1;
case QEMU_DOMAIN_JOB_STATUS_FAILED:
case VIR_DOMAIN_JOB_STATUS_FAILED:
virReportError(VIR_ERR_OPERATION_FAILED, _("%s: %s"),
qemuMigrationJobName(vm),
error ? error : _("unexpectedly failed"));
return -1;
case QEMU_DOMAIN_JOB_STATUS_CANCELED:
case VIR_DOMAIN_JOB_STATUS_CANCELED:
virReportError(VIR_ERR_OPERATION_ABORTED, _("%s: %s"),
qemuMigrationJobName(vm), _("canceled by client"));
return -1;
case QEMU_DOMAIN_JOB_STATUS_COMPLETED:
case QEMU_DOMAIN_JOB_STATUS_ACTIVE:
case QEMU_DOMAIN_JOB_STATUS_MIGRATING:
case QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED:
case QEMU_DOMAIN_JOB_STATUS_POSTCOPY:
case QEMU_DOMAIN_JOB_STATUS_PAUSED:
case VIR_DOMAIN_JOB_STATUS_COMPLETED:
case VIR_DOMAIN_JOB_STATUS_ACTIVE:
case VIR_DOMAIN_JOB_STATUS_MIGRATING:
case VIR_DOMAIN_JOB_STATUS_HYPERVISOR_COMPLETED:
case VIR_DOMAIN_JOB_STATUS_POSTCOPY:
case VIR_DOMAIN_JOB_STATUS_PAUSED:
break;
}
@ -1789,7 +1793,7 @@ qemuMigrationAnyCompleted(virQEMUDriver *driver,
unsigned int flags)
{
qemuDomainObjPrivate *priv = vm->privateData;
qemuDomainJobInfo *jobInfo = priv->job.current;
virDomainJobData *jobData = priv->job.current;
int pauseReason;
if (qemuMigrationJobCheckStatus(driver, vm, asyncJob) < 0)
@ -1819,7 +1823,7 @@ qemuMigrationAnyCompleted(virQEMUDriver *driver,
* wait again for the real end of the migration.
*/
if (flags & QEMU_MIGRATION_COMPLETED_PRE_SWITCHOVER &&
jobInfo->status == QEMU_DOMAIN_JOB_STATUS_PAUSED) {
jobData->status == VIR_DOMAIN_JOB_STATUS_PAUSED) {
VIR_DEBUG("Migration paused before switchover");
return 1;
}
@ -1829,38 +1833,38 @@ qemuMigrationAnyCompleted(virQEMUDriver *driver,
* will continue waiting until the migrate state changes to completed.
*/
if (flags & QEMU_MIGRATION_COMPLETED_POSTCOPY &&
jobInfo->status == QEMU_DOMAIN_JOB_STATUS_POSTCOPY) {
jobData->status == VIR_DOMAIN_JOB_STATUS_POSTCOPY) {
VIR_DEBUG("Migration switched to post-copy");
return 1;
}
if (jobInfo->status == QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED)
if (jobData->status == VIR_DOMAIN_JOB_STATUS_HYPERVISOR_COMPLETED)
return 1;
else
return 0;
error:
switch (jobInfo->status) {
case QEMU_DOMAIN_JOB_STATUS_MIGRATING:
case QEMU_DOMAIN_JOB_STATUS_POSTCOPY:
case QEMU_DOMAIN_JOB_STATUS_PAUSED:
switch (jobData->status) {
case VIR_DOMAIN_JOB_STATUS_MIGRATING:
case VIR_DOMAIN_JOB_STATUS_POSTCOPY:
case VIR_DOMAIN_JOB_STATUS_PAUSED:
/* The migration was aborted by us rather than QEMU itself. */
jobInfo->status = QEMU_DOMAIN_JOB_STATUS_FAILED;
jobData->status = VIR_DOMAIN_JOB_STATUS_FAILED;
return -2;
case QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED:
case VIR_DOMAIN_JOB_STATUS_HYPERVISOR_COMPLETED:
/* Something failed after QEMU already finished the migration. */
jobInfo->status = QEMU_DOMAIN_JOB_STATUS_FAILED;
jobData->status = VIR_DOMAIN_JOB_STATUS_FAILED;
return -1;
case QEMU_DOMAIN_JOB_STATUS_FAILED:
case QEMU_DOMAIN_JOB_STATUS_CANCELED:
case VIR_DOMAIN_JOB_STATUS_FAILED:
case VIR_DOMAIN_JOB_STATUS_CANCELED:
/* QEMU aborted the migration. */
return -1;
case QEMU_DOMAIN_JOB_STATUS_ACTIVE:
case QEMU_DOMAIN_JOB_STATUS_COMPLETED:
case QEMU_DOMAIN_JOB_STATUS_NONE:
case VIR_DOMAIN_JOB_STATUS_ACTIVE:
case VIR_DOMAIN_JOB_STATUS_COMPLETED:
case VIR_DOMAIN_JOB_STATUS_NONE:
/* Impossible. */
break;
}
@ -1880,11 +1884,11 @@ qemuMigrationSrcWaitForCompletion(virQEMUDriver *driver,
unsigned int flags)
{
qemuDomainObjPrivate *priv = vm->privateData;
qemuDomainJobInfo *jobInfo = priv->job.current;
virDomainJobData *jobData = priv->job.current;
bool events = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_EVENT);
int rv;
jobInfo->status = QEMU_DOMAIN_JOB_STATUS_MIGRATING;
jobData->status = VIR_DOMAIN_JOB_STATUS_MIGRATING;
while ((rv = qemuMigrationAnyCompleted(driver, vm, asyncJob,
dconn, flags)) != 1) {
@ -1894,7 +1898,7 @@ qemuMigrationSrcWaitForCompletion(virQEMUDriver *driver,
if (events) {
if (virDomainObjWait(vm) < 0) {
if (virDomainObjIsActive(vm))
jobInfo->status = QEMU_DOMAIN_JOB_STATUS_FAILED;
jobData->status = VIR_DOMAIN_JOB_STATUS_FAILED;
return -2;
}
} else {
@ -1908,17 +1912,17 @@ qemuMigrationSrcWaitForCompletion(virQEMUDriver *driver,
}
if (events)
ignore_value(qemuMigrationAnyFetchStats(driver, vm, asyncJob, jobInfo, NULL));
ignore_value(qemuMigrationAnyFetchStats(driver, vm, asyncJob, jobData, NULL));
qemuDomainJobInfoUpdateTime(jobInfo);
qemuDomainJobInfoUpdateDowntime(jobInfo);
g_clear_pointer(&priv->job.completed, qemuDomainJobInfoFree);
priv->job.completed = qemuDomainJobInfoCopy(jobInfo);
priv->job.completed->status = QEMU_DOMAIN_JOB_STATUS_COMPLETED;
qemuDomainJobDataUpdateTime(jobData);
qemuDomainJobDataUpdateDowntime(jobData);
g_clear_pointer(&priv->job.completed, virDomainJobDataFree);
priv->job.completed = virDomainJobDataCopy(jobData);
priv->job.completed->status = VIR_DOMAIN_JOB_STATUS_COMPLETED;
if (asyncJob != QEMU_ASYNC_JOB_MIGRATION_OUT &&
jobInfo->status == QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED)
jobInfo->status = QEMU_DOMAIN_JOB_STATUS_COMPLETED;
jobData->status == VIR_DOMAIN_JOB_STATUS_HYPERVISOR_COMPLETED)
jobData->status = VIR_DOMAIN_JOB_STATUS_COMPLETED;
return 0;
}
@ -3383,7 +3387,7 @@ qemuMigrationSrcConfirmPhase(virQEMUDriver *driver,
virObjectEvent *event;
qemuDomainObjPrivate *priv = vm->privateData;
qemuDomainJobPrivate *jobPriv = priv->job.privateData;
qemuDomainJobInfo *jobInfo = NULL;
virDomainJobData *jobData = NULL;
VIR_DEBUG("driver=%p, vm=%p, cookiein=%s, cookieinlen=%d, "
"flags=0x%x, retcode=%d",
@ -3403,13 +3407,15 @@ qemuMigrationSrcConfirmPhase(virQEMUDriver *driver,
return -1;
if (retcode == 0)
jobInfo = priv->job.completed;
jobData = priv->job.completed;
else
g_clear_pointer(&priv->job.completed, qemuDomainJobInfoFree);
g_clear_pointer(&priv->job.completed, virDomainJobDataFree);
/* Update times with the values sent by the destination daemon */
if (mig->jobInfo && jobInfo) {
if (mig->jobData && jobData) {
int reason;
qemuDomainJobDataPrivate *privJob = jobData->privateData;
qemuDomainJobDataPrivate *privMigJob = mig->jobData->privateData;
/* We need to refresh migration statistics after a completed post-copy
* migration since priv->job.completed contains obsolete data from the
@ -3418,14 +3424,14 @@ qemuMigrationSrcConfirmPhase(virQEMUDriver *driver,
if (virDomainObjGetState(vm, &reason) == VIR_DOMAIN_PAUSED &&
reason == VIR_DOMAIN_PAUSED_POSTCOPY &&
qemuMigrationAnyFetchStats(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
jobInfo, NULL) < 0)
jobData, NULL) < 0)
VIR_WARN("Could not refresh migration statistics");
qemuDomainJobInfoUpdateTime(jobInfo);
jobInfo->timeDeltaSet = mig->jobInfo->timeDeltaSet;
jobInfo->timeDelta = mig->jobInfo->timeDelta;
jobInfo->stats.mig.downtime_set = mig->jobInfo->stats.mig.downtime_set;
jobInfo->stats.mig.downtime = mig->jobInfo->stats.mig.downtime;
qemuDomainJobDataUpdateTime(jobData);
jobData->timeDeltaSet = mig->jobData->timeDeltaSet;
jobData->timeDelta = mig->jobData->timeDelta;
privJob->stats.mig.downtime_set = privMigJob->stats.mig.downtime_set;
privJob->stats.mig.downtime = privMigJob->stats.mig.downtime;
}
if (flags & VIR_MIGRATE_OFFLINE)
@ -4194,7 +4200,7 @@ qemuMigrationSrcRun(virQEMUDriver *driver,
/* explicitly do this *after* we entered the monitor,
* as this is a critical section so we are guaranteed
* priv->job.abortJob will not change */
priv->job.current->status = QEMU_DOMAIN_JOB_STATUS_CANCELED;
priv->job.current->status = VIR_DOMAIN_JOB_STATUS_CANCELED;
virReportError(VIR_ERR_OPERATION_ABORTED, _("%s: %s"),
qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
_("canceled by client"));
@ -4309,7 +4315,7 @@ qemuMigrationSrcRun(virQEMUDriver *driver,
* resume it now once we finished all block jobs and wait for the real
* end of the migration.
*/
if (priv->job.current->status == QEMU_DOMAIN_JOB_STATUS_PAUSED) {
if (priv->job.current->status == VIR_DOMAIN_JOB_STATUS_PAUSED) {
if (qemuMigrationSrcContinue(driver, vm,
QEMU_MONITOR_MIGRATION_STATUS_PRE_SWITCHOVER,
QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
@ -4339,8 +4345,8 @@ qemuMigrationSrcRun(virQEMUDriver *driver,
if (priv->job.completed) {
priv->job.completed->stopped = priv->job.current->stopped;
qemuDomainJobInfoUpdateTime(priv->job.completed);
qemuDomainJobInfoUpdateDowntime(priv->job.completed);
qemuDomainJobDataUpdateTime(priv->job.completed);
qemuDomainJobDataUpdateDowntime(priv->job.completed);
ignore_value(virTimeMillisNow(&priv->job.completed->sent));
}
@ -4370,7 +4376,7 @@ qemuMigrationSrcRun(virQEMUDriver *driver,
if (virDomainObjIsActive(vm)) {
if (cancel &&
priv->job.current->status != QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED &&
priv->job.current->status != VIR_DOMAIN_JOB_STATUS_HYPERVISOR_COMPLETED &&
qemuDomainObjEnterMonitorAsync(driver, vm,
QEMU_ASYNC_JOB_MIGRATION_OUT) == 0) {
qemuMonitorMigrateCancel(priv->mon);
@ -4385,8 +4391,8 @@ qemuMigrationSrcRun(virQEMUDriver *driver,
qemuMigrationSrcCancelRemoveTempBitmaps(vm, QEMU_ASYNC_JOB_MIGRATION_OUT);
if (priv->job.current->status != QEMU_DOMAIN_JOB_STATUS_CANCELED)
priv->job.current->status = QEMU_DOMAIN_JOB_STATUS_FAILED;
if (priv->job.current->status != VIR_DOMAIN_JOB_STATUS_CANCELED)
priv->job.current->status = VIR_DOMAIN_JOB_STATUS_FAILED;
}
if (iothread)
@ -5620,7 +5626,7 @@ qemuMigrationDstFinish(virQEMUDriver *driver,
unsigned short port;
unsigned long long timeReceived = 0;
virObjectEvent *event;
qemuDomainJobInfo *jobInfo = NULL;
virDomainJobData *jobData = NULL;
bool inPostCopy = false;
bool doKill = true;
@ -5644,7 +5650,7 @@ qemuMigrationDstFinish(virQEMUDriver *driver,
: QEMU_MIGRATION_PHASE_FINISH2);
qemuDomainCleanupRemove(vm, qemuMigrationDstPrepareCleanup);
g_clear_pointer(&priv->job.completed, qemuDomainJobInfoFree);
g_clear_pointer(&priv->job.completed, virDomainJobDataFree);
cookie_flags = QEMU_MIGRATION_COOKIE_NETWORK |
QEMU_MIGRATION_COOKIE_STATS |
@ -5736,7 +5742,7 @@ qemuMigrationDstFinish(virQEMUDriver *driver,
goto endjob;
}
if (priv->job.current->status == QEMU_DOMAIN_JOB_STATUS_POSTCOPY)
if (priv->job.current->status == VIR_DOMAIN_JOB_STATUS_POSTCOPY)
inPostCopy = true;
if (!(flags & VIR_MIGRATE_PAUSED)) {
@ -5772,16 +5778,16 @@ qemuMigrationDstFinish(virQEMUDriver *driver,
doKill = false;
}
if (mig->jobInfo) {
jobInfo = g_steal_pointer(&mig->jobInfo);
if (mig->jobData) {
jobData = g_steal_pointer(&mig->jobData);
if (jobInfo->sent && timeReceived) {
jobInfo->timeDelta = timeReceived - jobInfo->sent;
jobInfo->received = timeReceived;
jobInfo->timeDeltaSet = true;
if (jobData->sent && timeReceived) {
jobData->timeDelta = timeReceived - jobData->sent;
jobData->received = timeReceived;
jobData->timeDeltaSet = true;
}
qemuDomainJobInfoUpdateTime(jobInfo);
qemuDomainJobInfoUpdateDowntime(jobInfo);
qemuDomainJobDataUpdateTime(jobData);
qemuDomainJobDataUpdateDowntime(jobData);
}
if (inPostCopy) {
@ -5846,10 +5852,12 @@ qemuMigrationDstFinish(virQEMUDriver *driver,
}
if (dom) {
if (jobInfo) {
priv->job.completed = g_steal_pointer(&jobInfo);
priv->job.completed->status = QEMU_DOMAIN_JOB_STATUS_COMPLETED;
priv->job.completed->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION;
if (jobData) {
qemuDomainJobDataPrivate *privJob = jobData->privateData;
priv->job.completed = g_steal_pointer(&jobData);
priv->job.completed->status = VIR_DOMAIN_JOB_STATUS_COMPLETED;
privJob->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION;
}
if (qemuMigrationCookieFormat(mig, driver, vm,
@ -5862,7 +5870,7 @@ qemuMigrationDstFinish(virQEMUDriver *driver,
* is obsolete anyway.
*/
if (inPostCopy)
g_clear_pointer(&priv->job.completed, qemuDomainJobInfoFree);
g_clear_pointer(&priv->job.completed, virDomainJobDataFree);
}
qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
@ -5873,7 +5881,7 @@ qemuMigrationDstFinish(virQEMUDriver *driver,
qemuDomainRemoveInactiveJob(driver, vm);
cleanup:
g_clear_pointer(&jobInfo, qemuDomainJobInfoFree);
g_clear_pointer(&jobData, virDomainJobDataFree);
virPortAllocatorRelease(port);
if (priv->mon)
qemuMonitorSetDomainLog(priv->mon, NULL, NULL, NULL);
@ -6091,6 +6099,7 @@ qemuMigrationJobStart(virQEMUDriver *driver,
unsigned long apiFlags)
{
qemuDomainObjPrivate *priv = vm->privateData;
qemuDomainJobDataPrivate *privJob = priv->job.current->privateData;
virDomainJobOperation op;
unsigned long long mask;
@ -6107,7 +6116,7 @@ qemuMigrationJobStart(virQEMUDriver *driver,
if (qemuDomainObjBeginAsyncJob(driver, vm, job, op, apiFlags) < 0)
return -1;
priv->job.current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION;
privJob->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION;
qemuDomainObjSetAsyncJobMask(vm, mask);
return 0;
@ -6227,13 +6236,14 @@ int
qemuMigrationSrcFetchMirrorStats(virQEMUDriver *driver,
virDomainObj *vm,
qemuDomainAsyncJob asyncJob,
qemuDomainJobInfo *jobInfo)
virDomainJobData *jobData)
{
size_t i;
qemuDomainObjPrivate *priv = vm->privateData;
qemuDomainJobDataPrivate *privJob = jobData->privateData;
bool nbd = false;
g_autoptr(GHashTable) blockinfo = NULL;
qemuDomainMirrorStats *stats = &jobInfo->mirrorStats;
qemuDomainMirrorStats *stats = &privJob->mirrorStats;
for (i = 0; i < vm->def->ndisks; i++) {
virDomainDiskDef *disk = vm->def->disks[i];

src/qemu/qemu_migration.h

@ -221,7 +221,7 @@ int
qemuMigrationAnyFetchStats(virQEMUDriver *driver,
virDomainObj *vm,
qemuDomainAsyncJob asyncJob,
qemuDomainJobInfo *jobInfo,
virDomainJobData *jobData,
char **error);
int
@ -258,4 +258,4 @@ int
qemuMigrationSrcFetchMirrorStats(virQEMUDriver *driver,
virDomainObj *vm,
qemuDomainAsyncJob asyncJob,
qemuDomainJobInfo *jobInfo);
virDomainJobData *jobData);

src/qemu/qemu_migration_cookie.c

@ -166,7 +166,7 @@ qemuMigrationCookieFree(qemuMigrationCookie *mig)
g_free(mig->name);
g_free(mig->lockState);
g_free(mig->lockDriver);
g_clear_pointer(&mig->jobInfo, qemuDomainJobInfoFree);
g_clear_pointer(&mig->jobData, virDomainJobDataFree);
virCPUDefFree(mig->cpu);
qemuMigrationCookieCapsFree(mig->caps);
if (mig->blockDirtyBitmaps)
@ -539,8 +539,8 @@ qemuMigrationCookieAddStatistics(qemuMigrationCookie *mig,
if (!priv->job.completed)
return 0;
g_clear_pointer(&mig->jobInfo, qemuDomainJobInfoFree);
mig->jobInfo = qemuDomainJobInfoCopy(priv->job.completed);
g_clear_pointer(&mig->jobData, virDomainJobDataFree);
mig->jobData = virDomainJobDataCopy(priv->job.completed);
mig->flags |= QEMU_MIGRATION_COOKIE_STATS;
@ -640,22 +640,23 @@ qemuMigrationCookieNetworkXMLFormat(virBuffer *buf,
static void
qemuMigrationCookieStatisticsXMLFormat(virBuffer *buf,
qemuDomainJobInfo *jobInfo)
virDomainJobData *jobData)
{
qemuMonitorMigrationStats *stats = &jobInfo->stats.mig;
qemuDomainJobDataPrivate *priv = jobData->privateData;
qemuMonitorMigrationStats *stats = &priv->stats.mig;
virBufferAddLit(buf, "<statistics>\n");
virBufferAdjustIndent(buf, 2);
virBufferAsprintf(buf, "<started>%llu</started>\n", jobInfo->started);
virBufferAsprintf(buf, "<stopped>%llu</stopped>\n", jobInfo->stopped);
virBufferAsprintf(buf, "<sent>%llu</sent>\n", jobInfo->sent);
if (jobInfo->timeDeltaSet)
virBufferAsprintf(buf, "<delta>%lld</delta>\n", jobInfo->timeDelta);
virBufferAsprintf(buf, "<started>%llu</started>\n", jobData->started);
virBufferAsprintf(buf, "<stopped>%llu</stopped>\n", jobData->stopped);
virBufferAsprintf(buf, "<sent>%llu</sent>\n", jobData->sent);
if (jobData->timeDeltaSet)
virBufferAsprintf(buf, "<delta>%lld</delta>\n", jobData->timeDelta);
virBufferAsprintf(buf, "<%1$s>%2$llu</%1$s>\n",
VIR_DOMAIN_JOB_TIME_ELAPSED,
jobInfo->timeElapsed);
jobData->timeElapsed);
if (stats->downtime_set)
virBufferAsprintf(buf, "<%1$s>%2$llu</%1$s>\n",
VIR_DOMAIN_JOB_DOWNTIME,
@ -892,8 +893,8 @@ qemuMigrationCookieXMLFormat(virQEMUDriver *driver,
if ((mig->flags & QEMU_MIGRATION_COOKIE_NBD) && mig->nbd)
qemuMigrationCookieNBDXMLFormat(mig->nbd, buf);
if (mig->flags & QEMU_MIGRATION_COOKIE_STATS && mig->jobInfo)
qemuMigrationCookieStatisticsXMLFormat(buf, mig->jobInfo);
if (mig->flags & QEMU_MIGRATION_COOKIE_STATS && mig->jobData)
qemuMigrationCookieStatisticsXMLFormat(buf, mig->jobData);
if (mig->flags & QEMU_MIGRATION_COOKIE_CPU && mig->cpu)
virCPUDefFormatBufFull(buf, mig->cpu, NULL);
@ -1039,29 +1040,30 @@ qemuMigrationCookieNBDXMLParse(xmlXPathContextPtr ctxt)
}
static qemuDomainJobInfo *
static virDomainJobData *
qemuMigrationCookieStatisticsXMLParse(xmlXPathContextPtr ctxt)
{
qemuDomainJobInfo *jobInfo = NULL;
virDomainJobData *jobData = NULL;
qemuMonitorMigrationStats *stats;
qemuDomainJobDataPrivate *priv = NULL;
VIR_XPATH_NODE_AUTORESTORE(ctxt)
if (!(ctxt->node = virXPathNode("./statistics", ctxt)))
return NULL;
jobInfo = g_new0(qemuDomainJobInfo, 1);
jobData = virDomainJobDataInit(&qemuJobDataPrivateDataCallbacks);
priv = jobData->privateData;
stats = &priv->stats.mig;
jobData->status = VIR_DOMAIN_JOB_STATUS_COMPLETED;
stats = &jobInfo->stats.mig;
jobInfo->status = QEMU_DOMAIN_JOB_STATUS_COMPLETED;
virXPathULongLong("string(./started[1])", ctxt, &jobInfo->started);
virXPathULongLong("string(./stopped[1])", ctxt, &jobInfo->stopped);
virXPathULongLong("string(./sent[1])", ctxt, &jobInfo->sent);
if (virXPathLongLong("string(./delta[1])", ctxt, &jobInfo->timeDelta) == 0)
jobInfo->timeDeltaSet = true;
virXPathULongLong("string(./started[1])", ctxt, &jobData->started);
virXPathULongLong("string(./stopped[1])", ctxt, &jobData->stopped);
virXPathULongLong("string(./sent[1])", ctxt, &jobData->sent);
if (virXPathLongLong("string(./delta[1])", ctxt, &jobData->timeDelta) == 0)
jobData->timeDeltaSet = true;
virXPathULongLong("string(./" VIR_DOMAIN_JOB_TIME_ELAPSED "[1])",
ctxt, &jobInfo->timeElapsed);
ctxt, &jobData->timeElapsed);
if (virXPathULongLong("string(./" VIR_DOMAIN_JOB_DOWNTIME "[1])",
ctxt, &stats->downtime) == 0)
@ -1121,7 +1123,7 @@ qemuMigrationCookieStatisticsXMLParse(xmlXPathContextPtr ctxt)
virXPathInt("string(./" VIR_DOMAIN_JOB_AUTO_CONVERGE_THROTTLE "[1])",
ctxt, &stats->cpu_throttle_percentage);
return jobInfo;
return jobData;
}
@ -1393,7 +1395,7 @@ qemuMigrationCookieXMLParse(qemuMigrationCookie *mig,
if (flags & QEMU_MIGRATION_COOKIE_STATS &&
virXPathBoolean("boolean(./statistics)", ctxt) &&
(!(mig->jobInfo = qemuMigrationCookieStatisticsXMLParse(ctxt))))
(!(mig->jobData = qemuMigrationCookieStatisticsXMLParse(ctxt))))
return -1;
if (flags & QEMU_MIGRATION_COOKIE_CPU &&
@ -1554,8 +1556,8 @@ qemuMigrationCookieParse(virQEMUDriver *driver,
}
}
if (flags & QEMU_MIGRATION_COOKIE_STATS && mig->jobInfo && priv->job.current)
mig->jobInfo->operation = priv->job.current->operation;
if (flags & QEMU_MIGRATION_COOKIE_STATS && mig->jobData && priv->job.current)
mig->jobData->operation = priv->job.current->operation;
return g_steal_pointer(&mig);
}

src/qemu/qemu_migration_cookie.h

@ -162,7 +162,7 @@ struct _qemuMigrationCookie {
qemuMigrationCookieNBD *nbd;
/* If (flags & QEMU_MIGRATION_COOKIE_STATS) */
qemuDomainJobInfo *jobInfo;
virDomainJobData *jobData;
/* If flags & QEMU_MIGRATION_COOKIE_CPU */
virCPUDef *cpu;

src/qemu/qemu_process.c

@ -651,7 +651,7 @@ qemuProcessHandleStop(qemuMonitor *mon G_GNUC_UNUSED,
if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING &&
!priv->pausedShutdown) {
if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT) {
if (priv->job.current->status == QEMU_DOMAIN_JOB_STATUS_POSTCOPY)
if (priv->job.current->status == VIR_DOMAIN_JOB_STATUS_POSTCOPY)
reason = VIR_DOMAIN_PAUSED_POSTCOPY;
else
reason = VIR_DOMAIN_PAUSED_MIGRATION;
@ -1545,6 +1545,7 @@ qemuProcessHandleMigrationStatus(qemuMonitor *mon G_GNUC_UNUSED,
void *opaque)
{
qemuDomainObjPrivate *priv;
qemuDomainJobDataPrivate *privJob = NULL;
virQEMUDriver *driver = opaque;
virObjectEvent *event = NULL;
int reason;
@ -1561,7 +1562,9 @@ qemuProcessHandleMigrationStatus(qemuMonitor *mon G_GNUC_UNUSED,
goto cleanup;
}
priv->job.current->stats.mig.status = status;
privJob = priv->job.current->privateData;
privJob->stats.mig.status = status;
virDomainObjBroadcast(vm);
if (status == QEMU_MONITOR_MIGRATION_STATUS_POSTCOPY &&
@ -1623,6 +1626,7 @@ qemuProcessHandleDumpCompleted(qemuMonitor *mon G_GNUC_UNUSED,
{
qemuDomainObjPrivate *priv;
qemuDomainJobPrivate *jobPriv;
qemuDomainJobDataPrivate *privJobCurrent = NULL;
virObjectLock(vm);
@ -1631,18 +1635,19 @@ qemuProcessHandleDumpCompleted(qemuMonitor *mon G_GNUC_UNUSED,
priv = vm->privateData;
jobPriv = priv->job.privateData;
privJobCurrent = priv->job.current->privateData;
if (priv->job.asyncJob == QEMU_ASYNC_JOB_NONE) {
VIR_DEBUG("got DUMP_COMPLETED event without a dump_completed job");
goto cleanup;
}
jobPriv->dumpCompleted = true;
priv->job.current->stats.dump = *stats;
privJobCurrent->stats.dump = *stats;
priv->job.error = g_strdup(error);
/* Force error if extracting the DUMP_COMPLETED status failed */
if (!error && status < 0) {
priv->job.error = g_strdup(virGetLastErrorMessage());
priv->job.current->stats.dump.status = QEMU_MONITOR_DUMP_STATUS_FAILED;
privJobCurrent->stats.dump.status = QEMU_MONITOR_DUMP_STATUS_FAILED;
}
virDomainObjBroadcast(vm);
@ -3592,6 +3597,7 @@ qemuProcessRecoverJob(virQEMUDriver *driver,
unsigned int *stopFlags)
{
qemuDomainObjPrivate *priv = vm->privateData;
qemuDomainJobDataPrivate *privDataJobCurrent = NULL;
virDomainState state;
int reason;
unsigned long long now;
@ -3659,10 +3665,12 @@ qemuProcessRecoverJob(virQEMUDriver *driver,
/* We reset the job parameters for backup so that the job will look
* active. This is possible because we are able to recover the state
* of blockjobs and also the backup job allows all sub-job types */
priv->job.current = g_new0(qemuDomainJobInfo, 1);
priv->job.current = virDomainJobDataInit(&qemuJobDataPrivateDataCallbacks);
privDataJobCurrent = priv->job.current->privateData;
priv->job.current->operation = VIR_DOMAIN_JOB_OPERATION_BACKUP;
priv->job.current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP;
priv->job.current->status = QEMU_DOMAIN_JOB_STATUS_ACTIVE;
privDataJobCurrent->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP;
priv->job.current->status = VIR_DOMAIN_JOB_STATUS_ACTIVE;
priv->job.current->started = now;
break;
@ -8311,7 +8319,7 @@ void qemuProcessStop(virQEMUDriver *driver,
/* clean up a possible backup job */
if (priv->backup)
qemuBackupJobTerminate(vm, QEMU_DOMAIN_JOB_STATUS_CANCELED);
qemuBackupJobTerminate(vm, VIR_DOMAIN_JOB_STATUS_CANCELED);
/* Do this explicitly after vm->pid is reset so that security drivers don't
* try to enter the domain's namespace which is non-existent by now as qemu

src/qemu/qemu_snapshot.c

@ -1414,11 +1414,13 @@ qemuSnapshotCreateActiveExternal(virQEMUDriver *driver,
/* do the memory snapshot if necessary */
if (memory) {
qemuDomainJobDataPrivate *privJobCurrent = priv->job.current->privateData;
/* check if migration is possible */
if (!qemuMigrationSrcIsAllowed(driver, vm, false, 0))
goto cleanup;
priv->job.current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP;
privJobCurrent->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP;
/* allow the migration job to be cancelled or the domain to be paused */
qemuDomainObjSetAsyncJobMask(vm, (QEMU_JOB_DEFAULT_MASK |