qemu: use generalized virDomainJobData instead of qemuDomainJobInfo

This patch includes:
* introducing new files: src/hypervisor/domain_job.c and src/hypervisor/domain_job.h
* new struct virDomainJobData, which is almost the same as
  qemuDomainJobInfo - the only differences are moving qemu specific
  job stats into the qemuDomainJobDataPrivate and adding jobType
  (possibly more attributes in the future if needed).
* moving qemuDomainJobStatus to the domain_job.h and renaming it
  as virDomainJobStatus
* moving and renaming qemuDomainJobStatusToType
* adding callback struct virDomainJobDataPrivateDataCallbacks
  taking care of allocation, copying and freeing of private data
  of virDomainJobData
* adding functions for virDomainJobDataPrivateDataCallbacks for
  qemu hypervisor
* adding 'public' functions (i.e. shared between the different
  hypervisors) taking care of init, copy and free of virDomainJobData
* renaming every occurrence of qemuDomainJobInfo *info to
  virDomainJobData *data

Signed-off-by: Kristina Hanicova <khanicov@redhat.com>
Reviewed-by: Jiri Denemark <jdenemar@redhat.com>
This commit is contained in:
Kristina Hanicova 2022-02-11 14:49:05 +01:00 committed by Jiri Denemark
parent 79c4e4e5c4
commit f304de0df6
15 changed files with 520 additions and 362 deletions

View File

@ -0,0 +1,78 @@
/*
* Copyright (C) 2022 Red Hat, Inc.
* SPDX-License-Identifier: LGPL-2.1-or-later
*/
#include <config.h>
#include <string.h>
#include "domain_job.h"
/**
 * virDomainJobDataInit:
 * @cb: optional private-data callbacks supplied by the hypervisor
 *      driver (may be NULL when the driver has no private job stats)
 *
 * Allocate a fresh, zeroed virDomainJobData. When @cb is provided,
 * the driver's allocator is invoked so that @privateData is ready
 * for use immediately.
 *
 * Returns the new object; free with virDomainJobDataFree.
 */
virDomainJobData *
virDomainJobDataInit(virDomainJobDataPrivateDataCallbacks *cb)
{
    virDomainJobData *job = g_new0(virDomainJobData, 1);

    job->privateDataCb = cb;

    if (cb)
        job->privateData = cb->allocPrivateData();

    return job;
}
/**
 * virDomainJobDataCopy:
 * @data: job data to duplicate (must not be NULL)
 *
 * Deep-copy @data: the scalar fields are copied wholesale, while the
 * hypervisor private data (via the driver's copy callback, if any)
 * and the error message string are duplicated separately so the copy
 * owns its own allocations.
 *
 * Returns the copy; free with virDomainJobDataFree.
 */
virDomainJobData *
virDomainJobDataCopy(virDomainJobData *data)
{
    virDomainJobData *copy = g_new0(virDomainJobData, 1);

    /* struct assignment replicates every scalar field and pointer */
    *copy = *data;

    if (copy->privateDataCb)
        copy->privateData = copy->privateDataCb->copyPrivateData(data->privateData);

    copy->errmsg = g_strdup(data->errmsg);

    return copy;
}
/**
 * virDomainJobDataFree:
 * @data: job data to release; NULL is a permitted no-op
 *
 * Release @data together with its hypervisor private data (through
 * the driver's free callback, when registered) and its error message.
 */
void
virDomainJobDataFree(virDomainJobData *data)
{
    if (data == NULL)
        return;

    if (data->privateDataCb != NULL)
        data->privateDataCb->freePrivateData(data->privateData);

    g_free(data->errmsg);
    g_free(data);
}
/**
 * virDomainJobStatusToType:
 * @status: internal fine-grained job status
 *
 * Collapse the internal job status into the coarse public
 * virDomainJobType reported through the libvirt API: terminal states
 * map to their public counterpart, every in-progress state maps to
 * VIR_DOMAIN_JOB_UNBOUNDED, and NONE falls through to
 * VIR_DOMAIN_JOB_NONE.
 */
virDomainJobType
virDomainJobStatusToType(virDomainJobStatus status)
{
    switch (status) {
    case VIR_DOMAIN_JOB_STATUS_COMPLETED:
        return VIR_DOMAIN_JOB_COMPLETED;

    case VIR_DOMAIN_JOB_STATUS_FAILED:
        return VIR_DOMAIN_JOB_FAILED;

    case VIR_DOMAIN_JOB_STATUS_CANCELED:
        return VIR_DOMAIN_JOB_CANCELLED;

    /* everything still running is "unbounded" from the API's view */
    case VIR_DOMAIN_JOB_STATUS_ACTIVE:
    case VIR_DOMAIN_JOB_STATUS_MIGRATING:
    case VIR_DOMAIN_JOB_STATUS_HYPERVISOR_COMPLETED:
    case VIR_DOMAIN_JOB_STATUS_POSTCOPY:
    case VIR_DOMAIN_JOB_STATUS_PAUSED:
        return VIR_DOMAIN_JOB_UNBOUNDED;

    case VIR_DOMAIN_JOB_STATUS_NONE:
        break;
    }

    return VIR_DOMAIN_JOB_NONE;
}

View File

@ -0,0 +1,72 @@
/*
 * Copyright (C) 2022 Red Hat, Inc.
 * SPDX-License-Identifier: LGPL-2.1-or-later
 */

#pragma once

#include "internal.h"

/* Internal, fine-grained job status shared by all hypervisor drivers;
 * collapsed into the public virDomainJobType via
 * virDomainJobStatusToType(). */
typedef enum {
    VIR_DOMAIN_JOB_STATUS_NONE = 0,
    VIR_DOMAIN_JOB_STATUS_ACTIVE,
    VIR_DOMAIN_JOB_STATUS_MIGRATING,
    VIR_DOMAIN_JOB_STATUS_HYPERVISOR_COMPLETED,
    VIR_DOMAIN_JOB_STATUS_PAUSED,
    VIR_DOMAIN_JOB_STATUS_POSTCOPY,
    VIR_DOMAIN_JOB_STATUS_COMPLETED,
    VIR_DOMAIN_JOB_STATUS_FAILED,
    VIR_DOMAIN_JOB_STATUS_CANCELED,
} virDomainJobStatus;

/* Callbacks each hypervisor driver registers so the generic code can
 * manage the opaque privateData member of virDomainJobData. */
typedef void *(*virDomainJobDataPrivateDataAlloc) (void);
typedef void *(*virDomainJobDataPrivateDataCopy) (void *);
typedef void (*virDomainJobDataPrivateDataFree) (void *);

typedef struct _virDomainJobDataPrivateDataCallbacks virDomainJobDataPrivateDataCallbacks;
struct _virDomainJobDataPrivateDataCallbacks {
    virDomainJobDataPrivateDataAlloc allocPrivateData;  /* allocate driver-specific stats */
    virDomainJobDataPrivateDataCopy copyPrivateData;    /* deep-copy driver-specific stats */
    virDomainJobDataPrivateDataFree freePrivateData;    /* release driver-specific stats */
};

/* Hypervisor-agnostic job progress/statistics data.
 * virDomainJobType and virDomainJobOperation presumably come in via
 * internal.h from the public libvirt API — confirm against that header. */
typedef struct _virDomainJobData virDomainJobData;
struct _virDomainJobData {
    virDomainJobType jobType;

    virDomainJobStatus status;
    virDomainJobOperation operation;
    unsigned long long started; /* When the async job started */
    unsigned long long stopped; /* When the domain's CPUs were stopped */
    unsigned long long sent; /* When the source sent status info to the
                                destination (only for migrations). */
    unsigned long long received; /* When the destination host received status
                                    info from the source (migrations only). */
    /* Computed values */
    unsigned long long timeElapsed;
    long long timeDelta; /* delta = received - sent, i.e., the difference between
                            the source and the destination time plus the time
                            between the end of Perform phase on the source and
                            the beginning of Finish phase on the destination. */
    bool timeDeltaSet;

    char *errmsg; /* optional error message for failed completed jobs */

    void *privateData; /* private data of hypervisors */
    virDomainJobDataPrivateDataCallbacks *privateDataCb; /* callbacks of private data, hypervisor based */
};

/* Allocate job data; @cb may be NULL when the driver keeps no private stats. */
virDomainJobData *
virDomainJobDataInit(virDomainJobDataPrivateDataCallbacks *cb);

/* Free job data including private data and errmsg; NULL is a no-op. */
void
virDomainJobDataFree(virDomainJobData *data);

G_DEFINE_AUTOPTR_CLEANUP_FUNC(virDomainJobData, virDomainJobDataFree);

/* Deep-copy job data, duplicating private data and errmsg. */
virDomainJobData *
virDomainJobDataCopy(virDomainJobData *data);

/* Map the internal status onto the coarse public job type. */
virDomainJobType
virDomainJobStatusToType(virDomainJobStatus status);

View File

@ -3,6 +3,7 @@ hypervisor_sources = [
'domain_driver.c', 'domain_driver.c',
'virclosecallbacks.c', 'virclosecallbacks.c',
'virhostdev.c', 'virhostdev.c',
'domain_job.c',
] ]
stateful_driver_source_files += files(hypervisor_sources) stateful_driver_source_files += files(hypervisor_sources)

View File

@ -1577,6 +1577,13 @@ virDomainDriverParseBlkioDeviceStr;
virDomainDriverSetupPersistentDefBlkioParams; virDomainDriverSetupPersistentDefBlkioParams;
# hypervisor/domain_job.h
virDomainJobDataCopy;
virDomainJobDataFree;
virDomainJobDataInit;
virDomainJobStatusToType;
# hypervisor/virclosecallbacks.h # hypervisor/virclosecallbacks.h
virCloseCallbacksGet; virCloseCallbacksGet;
virCloseCallbacksGetConn; virCloseCallbacksGetConn;

View File

@ -555,7 +555,7 @@ qemuBackupBeginPullExportDisks(virDomainObj *vm,
void void
qemuBackupJobTerminate(virDomainObj *vm, qemuBackupJobTerminate(virDomainObj *vm,
qemuDomainJobStatus jobstatus) virDomainJobStatus jobstatus)
{ {
qemuDomainObjPrivate *priv = vm->privateData; qemuDomainObjPrivate *priv = vm->privateData;
@ -583,7 +583,7 @@ qemuBackupJobTerminate(virDomainObj *vm,
!(priv->backup->apiFlags & VIR_DOMAIN_BACKUP_BEGIN_REUSE_EXTERNAL) && !(priv->backup->apiFlags & VIR_DOMAIN_BACKUP_BEGIN_REUSE_EXTERNAL) &&
(priv->backup->type == VIR_DOMAIN_BACKUP_TYPE_PULL || (priv->backup->type == VIR_DOMAIN_BACKUP_TYPE_PULL ||
(priv->backup->type == VIR_DOMAIN_BACKUP_TYPE_PUSH && (priv->backup->type == VIR_DOMAIN_BACKUP_TYPE_PUSH &&
jobstatus != QEMU_DOMAIN_JOB_STATUS_COMPLETED))) { jobstatus != VIR_DOMAIN_JOB_STATUS_COMPLETED))) {
uid_t uid; uid_t uid;
gid_t gid; gid_t gid;
@ -600,15 +600,19 @@ qemuBackupJobTerminate(virDomainObj *vm,
} }
if (priv->job.current) { if (priv->job.current) {
qemuDomainJobInfoUpdateTime(priv->job.current); qemuDomainJobDataPrivate *privData = NULL;
g_clear_pointer(&priv->job.completed, qemuDomainJobInfoFree); qemuDomainJobDataUpdateTime(priv->job.current);
priv->job.completed = qemuDomainJobInfoCopy(priv->job.current);
priv->job.completed->stats.backup.total = priv->backup->push_total; g_clear_pointer(&priv->job.completed, virDomainJobDataFree);
priv->job.completed->stats.backup.transferred = priv->backup->push_transferred; priv->job.completed = virDomainJobDataCopy(priv->job.current);
priv->job.completed->stats.backup.tmp_used = priv->backup->pull_tmp_used;
priv->job.completed->stats.backup.tmp_total = priv->backup->pull_tmp_total; privData = priv->job.completed->privateData;
privData->stats.backup.total = priv->backup->push_total;
privData->stats.backup.transferred = priv->backup->push_transferred;
privData->stats.backup.tmp_used = priv->backup->pull_tmp_used;
privData->stats.backup.tmp_total = priv->backup->pull_tmp_total;
priv->job.completed->status = jobstatus; priv->job.completed->status = jobstatus;
priv->job.completed->errmsg = g_strdup(priv->backup->errmsg); priv->job.completed->errmsg = g_strdup(priv->backup->errmsg);
@ -686,7 +690,7 @@ qemuBackupJobCancelBlockjobs(virDomainObj *vm,
} }
if (terminatebackup && !has_active) if (terminatebackup && !has_active)
qemuBackupJobTerminate(vm, QEMU_DOMAIN_JOB_STATUS_CANCELED); qemuBackupJobTerminate(vm, VIR_DOMAIN_JOB_STATUS_CANCELED);
} }
@ -741,6 +745,7 @@ qemuBackupBegin(virDomainObj *vm,
unsigned int flags) unsigned int flags)
{ {
qemuDomainObjPrivate *priv = vm->privateData; qemuDomainObjPrivate *priv = vm->privateData;
qemuDomainJobDataPrivate *privData = priv->job.current->privateData;
g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(priv->driver); g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(priv->driver);
g_autoptr(virDomainBackupDef) def = NULL; g_autoptr(virDomainBackupDef) def = NULL;
g_autofree char *suffix = NULL; g_autofree char *suffix = NULL;
@ -794,7 +799,7 @@ qemuBackupBegin(virDomainObj *vm,
qemuDomainObjSetAsyncJobMask(vm, (QEMU_JOB_DEFAULT_MASK | qemuDomainObjSetAsyncJobMask(vm, (QEMU_JOB_DEFAULT_MASK |
JOB_MASK(QEMU_JOB_SUSPEND) | JOB_MASK(QEMU_JOB_SUSPEND) |
JOB_MASK(QEMU_JOB_MODIFY))); JOB_MASK(QEMU_JOB_MODIFY)));
priv->job.current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP; privData->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP;
if (!virDomainObjIsActive(vm)) { if (!virDomainObjIsActive(vm)) {
virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s", virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
@ -984,7 +989,7 @@ qemuBackupNotifyBlockjobEnd(virDomainObj *vm,
bool has_cancelling = false; bool has_cancelling = false;
bool has_cancelled = false; bool has_cancelled = false;
bool has_failed = false; bool has_failed = false;
qemuDomainJobStatus jobstatus = QEMU_DOMAIN_JOB_STATUS_COMPLETED; virDomainJobStatus jobstatus = VIR_DOMAIN_JOB_STATUS_COMPLETED;
virDomainBackupDef *backup = priv->backup; virDomainBackupDef *backup = priv->backup;
size_t i; size_t i;
@ -1081,9 +1086,9 @@ qemuBackupNotifyBlockjobEnd(virDomainObj *vm,
/* all sub-jobs have stopped */ /* all sub-jobs have stopped */
if (has_failed) if (has_failed)
jobstatus = QEMU_DOMAIN_JOB_STATUS_FAILED; jobstatus = VIR_DOMAIN_JOB_STATUS_FAILED;
else if (has_cancelled && backup->type == VIR_DOMAIN_BACKUP_TYPE_PUSH) else if (has_cancelled && backup->type == VIR_DOMAIN_BACKUP_TYPE_PUSH)
jobstatus = QEMU_DOMAIN_JOB_STATUS_CANCELED; jobstatus = VIR_DOMAIN_JOB_STATUS_CANCELED;
qemuBackupJobTerminate(vm, jobstatus); qemuBackupJobTerminate(vm, jobstatus);
} }
@ -1134,9 +1139,10 @@ qemuBackupGetJobInfoStatsUpdateOne(virDomainObj *vm,
int int
qemuBackupGetJobInfoStats(virQEMUDriver *driver, qemuBackupGetJobInfoStats(virQEMUDriver *driver,
virDomainObj *vm, virDomainObj *vm,
qemuDomainJobInfo *jobInfo) virDomainJobData *jobData)
{ {
qemuDomainBackupStats *stats = &jobInfo->stats.backup; qemuDomainJobDataPrivate *privJob = jobData->privateData;
qemuDomainBackupStats *stats = &privJob->stats.backup;
qemuDomainObjPrivate *priv = vm->privateData; qemuDomainObjPrivate *priv = vm->privateData;
qemuMonitorJobInfo **blockjobs = NULL; qemuMonitorJobInfo **blockjobs = NULL;
size_t nblockjobs = 0; size_t nblockjobs = 0;
@ -1150,10 +1156,10 @@ qemuBackupGetJobInfoStats(virQEMUDriver *driver,
return -1; return -1;
} }
if (qemuDomainJobInfoUpdateTime(jobInfo) < 0) if (qemuDomainJobDataUpdateTime(jobData) < 0)
return -1; return -1;
jobInfo->status = QEMU_DOMAIN_JOB_STATUS_ACTIVE; jobData->status = VIR_DOMAIN_JOB_STATUS_ACTIVE;
qemuDomainObjEnterMonitor(driver, vm); qemuDomainObjEnterMonitor(driver, vm);

View File

@ -45,12 +45,12 @@ qemuBackupNotifyBlockjobEnd(virDomainObj *vm,
void void
qemuBackupJobTerminate(virDomainObj *vm, qemuBackupJobTerminate(virDomainObj *vm,
qemuDomainJobStatus jobstatus); virDomainJobStatus jobstatus);
int int
qemuBackupGetJobInfoStats(virQEMUDriver *driver, qemuBackupGetJobInfoStats(virQEMUDriver *driver,
virDomainObj *vm, virDomainObj *vm,
qemuDomainJobInfo *jobInfo); virDomainJobData *jobData);
/* exported for testing */ /* exported for testing */
int int

View File

@ -63,6 +63,38 @@ VIR_ENUM_IMPL(qemuDomainAsyncJob,
"backup", "backup",
); );
static void *
qemuJobDataAllocPrivateData(void)
{
return g_new0(qemuDomainJobDataPrivate, 1);
}
static void *
qemuJobDataCopyPrivateData(void *data)
{
qemuDomainJobDataPrivate *ret = g_new0(qemuDomainJobDataPrivate, 1);
memcpy(ret, data, sizeof(qemuDomainJobDataPrivate));
return ret;
}
static void
qemuJobDataFreePrivateData(void *data)
{
g_free(data);
}
virDomainJobDataPrivateDataCallbacks qemuJobDataPrivateDataCallbacks = {
.allocPrivateData = qemuJobDataAllocPrivateData,
.copyPrivateData = qemuJobDataCopyPrivateData,
.freePrivateData = qemuJobDataFreePrivateData,
};
const char * const char *
qemuDomainAsyncJobPhaseToString(qemuDomainAsyncJob job, qemuDomainAsyncJobPhaseToString(qemuDomainAsyncJob job,
int phase G_GNUC_UNUSED) int phase G_GNUC_UNUSED)
@ -116,26 +148,6 @@ qemuDomainAsyncJobPhaseFromString(qemuDomainAsyncJob job,
} }
void
qemuDomainJobInfoFree(qemuDomainJobInfo *info)
{
g_free(info->errmsg);
g_free(info);
}
qemuDomainJobInfo *
qemuDomainJobInfoCopy(qemuDomainJobInfo *info)
{
qemuDomainJobInfo *ret = g_new0(qemuDomainJobInfo, 1);
memcpy(ret, info, sizeof(*info));
ret->errmsg = g_strdup(info->errmsg);
return ret;
}
void void
qemuDomainEventEmitJobCompleted(virQEMUDriver *driver, qemuDomainEventEmitJobCompleted(virQEMUDriver *driver,
virDomainObj *vm) virDomainObj *vm)
@ -149,7 +161,7 @@ qemuDomainEventEmitJobCompleted(virQEMUDriver *driver,
if (!priv->job.completed) if (!priv->job.completed)
return; return;
if (qemuDomainJobInfoToParams(priv->job.completed, &type, if (qemuDomainJobDataToParams(priv->job.completed, &type,
&params, &nparams) < 0) { &params, &nparams) < 0) {
VIR_WARN("Could not get stats for completed job; domain %s", VIR_WARN("Could not get stats for completed job; domain %s",
vm->def->name); vm->def->name);
@ -216,7 +228,7 @@ qemuDomainObjResetAsyncJob(qemuDomainJobObj *job)
job->mask = QEMU_JOB_DEFAULT_MASK; job->mask = QEMU_JOB_DEFAULT_MASK;
job->abortJob = false; job->abortJob = false;
VIR_FREE(job->error); VIR_FREE(job->error);
g_clear_pointer(&job->current, qemuDomainJobInfoFree); g_clear_pointer(&job->current, virDomainJobDataFree);
job->cb->resetJobPrivate(job->privateData); job->cb->resetJobPrivate(job->privateData);
job->apiFlags = 0; job->apiFlags = 0;
} }
@ -254,8 +266,8 @@ qemuDomainObjClearJob(qemuDomainJobObj *job)
qemuDomainObjResetJob(job); qemuDomainObjResetJob(job);
qemuDomainObjResetAsyncJob(job); qemuDomainObjResetAsyncJob(job);
g_clear_pointer(&job->privateData, job->cb->freeJobPrivate); g_clear_pointer(&job->privateData, job->cb->freeJobPrivate);
g_clear_pointer(&job->current, qemuDomainJobInfoFree); g_clear_pointer(&job->current, virDomainJobDataFree);
g_clear_pointer(&job->completed, qemuDomainJobInfoFree); g_clear_pointer(&job->completed, virDomainJobDataFree);
virCondDestroy(&job->cond); virCondDestroy(&job->cond);
virCondDestroy(&job->asyncCond); virCondDestroy(&job->asyncCond);
} }
@ -268,111 +280,87 @@ qemuDomainTrackJob(qemuDomainJob job)
int int
qemuDomainJobInfoUpdateTime(qemuDomainJobInfo *jobInfo) qemuDomainJobDataUpdateTime(virDomainJobData *jobData)
{ {
unsigned long long now; unsigned long long now;
if (!jobInfo->started) if (!jobData->started)
return 0; return 0;
if (virTimeMillisNow(&now) < 0) if (virTimeMillisNow(&now) < 0)
return -1; return -1;
if (now < jobInfo->started) { if (now < jobData->started) {
VIR_WARN("Async job starts in the future"); VIR_WARN("Async job starts in the future");
jobInfo->started = 0; jobData->started = 0;
return 0; return 0;
} }
jobInfo->timeElapsed = now - jobInfo->started; jobData->timeElapsed = now - jobData->started;
return 0; return 0;
} }
int int
qemuDomainJobInfoUpdateDowntime(qemuDomainJobInfo *jobInfo) qemuDomainJobDataUpdateDowntime(virDomainJobData *jobData)
{ {
unsigned long long now; unsigned long long now;
qemuDomainJobDataPrivate *priv = jobData->privateData;
if (!jobInfo->stopped) if (!jobData->stopped)
return 0; return 0;
if (virTimeMillisNow(&now) < 0) if (virTimeMillisNow(&now) < 0)
return -1; return -1;
if (now < jobInfo->stopped) { if (now < jobData->stopped) {
VIR_WARN("Guest's CPUs stopped in the future"); VIR_WARN("Guest's CPUs stopped in the future");
jobInfo->stopped = 0; jobData->stopped = 0;
return 0; return 0;
} }
jobInfo->stats.mig.downtime = now - jobInfo->stopped; priv->stats.mig.downtime = now - jobData->stopped;
jobInfo->stats.mig.downtime_set = true; priv->stats.mig.downtime_set = true;
return 0; return 0;
} }
static virDomainJobType
qemuDomainJobStatusToType(qemuDomainJobStatus status)
{
switch (status) {
case QEMU_DOMAIN_JOB_STATUS_NONE:
break;
case QEMU_DOMAIN_JOB_STATUS_ACTIVE:
case QEMU_DOMAIN_JOB_STATUS_MIGRATING:
case QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED:
case QEMU_DOMAIN_JOB_STATUS_POSTCOPY:
case QEMU_DOMAIN_JOB_STATUS_PAUSED:
return VIR_DOMAIN_JOB_UNBOUNDED;
case QEMU_DOMAIN_JOB_STATUS_COMPLETED:
return VIR_DOMAIN_JOB_COMPLETED;
case QEMU_DOMAIN_JOB_STATUS_FAILED:
return VIR_DOMAIN_JOB_FAILED;
case QEMU_DOMAIN_JOB_STATUS_CANCELED:
return VIR_DOMAIN_JOB_CANCELLED;
}
return VIR_DOMAIN_JOB_NONE;
}
int int
qemuDomainJobInfoToInfo(qemuDomainJobInfo *jobInfo, qemuDomainJobDataToInfo(virDomainJobData *jobData,
virDomainJobInfoPtr info) virDomainJobInfoPtr info)
{ {
info->type = qemuDomainJobStatusToType(jobInfo->status); qemuDomainJobDataPrivate *priv = jobData->privateData;
info->timeElapsed = jobInfo->timeElapsed; info->type = virDomainJobStatusToType(jobData->status);
info->timeElapsed = jobData->timeElapsed;
switch (jobInfo->statsType) { switch (priv->statsType) {
case QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION: case QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION:
info->memTotal = jobInfo->stats.mig.ram_total; info->memTotal = priv->stats.mig.ram_total;
info->memRemaining = jobInfo->stats.mig.ram_remaining; info->memRemaining = priv->stats.mig.ram_remaining;
info->memProcessed = jobInfo->stats.mig.ram_transferred; info->memProcessed = priv->stats.mig.ram_transferred;
info->fileTotal = jobInfo->stats.mig.disk_total + info->fileTotal = priv->stats.mig.disk_total +
jobInfo->mirrorStats.total; priv->mirrorStats.total;
info->fileRemaining = jobInfo->stats.mig.disk_remaining + info->fileRemaining = priv->stats.mig.disk_remaining +
(jobInfo->mirrorStats.total - (priv->mirrorStats.total -
jobInfo->mirrorStats.transferred); priv->mirrorStats.transferred);
info->fileProcessed = jobInfo->stats.mig.disk_transferred + info->fileProcessed = priv->stats.mig.disk_transferred +
jobInfo->mirrorStats.transferred; priv->mirrorStats.transferred;
break; break;
case QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP: case QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP:
info->memTotal = jobInfo->stats.mig.ram_total; info->memTotal = priv->stats.mig.ram_total;
info->memRemaining = jobInfo->stats.mig.ram_remaining; info->memRemaining = priv->stats.mig.ram_remaining;
info->memProcessed = jobInfo->stats.mig.ram_transferred; info->memProcessed = priv->stats.mig.ram_transferred;
break; break;
case QEMU_DOMAIN_JOB_STATS_TYPE_MEMDUMP: case QEMU_DOMAIN_JOB_STATS_TYPE_MEMDUMP:
info->memTotal = jobInfo->stats.dump.total; info->memTotal = priv->stats.dump.total;
info->memProcessed = jobInfo->stats.dump.completed; info->memProcessed = priv->stats.dump.completed;
info->memRemaining = info->memTotal - info->memProcessed; info->memRemaining = info->memTotal - info->memProcessed;
break; break;
case QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP: case QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP:
info->fileTotal = jobInfo->stats.backup.total; info->fileTotal = priv->stats.backup.total;
info->fileProcessed = jobInfo->stats.backup.transferred; info->fileProcessed = priv->stats.backup.transferred;
info->fileRemaining = info->fileTotal - info->fileProcessed; info->fileRemaining = info->fileTotal - info->fileProcessed;
break; break;
@ -389,13 +377,14 @@ qemuDomainJobInfoToInfo(qemuDomainJobInfo *jobInfo,
static int static int
qemuDomainMigrationJobInfoToParams(qemuDomainJobInfo *jobInfo, qemuDomainMigrationJobDataToParams(virDomainJobData *jobData,
int *type, int *type,
virTypedParameterPtr *params, virTypedParameterPtr *params,
int *nparams) int *nparams)
{ {
qemuMonitorMigrationStats *stats = &jobInfo->stats.mig; qemuDomainJobDataPrivate *priv = jobData->privateData;
qemuDomainMirrorStats *mirrorStats = &jobInfo->mirrorStats; qemuMonitorMigrationStats *stats = &priv->stats.mig;
qemuDomainMirrorStats *mirrorStats = &priv->mirrorStats;
virTypedParameterPtr par = NULL; virTypedParameterPtr par = NULL;
int maxpar = 0; int maxpar = 0;
int npar = 0; int npar = 0;
@ -404,19 +393,19 @@ qemuDomainMigrationJobInfoToParams(qemuDomainJobInfo *jobInfo,
if (virTypedParamsAddInt(&par, &npar, &maxpar, if (virTypedParamsAddInt(&par, &npar, &maxpar,
VIR_DOMAIN_JOB_OPERATION, VIR_DOMAIN_JOB_OPERATION,
jobInfo->operation) < 0) jobData->operation) < 0)
goto error; goto error;
if (virTypedParamsAddULLong(&par, &npar, &maxpar, if (virTypedParamsAddULLong(&par, &npar, &maxpar,
VIR_DOMAIN_JOB_TIME_ELAPSED, VIR_DOMAIN_JOB_TIME_ELAPSED,
jobInfo->timeElapsed) < 0) jobData->timeElapsed) < 0)
goto error; goto error;
if (jobInfo->timeDeltaSet && if (jobData->timeDeltaSet &&
jobInfo->timeElapsed > jobInfo->timeDelta && jobData->timeElapsed > jobData->timeDelta &&
virTypedParamsAddULLong(&par, &npar, &maxpar, virTypedParamsAddULLong(&par, &npar, &maxpar,
VIR_DOMAIN_JOB_TIME_ELAPSED_NET, VIR_DOMAIN_JOB_TIME_ELAPSED_NET,
jobInfo->timeElapsed - jobInfo->timeDelta) < 0) jobData->timeElapsed - jobData->timeDelta) < 0)
goto error; goto error;
if (stats->downtime_set && if (stats->downtime_set &&
@ -426,11 +415,11 @@ qemuDomainMigrationJobInfoToParams(qemuDomainJobInfo *jobInfo,
goto error; goto error;
if (stats->downtime_set && if (stats->downtime_set &&
jobInfo->timeDeltaSet && jobData->timeDeltaSet &&
stats->downtime > jobInfo->timeDelta && stats->downtime > jobData->timeDelta &&
virTypedParamsAddULLong(&par, &npar, &maxpar, virTypedParamsAddULLong(&par, &npar, &maxpar,
VIR_DOMAIN_JOB_DOWNTIME_NET, VIR_DOMAIN_JOB_DOWNTIME_NET,
stats->downtime - jobInfo->timeDelta) < 0) stats->downtime - jobData->timeDelta) < 0)
goto error; goto error;
if (stats->setup_time_set && if (stats->setup_time_set &&
@ -505,7 +494,7 @@ qemuDomainMigrationJobInfoToParams(qemuDomainJobInfo *jobInfo,
/* The remaining stats are disk, mirror, or migration specific /* The remaining stats are disk, mirror, or migration specific
* so if this is a SAVEDUMP, we can just skip them */ * so if this is a SAVEDUMP, we can just skip them */
if (jobInfo->statsType == QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP) if (priv->statsType == QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP)
goto done; goto done;
if (virTypedParamsAddULLong(&par, &npar, &maxpar, if (virTypedParamsAddULLong(&par, &npar, &maxpar,
@ -554,7 +543,7 @@ qemuDomainMigrationJobInfoToParams(qemuDomainJobInfo *jobInfo,
goto error; goto error;
done: done:
*type = qemuDomainJobStatusToType(jobInfo->status); *type = virDomainJobStatusToType(jobData->status);
*params = par; *params = par;
*nparams = npar; *nparams = npar;
return 0; return 0;
@ -566,24 +555,25 @@ qemuDomainMigrationJobInfoToParams(qemuDomainJobInfo *jobInfo,
static int static int
qemuDomainDumpJobInfoToParams(qemuDomainJobInfo *jobInfo, qemuDomainDumpJobDataToParams(virDomainJobData *jobData,
int *type, int *type,
virTypedParameterPtr *params, virTypedParameterPtr *params,
int *nparams) int *nparams)
{ {
qemuMonitorDumpStats *stats = &jobInfo->stats.dump; qemuDomainJobDataPrivate *priv = jobData->privateData;
qemuMonitorDumpStats *stats = &priv->stats.dump;
virTypedParameterPtr par = NULL; virTypedParameterPtr par = NULL;
int maxpar = 0; int maxpar = 0;
int npar = 0; int npar = 0;
if (virTypedParamsAddInt(&par, &npar, &maxpar, if (virTypedParamsAddInt(&par, &npar, &maxpar,
VIR_DOMAIN_JOB_OPERATION, VIR_DOMAIN_JOB_OPERATION,
jobInfo->operation) < 0) jobData->operation) < 0)
goto error; goto error;
if (virTypedParamsAddULLong(&par, &npar, &maxpar, if (virTypedParamsAddULLong(&par, &npar, &maxpar,
VIR_DOMAIN_JOB_TIME_ELAPSED, VIR_DOMAIN_JOB_TIME_ELAPSED,
jobInfo->timeElapsed) < 0) jobData->timeElapsed) < 0)
goto error; goto error;
if (virTypedParamsAddULLong(&par, &npar, &maxpar, if (virTypedParamsAddULLong(&par, &npar, &maxpar,
@ -597,7 +587,7 @@ qemuDomainDumpJobInfoToParams(qemuDomainJobInfo *jobInfo,
stats->total - stats->completed) < 0) stats->total - stats->completed) < 0)
goto error; goto error;
*type = qemuDomainJobStatusToType(jobInfo->status); *type = virDomainJobStatusToType(jobData->status);
*params = par; *params = par;
*nparams = npar; *nparams = npar;
return 0; return 0;
@ -609,19 +599,20 @@ qemuDomainDumpJobInfoToParams(qemuDomainJobInfo *jobInfo,
static int static int
qemuDomainBackupJobInfoToParams(qemuDomainJobInfo *jobInfo, qemuDomainBackupJobDataToParams(virDomainJobData *jobData,
int *type, int *type,
virTypedParameterPtr *params, virTypedParameterPtr *params,
int *nparams) int *nparams)
{ {
qemuDomainBackupStats *stats = &jobInfo->stats.backup; qemuDomainJobDataPrivate *priv = jobData->privateData;
qemuDomainBackupStats *stats = &priv->stats.backup;
g_autoptr(virTypedParamList) par = g_new0(virTypedParamList, 1); g_autoptr(virTypedParamList) par = g_new0(virTypedParamList, 1);
if (virTypedParamListAddInt(par, jobInfo->operation, if (virTypedParamListAddInt(par, jobData->operation,
VIR_DOMAIN_JOB_OPERATION) < 0) VIR_DOMAIN_JOB_OPERATION) < 0)
return -1; return -1;
if (virTypedParamListAddULLong(par, jobInfo->timeElapsed, if (virTypedParamListAddULLong(par, jobData->timeElapsed,
VIR_DOMAIN_JOB_TIME_ELAPSED) < 0) VIR_DOMAIN_JOB_TIME_ELAPSED) < 0)
return -1; return -1;
@ -649,38 +640,40 @@ qemuDomainBackupJobInfoToParams(qemuDomainJobInfo *jobInfo,
return -1; return -1;
} }
if (jobInfo->status != QEMU_DOMAIN_JOB_STATUS_ACTIVE && if (jobData->status != VIR_DOMAIN_JOB_STATUS_ACTIVE &&
virTypedParamListAddBoolean(par, virTypedParamListAddBoolean(par,
jobInfo->status == QEMU_DOMAIN_JOB_STATUS_COMPLETED, jobData->status == VIR_DOMAIN_JOB_STATUS_COMPLETED,
VIR_DOMAIN_JOB_SUCCESS) < 0) VIR_DOMAIN_JOB_SUCCESS) < 0)
return -1; return -1;
if (jobInfo->errmsg && if (jobData->errmsg &&
virTypedParamListAddString(par, jobInfo->errmsg, VIR_DOMAIN_JOB_ERRMSG) < 0) virTypedParamListAddString(par, jobData->errmsg, VIR_DOMAIN_JOB_ERRMSG) < 0)
return -1; return -1;
*nparams = virTypedParamListStealParams(par, params); *nparams = virTypedParamListStealParams(par, params);
*type = qemuDomainJobStatusToType(jobInfo->status); *type = virDomainJobStatusToType(jobData->status);
return 0; return 0;
} }
int int
qemuDomainJobInfoToParams(qemuDomainJobInfo *jobInfo, qemuDomainJobDataToParams(virDomainJobData *jobData,
int *type, int *type,
virTypedParameterPtr *params, virTypedParameterPtr *params,
int *nparams) int *nparams)
{ {
switch (jobInfo->statsType) { qemuDomainJobDataPrivate *priv = jobData->privateData;
switch (priv->statsType) {
case QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION: case QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION:
case QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP: case QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP:
return qemuDomainMigrationJobInfoToParams(jobInfo, type, params, nparams); return qemuDomainMigrationJobDataToParams(jobData, type, params, nparams);
case QEMU_DOMAIN_JOB_STATS_TYPE_MEMDUMP: case QEMU_DOMAIN_JOB_STATS_TYPE_MEMDUMP:
return qemuDomainDumpJobInfoToParams(jobInfo, type, params, nparams); return qemuDomainDumpJobDataToParams(jobData, type, params, nparams);
case QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP: case QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP:
return qemuDomainBackupJobInfoToParams(jobInfo, type, params, nparams); return qemuDomainBackupJobDataToParams(jobData, type, params, nparams);
case QEMU_DOMAIN_JOB_STATS_TYPE_NONE: case QEMU_DOMAIN_JOB_STATS_TYPE_NONE:
virReportError(VIR_ERR_INTERNAL_ERROR, "%s", virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
@ -688,7 +681,7 @@ qemuDomainJobInfoToParams(qemuDomainJobInfo *jobInfo,
break; break;
default: default:
virReportEnumRangeError(qemuDomainJobStatsType, jobInfo->statsType); virReportEnumRangeError(qemuDomainJobStatsType, priv->statsType);
break; break;
} }
@ -895,8 +888,8 @@ qemuDomainObjBeginJobInternal(virQEMUDriver *driver,
qemuDomainAsyncJobTypeToString(asyncJob), qemuDomainAsyncJobTypeToString(asyncJob),
obj, obj->def->name); obj, obj->def->name);
qemuDomainObjResetAsyncJob(&priv->job); qemuDomainObjResetAsyncJob(&priv->job);
priv->job.current = g_new0(qemuDomainJobInfo, 1); priv->job.current = virDomainJobDataInit(&qemuJobDataPrivateDataCallbacks);
priv->job.current->status = QEMU_DOMAIN_JOB_STATUS_ACTIVE; priv->job.current->status = VIR_DOMAIN_JOB_STATUS_ACTIVE;
priv->job.asyncJob = asyncJob; priv->job.asyncJob = asyncJob;
priv->job.asyncOwner = virThreadSelfID(); priv->job.asyncOwner = virThreadSelfID();
priv->job.asyncOwnerAPI = g_strdup(virThreadJobGet()); priv->job.asyncOwnerAPI = g_strdup(virThreadJobGet());

View File

@ -20,6 +20,7 @@
#include <glib-object.h> #include <glib-object.h>
#include "qemu_monitor.h" #include "qemu_monitor.h"
#include "domain_job.h"
#define JOB_MASK(job) (job == 0 ? 0 : 1 << (job - 1)) #define JOB_MASK(job) (job == 0 ? 0 : 1 << (job - 1))
#define QEMU_JOB_DEFAULT_MASK \ #define QEMU_JOB_DEFAULT_MASK \
@ -79,17 +80,6 @@ typedef enum {
} qemuDomainAsyncJob; } qemuDomainAsyncJob;
VIR_ENUM_DECL(qemuDomainAsyncJob); VIR_ENUM_DECL(qemuDomainAsyncJob);
typedef enum {
QEMU_DOMAIN_JOB_STATUS_NONE = 0,
QEMU_DOMAIN_JOB_STATUS_ACTIVE,
QEMU_DOMAIN_JOB_STATUS_MIGRATING,
QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED,
QEMU_DOMAIN_JOB_STATUS_PAUSED,
QEMU_DOMAIN_JOB_STATUS_POSTCOPY,
QEMU_DOMAIN_JOB_STATUS_COMPLETED,
QEMU_DOMAIN_JOB_STATUS_FAILED,
QEMU_DOMAIN_JOB_STATUS_CANCELED,
} qemuDomainJobStatus;
typedef enum { typedef enum {
QEMU_DOMAIN_JOB_STATS_TYPE_NONE = 0, QEMU_DOMAIN_JOB_STATS_TYPE_NONE = 0,
@ -114,24 +104,8 @@ struct _qemuDomainBackupStats {
unsigned long long tmp_total; unsigned long long tmp_total;
}; };
typedef struct _qemuDomainJobInfo qemuDomainJobInfo; typedef struct _qemuDomainJobDataPrivate qemuDomainJobDataPrivate;
struct _qemuDomainJobInfo { struct _qemuDomainJobDataPrivate {
qemuDomainJobStatus status;
virDomainJobOperation operation;
unsigned long long started; /* When the async job started */
unsigned long long stopped; /* When the domain's CPUs were stopped */
unsigned long long sent; /* When the source sent status info to the
destination (only for migrations). */
unsigned long long received; /* When the destination host received status
info from the source (migrations only). */
/* Computed values */
unsigned long long timeElapsed;
long long timeDelta; /* delta = received - sent, i.e., the difference
between the source and the destination time plus
the time between the end of Perform phase on the
source and the beginning of Finish phase on the
destination. */
bool timeDeltaSet;
/* Raw values from QEMU */ /* Raw values from QEMU */
qemuDomainJobStatsType statsType; qemuDomainJobStatsType statsType;
union { union {
@ -140,17 +114,9 @@ struct _qemuDomainJobInfo {
qemuDomainBackupStats backup; qemuDomainBackupStats backup;
} stats; } stats;
qemuDomainMirrorStats mirrorStats; qemuDomainMirrorStats mirrorStats;
char *errmsg; /* optional error message for failed completed jobs */
}; };
void extern virDomainJobDataPrivateDataCallbacks qemuJobDataPrivateDataCallbacks;
qemuDomainJobInfoFree(qemuDomainJobInfo *info);
G_DEFINE_AUTOPTR_CLEANUP_FUNC(qemuDomainJobInfo, qemuDomainJobInfoFree);
qemuDomainJobInfo *
qemuDomainJobInfoCopy(qemuDomainJobInfo *info);
typedef struct _qemuDomainJobObj qemuDomainJobObj; typedef struct _qemuDomainJobObj qemuDomainJobObj;
@ -198,8 +164,8 @@ struct _qemuDomainJobObj {
unsigned long long asyncStarted; /* When the current async job started */ unsigned long long asyncStarted; /* When the current async job started */
int phase; /* Job phase (mainly for migrations) */ int phase; /* Job phase (mainly for migrations) */
unsigned long long mask; /* Jobs allowed during async job */ unsigned long long mask; /* Jobs allowed during async job */
qemuDomainJobInfo *current; /* async job progress data */ virDomainJobData *current; /* async job progress data */
qemuDomainJobInfo *completed; /* statistics data of a recently completed job */ virDomainJobData *completed; /* statistics data of a recently completed job */
bool abortJob; /* abort of the job requested */ bool abortJob; /* abort of the job requested */
char *error; /* job event completion error */ char *error; /* job event completion error */
unsigned long apiFlags; /* flags passed to the API which started the async job */ unsigned long apiFlags; /* flags passed to the API which started the async job */
@ -256,14 +222,14 @@ void qemuDomainObjDiscardAsyncJob(virQEMUDriver *driver,
virDomainObj *obj); virDomainObj *obj);
void qemuDomainObjReleaseAsyncJob(virDomainObj *obj); void qemuDomainObjReleaseAsyncJob(virDomainObj *obj);
int qemuDomainJobInfoUpdateTime(qemuDomainJobInfo *jobInfo) int qemuDomainJobDataUpdateTime(virDomainJobData *jobData)
ATTRIBUTE_NONNULL(1); ATTRIBUTE_NONNULL(1);
int qemuDomainJobInfoUpdateDowntime(qemuDomainJobInfo *jobInfo) int qemuDomainJobDataUpdateDowntime(virDomainJobData *jobData)
ATTRIBUTE_NONNULL(1); ATTRIBUTE_NONNULL(1);
int qemuDomainJobInfoToInfo(qemuDomainJobInfo *jobInfo, int qemuDomainJobDataToInfo(virDomainJobData *jobData,
virDomainJobInfoPtr info) virDomainJobInfoPtr info)
ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2); ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2);
int qemuDomainJobInfoToParams(qemuDomainJobInfo *jobInfo, int qemuDomainJobDataToParams(virDomainJobData *jobData,
int *type, int *type,
virTypedParameterPtr *params, virTypedParameterPtr *params,
int *nparams) int *nparams)

View File

@ -2637,6 +2637,7 @@ qemuDomainSaveInternal(virQEMUDriver *driver,
int ret = -1; int ret = -1;
virObjectEvent *event = NULL; virObjectEvent *event = NULL;
qemuDomainObjPrivate *priv = vm->privateData; qemuDomainObjPrivate *priv = vm->privateData;
qemuDomainJobDataPrivate *privJobCurrent = priv->job.current->privateData;
virQEMUSaveData *data = NULL; virQEMUSaveData *data = NULL;
g_autoptr(qemuDomainSaveCookie) cookie = NULL; g_autoptr(qemuDomainSaveCookie) cookie = NULL;
@ -2653,7 +2654,7 @@ qemuDomainSaveInternal(virQEMUDriver *driver,
goto endjob; goto endjob;
} }
priv->job.current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP; privJobCurrent->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP;
/* Pause */ /* Pause */
if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) { if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
@ -2946,6 +2947,7 @@ qemuDumpWaitForCompletion(virDomainObj *vm)
{ {
qemuDomainObjPrivate *priv = vm->privateData; qemuDomainObjPrivate *priv = vm->privateData;
qemuDomainJobPrivate *jobPriv = priv->job.privateData; qemuDomainJobPrivate *jobPriv = priv->job.privateData;
qemuDomainJobDataPrivate *privJobCurrent = priv->job.current->privateData;
VIR_DEBUG("Waiting for dump completion"); VIR_DEBUG("Waiting for dump completion");
while (!jobPriv->dumpCompleted && !priv->job.abortJob) { while (!jobPriv->dumpCompleted && !priv->job.abortJob) {
@ -2953,7 +2955,7 @@ qemuDumpWaitForCompletion(virDomainObj *vm)
return -1; return -1;
} }
if (priv->job.current->stats.dump.status == QEMU_MONITOR_DUMP_STATUS_FAILED) { if (privJobCurrent->stats.dump.status == QEMU_MONITOR_DUMP_STATUS_FAILED) {
if (priv->job.error) if (priv->job.error)
virReportError(VIR_ERR_OPERATION_FAILED, virReportError(VIR_ERR_OPERATION_FAILED,
_("memory-only dump failed: %s"), _("memory-only dump failed: %s"),
@ -2964,7 +2966,7 @@ qemuDumpWaitForCompletion(virDomainObj *vm)
return -1; return -1;
} }
qemuDomainJobInfoUpdateTime(priv->job.current); qemuDomainJobDataUpdateTime(priv->job.current);
return 0; return 0;
} }
@ -2992,10 +2994,13 @@ qemuDumpToFd(virQEMUDriver *driver,
if (qemuSecuritySetImageFDLabel(driver->securityManager, vm->def, fd) < 0) if (qemuSecuritySetImageFDLabel(driver->securityManager, vm->def, fd) < 0)
return -1; return -1;
if (detach) if (detach) {
priv->job.current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_MEMDUMP; qemuDomainJobDataPrivate *privStats = priv->job.current->privateData;
else
g_clear_pointer(&priv->job.current, qemuDomainJobInfoFree); privStats->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_MEMDUMP;
} else {
g_clear_pointer(&priv->job.current, virDomainJobDataFree);
}
if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
return -1; return -1;
@ -3130,6 +3135,7 @@ qemuDomainCoreDumpWithFormat(virDomainPtr dom,
virQEMUDriver *driver = dom->conn->privateData; virQEMUDriver *driver = dom->conn->privateData;
virDomainObj *vm; virDomainObj *vm;
qemuDomainObjPrivate *priv = NULL; qemuDomainObjPrivate *priv = NULL;
qemuDomainJobDataPrivate *privJobCurrent = NULL;
bool resume = false, paused = false; bool resume = false, paused = false;
int ret = -1; int ret = -1;
virObjectEvent *event = NULL; virObjectEvent *event = NULL;
@ -3154,7 +3160,8 @@ qemuDomainCoreDumpWithFormat(virDomainPtr dom,
goto endjob; goto endjob;
priv = vm->privateData; priv = vm->privateData;
priv->job.current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP; privJobCurrent = priv->job.current->privateData;
privJobCurrent->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP;
/* Migrate will always stop the VM, so the resume condition is /* Migrate will always stop the VM, so the resume condition is
independent of whether the stop command is issued. */ independent of whether the stop command is issued. */
@ -12422,28 +12429,30 @@ qemuConnectBaselineHypervisorCPU(virConnectPtr conn,
static int static int
qemuDomainGetJobInfoMigrationStats(virQEMUDriver *driver, qemuDomainGetJobInfoMigrationStats(virQEMUDriver *driver,
virDomainObj *vm, virDomainObj *vm,
qemuDomainJobInfo *jobInfo) virDomainJobData *jobData)
{ {
qemuDomainObjPrivate *priv = vm->privateData; qemuDomainObjPrivate *priv = vm->privateData;
qemuDomainJobDataPrivate *privStats = jobData->privateData;
bool events = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_EVENT); bool events = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_EVENT);
if (jobInfo->status == QEMU_DOMAIN_JOB_STATUS_ACTIVE || if (jobData->status == VIR_DOMAIN_JOB_STATUS_ACTIVE ||
jobInfo->status == QEMU_DOMAIN_JOB_STATUS_MIGRATING || jobData->status == VIR_DOMAIN_JOB_STATUS_MIGRATING ||
jobInfo->status == QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED || jobData->status == VIR_DOMAIN_JOB_STATUS_HYPERVISOR_COMPLETED ||
jobInfo->status == QEMU_DOMAIN_JOB_STATUS_POSTCOPY) { jobData->status == VIR_DOMAIN_JOB_STATUS_POSTCOPY) {
if (events && if (events &&
jobInfo->status != QEMU_DOMAIN_JOB_STATUS_ACTIVE && jobData->status != VIR_DOMAIN_JOB_STATUS_ACTIVE &&
qemuMigrationAnyFetchStats(driver, vm, QEMU_ASYNC_JOB_NONE, qemuMigrationAnyFetchStats(driver, vm, QEMU_ASYNC_JOB_NONE,
jobInfo, NULL) < 0) jobData, NULL) < 0)
return -1; return -1;
if (jobInfo->status == QEMU_DOMAIN_JOB_STATUS_ACTIVE && if (jobData->status == VIR_DOMAIN_JOB_STATUS_ACTIVE &&
jobInfo->statsType == QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION && privStats->statsType == QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION &&
qemuMigrationSrcFetchMirrorStats(driver, vm, QEMU_ASYNC_JOB_NONE, qemuMigrationSrcFetchMirrorStats(driver, vm, QEMU_ASYNC_JOB_NONE,
jobInfo) < 0) jobData) < 0)
return -1; return -1;
if (qemuDomainJobInfoUpdateTime(jobInfo) < 0) if (qemuDomainJobDataUpdateTime(jobData) < 0)
return -1; return -1;
} }
@ -12454,9 +12463,10 @@ qemuDomainGetJobInfoMigrationStats(virQEMUDriver *driver,
static int static int
qemuDomainGetJobInfoDumpStats(virQEMUDriver *driver, qemuDomainGetJobInfoDumpStats(virQEMUDriver *driver,
virDomainObj *vm, virDomainObj *vm,
qemuDomainJobInfo *jobInfo) virDomainJobData *jobData)
{ {
qemuDomainObjPrivate *priv = vm->privateData; qemuDomainObjPrivate *priv = vm->privateData;
qemuDomainJobDataPrivate *privJob = jobData->privateData;
qemuMonitorDumpStats stats = { 0 }; qemuMonitorDumpStats stats = { 0 };
int rc; int rc;
@ -12469,33 +12479,33 @@ qemuDomainGetJobInfoDumpStats(virQEMUDriver *driver,
if (rc < 0) if (rc < 0)
return -1; return -1;
jobInfo->stats.dump = stats; privJob->stats.dump = stats;
if (qemuDomainJobInfoUpdateTime(jobInfo) < 0) if (qemuDomainJobDataUpdateTime(jobData) < 0)
return -1; return -1;
switch (jobInfo->stats.dump.status) { switch (privJob->stats.dump.status) {
case QEMU_MONITOR_DUMP_STATUS_NONE: case QEMU_MONITOR_DUMP_STATUS_NONE:
case QEMU_MONITOR_DUMP_STATUS_FAILED: case QEMU_MONITOR_DUMP_STATUS_FAILED:
case QEMU_MONITOR_DUMP_STATUS_LAST: case QEMU_MONITOR_DUMP_STATUS_LAST:
virReportError(VIR_ERR_OPERATION_FAILED, virReportError(VIR_ERR_OPERATION_FAILED,
_("dump query failed, status=%d"), _("dump query failed, status=%d"),
jobInfo->stats.dump.status); privJob->stats.dump.status);
return -1; return -1;
break; break;
case QEMU_MONITOR_DUMP_STATUS_ACTIVE: case QEMU_MONITOR_DUMP_STATUS_ACTIVE:
jobInfo->status = QEMU_DOMAIN_JOB_STATUS_ACTIVE; jobData->status = VIR_DOMAIN_JOB_STATUS_ACTIVE;
VIR_DEBUG("dump active, bytes written='%llu' remaining='%llu'", VIR_DEBUG("dump active, bytes written='%llu' remaining='%llu'",
jobInfo->stats.dump.completed, privJob->stats.dump.completed,
jobInfo->stats.dump.total - privJob->stats.dump.total -
jobInfo->stats.dump.completed); privJob->stats.dump.completed);
break; break;
case QEMU_MONITOR_DUMP_STATUS_COMPLETED: case QEMU_MONITOR_DUMP_STATUS_COMPLETED:
jobInfo->status = QEMU_DOMAIN_JOB_STATUS_COMPLETED; jobData->status = VIR_DOMAIN_JOB_STATUS_COMPLETED;
VIR_DEBUG("dump completed, bytes written='%llu'", VIR_DEBUG("dump completed, bytes written='%llu'",
jobInfo->stats.dump.completed); privJob->stats.dump.completed);
break; break;
} }
@ -12507,16 +12517,17 @@ static int
qemuDomainGetJobStatsInternal(virQEMUDriver *driver, qemuDomainGetJobStatsInternal(virQEMUDriver *driver,
virDomainObj *vm, virDomainObj *vm,
bool completed, bool completed,
qemuDomainJobInfo **jobInfo) virDomainJobData **jobData)
{ {
qemuDomainObjPrivate *priv = vm->privateData; qemuDomainObjPrivate *priv = vm->privateData;
qemuDomainJobDataPrivate *privStats = NULL;
int ret = -1; int ret = -1;
*jobInfo = NULL; *jobData = NULL;
if (completed) { if (completed) {
if (priv->job.completed && !priv->job.current) if (priv->job.completed && !priv->job.current)
*jobInfo = qemuDomainJobInfoCopy(priv->job.completed); *jobData = virDomainJobDataCopy(priv->job.completed);
return 0; return 0;
} }
@ -12538,22 +12549,24 @@ qemuDomainGetJobStatsInternal(virQEMUDriver *driver,
ret = 0; ret = 0;
goto cleanup; goto cleanup;
} }
*jobInfo = qemuDomainJobInfoCopy(priv->job.current); *jobData = virDomainJobDataCopy(priv->job.current);
switch ((*jobInfo)->statsType) { privStats = (*jobData)->privateData;
switch (privStats->statsType) {
case QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION: case QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION:
case QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP: case QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP:
if (qemuDomainGetJobInfoMigrationStats(driver, vm, *jobInfo) < 0) if (qemuDomainGetJobInfoMigrationStats(driver, vm, *jobData) < 0)
goto cleanup; goto cleanup;
break; break;
case QEMU_DOMAIN_JOB_STATS_TYPE_MEMDUMP: case QEMU_DOMAIN_JOB_STATS_TYPE_MEMDUMP:
if (qemuDomainGetJobInfoDumpStats(driver, vm, *jobInfo) < 0) if (qemuDomainGetJobInfoDumpStats(driver, vm, *jobData) < 0)
goto cleanup; goto cleanup;
break; break;
case QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP: case QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP:
if (qemuBackupGetJobInfoStats(driver, vm, *jobInfo) < 0) if (qemuBackupGetJobInfoStats(driver, vm, *jobData) < 0)
goto cleanup; goto cleanup;
break; break;
@ -12574,7 +12587,7 @@ qemuDomainGetJobInfo(virDomainPtr dom,
virDomainJobInfoPtr info) virDomainJobInfoPtr info)
{ {
virQEMUDriver *driver = dom->conn->privateData; virQEMUDriver *driver = dom->conn->privateData;
g_autoptr(qemuDomainJobInfo) jobInfo = NULL; g_autoptr(virDomainJobData) jobData = NULL;
virDomainObj *vm; virDomainObj *vm;
int ret = -1; int ret = -1;
@ -12586,16 +12599,16 @@ qemuDomainGetJobInfo(virDomainPtr dom,
if (virDomainGetJobInfoEnsureACL(dom->conn, vm->def) < 0) if (virDomainGetJobInfoEnsureACL(dom->conn, vm->def) < 0)
goto cleanup; goto cleanup;
if (qemuDomainGetJobStatsInternal(driver, vm, false, &jobInfo) < 0) if (qemuDomainGetJobStatsInternal(driver, vm, false, &jobData) < 0)
goto cleanup; goto cleanup;
if (!jobInfo || if (!jobData ||
jobInfo->status == QEMU_DOMAIN_JOB_STATUS_NONE) { jobData->status == VIR_DOMAIN_JOB_STATUS_NONE) {
ret = 0; ret = 0;
goto cleanup; goto cleanup;
} }
ret = qemuDomainJobInfoToInfo(jobInfo, info); ret = qemuDomainJobDataToInfo(jobData, info);
cleanup: cleanup:
virDomainObjEndAPI(&vm); virDomainObjEndAPI(&vm);
@ -12613,7 +12626,7 @@ qemuDomainGetJobStats(virDomainPtr dom,
virQEMUDriver *driver = dom->conn->privateData; virQEMUDriver *driver = dom->conn->privateData;
virDomainObj *vm; virDomainObj *vm;
qemuDomainObjPrivate *priv; qemuDomainObjPrivate *priv;
g_autoptr(qemuDomainJobInfo) jobInfo = NULL; g_autoptr(virDomainJobData) jobData = NULL;
bool completed = !!(flags & VIR_DOMAIN_JOB_STATS_COMPLETED); bool completed = !!(flags & VIR_DOMAIN_JOB_STATS_COMPLETED);
int ret = -1; int ret = -1;
@ -12627,11 +12640,11 @@ qemuDomainGetJobStats(virDomainPtr dom,
goto cleanup; goto cleanup;
priv = vm->privateData; priv = vm->privateData;
if (qemuDomainGetJobStatsInternal(driver, vm, completed, &jobInfo) < 0) if (qemuDomainGetJobStatsInternal(driver, vm, completed, &jobData) < 0)
goto cleanup; goto cleanup;
if (!jobInfo || if (!jobData ||
jobInfo->status == QEMU_DOMAIN_JOB_STATUS_NONE) { jobData->status == VIR_DOMAIN_JOB_STATUS_NONE) {
*type = VIR_DOMAIN_JOB_NONE; *type = VIR_DOMAIN_JOB_NONE;
*params = NULL; *params = NULL;
*nparams = 0; *nparams = 0;
@ -12639,10 +12652,10 @@ qemuDomainGetJobStats(virDomainPtr dom,
goto cleanup; goto cleanup;
} }
ret = qemuDomainJobInfoToParams(jobInfo, type, params, nparams); ret = qemuDomainJobDataToParams(jobData, type, params, nparams);
if (completed && ret == 0 && !(flags & VIR_DOMAIN_JOB_STATS_KEEP_COMPLETED)) if (completed && ret == 0 && !(flags & VIR_DOMAIN_JOB_STATS_KEEP_COMPLETED))
g_clear_pointer(&priv->job.completed, qemuDomainJobInfoFree); g_clear_pointer(&priv->job.completed, virDomainJobDataFree);
cleanup: cleanup:
virDomainObjEndAPI(&vm); virDomainObjEndAPI(&vm);
@ -12708,7 +12721,7 @@ static int qemuDomainAbortJob(virDomainPtr dom)
break; break;
case QEMU_ASYNC_JOB_MIGRATION_OUT: case QEMU_ASYNC_JOB_MIGRATION_OUT:
if ((priv->job.current->status == QEMU_DOMAIN_JOB_STATUS_POSTCOPY || if ((priv->job.current->status == VIR_DOMAIN_JOB_STATUS_POSTCOPY ||
(virDomainObjGetState(vm, &reason) == VIR_DOMAIN_PAUSED && (virDomainObjGetState(vm, &reason) == VIR_DOMAIN_PAUSED &&
reason == VIR_DOMAIN_PAUSED_POSTCOPY))) { reason == VIR_DOMAIN_PAUSED_POSTCOPY))) {
virReportError(VIR_ERR_OPERATION_INVALID, "%s", virReportError(VIR_ERR_OPERATION_INVALID, "%s",

View File

@ -1199,7 +1199,7 @@ qemuMigrationSrcNBDStorageCopy(virQEMUDriver *driver,
return -1; return -1;
if (priv->job.abortJob) { if (priv->job.abortJob) {
priv->job.current->status = QEMU_DOMAIN_JOB_STATUS_CANCELED; priv->job.current->status = VIR_DOMAIN_JOB_STATUS_CANCELED;
virReportError(VIR_ERR_OPERATION_ABORTED, _("%s: %s"), virReportError(VIR_ERR_OPERATION_ABORTED, _("%s: %s"),
qemuDomainAsyncJobTypeToString(priv->job.asyncJob), qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
_("canceled by client")); _("canceled by client"));
@ -1622,35 +1622,37 @@ qemuMigrationSrcWaitForSpice(virDomainObj *vm)
static void static void
qemuMigrationUpdateJobType(qemuDomainJobInfo *jobInfo) qemuMigrationUpdateJobType(virDomainJobData *jobData)
{ {
switch ((qemuMonitorMigrationStatus) jobInfo->stats.mig.status) { qemuDomainJobDataPrivate *priv = jobData->privateData;
switch ((qemuMonitorMigrationStatus) priv->stats.mig.status) {
case QEMU_MONITOR_MIGRATION_STATUS_POSTCOPY: case QEMU_MONITOR_MIGRATION_STATUS_POSTCOPY:
jobInfo->status = QEMU_DOMAIN_JOB_STATUS_POSTCOPY; jobData->status = VIR_DOMAIN_JOB_STATUS_POSTCOPY;
break; break;
case QEMU_MONITOR_MIGRATION_STATUS_COMPLETED: case QEMU_MONITOR_MIGRATION_STATUS_COMPLETED:
jobInfo->status = QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED; jobData->status = VIR_DOMAIN_JOB_STATUS_HYPERVISOR_COMPLETED;
break; break;
case QEMU_MONITOR_MIGRATION_STATUS_INACTIVE: case QEMU_MONITOR_MIGRATION_STATUS_INACTIVE:
jobInfo->status = QEMU_DOMAIN_JOB_STATUS_NONE; jobData->status = VIR_DOMAIN_JOB_STATUS_NONE;
break; break;
case QEMU_MONITOR_MIGRATION_STATUS_ERROR: case QEMU_MONITOR_MIGRATION_STATUS_ERROR:
jobInfo->status = QEMU_DOMAIN_JOB_STATUS_FAILED; jobData->status = VIR_DOMAIN_JOB_STATUS_FAILED;
break; break;
case QEMU_MONITOR_MIGRATION_STATUS_CANCELLED: case QEMU_MONITOR_MIGRATION_STATUS_CANCELLED:
jobInfo->status = QEMU_DOMAIN_JOB_STATUS_CANCELED; jobData->status = VIR_DOMAIN_JOB_STATUS_CANCELED;
break; break;
case QEMU_MONITOR_MIGRATION_STATUS_PRE_SWITCHOVER: case QEMU_MONITOR_MIGRATION_STATUS_PRE_SWITCHOVER:
jobInfo->status = QEMU_DOMAIN_JOB_STATUS_PAUSED; jobData->status = VIR_DOMAIN_JOB_STATUS_PAUSED;
break; break;
case QEMU_MONITOR_MIGRATION_STATUS_DEVICE: case QEMU_MONITOR_MIGRATION_STATUS_DEVICE:
jobInfo->status = QEMU_DOMAIN_JOB_STATUS_MIGRATING; jobData->status = VIR_DOMAIN_JOB_STATUS_MIGRATING;
break; break;
case QEMU_MONITOR_MIGRATION_STATUS_SETUP: case QEMU_MONITOR_MIGRATION_STATUS_SETUP:
@ -1667,11 +1669,12 @@ int
qemuMigrationAnyFetchStats(virQEMUDriver *driver, qemuMigrationAnyFetchStats(virQEMUDriver *driver,
virDomainObj *vm, virDomainObj *vm,
qemuDomainAsyncJob asyncJob, qemuDomainAsyncJob asyncJob,
qemuDomainJobInfo *jobInfo, virDomainJobData *jobData,
char **error) char **error)
{ {
qemuDomainObjPrivate *priv = vm->privateData; qemuDomainObjPrivate *priv = vm->privateData;
qemuMonitorMigrationStats stats; qemuMonitorMigrationStats stats;
qemuDomainJobDataPrivate *privJob = jobData->privateData;
int rv; int rv;
if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
@ -1683,7 +1686,7 @@ qemuMigrationAnyFetchStats(virQEMUDriver *driver,
if (rv < 0) if (rv < 0)
return -1; return -1;
jobInfo->stats.mig = stats; privJob->stats.mig = stats;
return 0; return 0;
} }
@ -1724,41 +1727,42 @@ qemuMigrationJobCheckStatus(virQEMUDriver *driver,
qemuDomainAsyncJob asyncJob) qemuDomainAsyncJob asyncJob)
{ {
qemuDomainObjPrivate *priv = vm->privateData; qemuDomainObjPrivate *priv = vm->privateData;
qemuDomainJobInfo *jobInfo = priv->job.current; virDomainJobData *jobData = priv->job.current;
qemuDomainJobDataPrivate *privJob = jobData->privateData;
g_autofree char *error = NULL; g_autofree char *error = NULL;
bool events = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_EVENT); bool events = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_EVENT);
if (!events || if (!events ||
jobInfo->stats.mig.status == QEMU_MONITOR_MIGRATION_STATUS_ERROR) { privJob->stats.mig.status == QEMU_MONITOR_MIGRATION_STATUS_ERROR) {
if (qemuMigrationAnyFetchStats(driver, vm, asyncJob, jobInfo, &error) < 0) if (qemuMigrationAnyFetchStats(driver, vm, asyncJob, jobData, &error) < 0)
return -1; return -1;
} }
qemuMigrationUpdateJobType(jobInfo); qemuMigrationUpdateJobType(jobData);
switch (jobInfo->status) { switch (jobData->status) {
case QEMU_DOMAIN_JOB_STATUS_NONE: case VIR_DOMAIN_JOB_STATUS_NONE:
virReportError(VIR_ERR_OPERATION_FAILED, _("%s: %s"), virReportError(VIR_ERR_OPERATION_FAILED, _("%s: %s"),
qemuMigrationJobName(vm), _("is not active")); qemuMigrationJobName(vm), _("is not active"));
return -1; return -1;
case QEMU_DOMAIN_JOB_STATUS_FAILED: case VIR_DOMAIN_JOB_STATUS_FAILED:
virReportError(VIR_ERR_OPERATION_FAILED, _("%s: %s"), virReportError(VIR_ERR_OPERATION_FAILED, _("%s: %s"),
qemuMigrationJobName(vm), qemuMigrationJobName(vm),
error ? error : _("unexpectedly failed")); error ? error : _("unexpectedly failed"));
return -1; return -1;
case QEMU_DOMAIN_JOB_STATUS_CANCELED: case VIR_DOMAIN_JOB_STATUS_CANCELED:
virReportError(VIR_ERR_OPERATION_ABORTED, _("%s: %s"), virReportError(VIR_ERR_OPERATION_ABORTED, _("%s: %s"),
qemuMigrationJobName(vm), _("canceled by client")); qemuMigrationJobName(vm), _("canceled by client"));
return -1; return -1;
case QEMU_DOMAIN_JOB_STATUS_COMPLETED: case VIR_DOMAIN_JOB_STATUS_COMPLETED:
case QEMU_DOMAIN_JOB_STATUS_ACTIVE: case VIR_DOMAIN_JOB_STATUS_ACTIVE:
case QEMU_DOMAIN_JOB_STATUS_MIGRATING: case VIR_DOMAIN_JOB_STATUS_MIGRATING:
case QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED: case VIR_DOMAIN_JOB_STATUS_HYPERVISOR_COMPLETED:
case QEMU_DOMAIN_JOB_STATUS_POSTCOPY: case VIR_DOMAIN_JOB_STATUS_POSTCOPY:
case QEMU_DOMAIN_JOB_STATUS_PAUSED: case VIR_DOMAIN_JOB_STATUS_PAUSED:
break; break;
} }
@ -1789,7 +1793,7 @@ qemuMigrationAnyCompleted(virQEMUDriver *driver,
unsigned int flags) unsigned int flags)
{ {
qemuDomainObjPrivate *priv = vm->privateData; qemuDomainObjPrivate *priv = vm->privateData;
qemuDomainJobInfo *jobInfo = priv->job.current; virDomainJobData *jobData = priv->job.current;
int pauseReason; int pauseReason;
if (qemuMigrationJobCheckStatus(driver, vm, asyncJob) < 0) if (qemuMigrationJobCheckStatus(driver, vm, asyncJob) < 0)
@ -1819,7 +1823,7 @@ qemuMigrationAnyCompleted(virQEMUDriver *driver,
* wait again for the real end of the migration. * wait again for the real end of the migration.
*/ */
if (flags & QEMU_MIGRATION_COMPLETED_PRE_SWITCHOVER && if (flags & QEMU_MIGRATION_COMPLETED_PRE_SWITCHOVER &&
jobInfo->status == QEMU_DOMAIN_JOB_STATUS_PAUSED) { jobData->status == VIR_DOMAIN_JOB_STATUS_PAUSED) {
VIR_DEBUG("Migration paused before switchover"); VIR_DEBUG("Migration paused before switchover");
return 1; return 1;
} }
@ -1829,38 +1833,38 @@ qemuMigrationAnyCompleted(virQEMUDriver *driver,
* will continue waiting until the migrate state changes to completed. * will continue waiting until the migrate state changes to completed.
*/ */
if (flags & QEMU_MIGRATION_COMPLETED_POSTCOPY && if (flags & QEMU_MIGRATION_COMPLETED_POSTCOPY &&
jobInfo->status == QEMU_DOMAIN_JOB_STATUS_POSTCOPY) { jobData->status == VIR_DOMAIN_JOB_STATUS_POSTCOPY) {
VIR_DEBUG("Migration switched to post-copy"); VIR_DEBUG("Migration switched to post-copy");
return 1; return 1;
} }
if (jobInfo->status == QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED) if (jobData->status == VIR_DOMAIN_JOB_STATUS_HYPERVISOR_COMPLETED)
return 1; return 1;
else else
return 0; return 0;
error: error:
switch (jobInfo->status) { switch (jobData->status) {
case QEMU_DOMAIN_JOB_STATUS_MIGRATING: case VIR_DOMAIN_JOB_STATUS_MIGRATING:
case QEMU_DOMAIN_JOB_STATUS_POSTCOPY: case VIR_DOMAIN_JOB_STATUS_POSTCOPY:
case QEMU_DOMAIN_JOB_STATUS_PAUSED: case VIR_DOMAIN_JOB_STATUS_PAUSED:
/* The migration was aborted by us rather than QEMU itself. */ /* The migration was aborted by us rather than QEMU itself. */
jobInfo->status = QEMU_DOMAIN_JOB_STATUS_FAILED; jobData->status = VIR_DOMAIN_JOB_STATUS_FAILED;
return -2; return -2;
case QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED: case VIR_DOMAIN_JOB_STATUS_HYPERVISOR_COMPLETED:
/* Something failed after QEMU already finished the migration. */ /* Something failed after QEMU already finished the migration. */
jobInfo->status = QEMU_DOMAIN_JOB_STATUS_FAILED; jobData->status = VIR_DOMAIN_JOB_STATUS_FAILED;
return -1; return -1;
case QEMU_DOMAIN_JOB_STATUS_FAILED: case VIR_DOMAIN_JOB_STATUS_FAILED:
case QEMU_DOMAIN_JOB_STATUS_CANCELED: case VIR_DOMAIN_JOB_STATUS_CANCELED:
/* QEMU aborted the migration. */ /* QEMU aborted the migration. */
return -1; return -1;
case QEMU_DOMAIN_JOB_STATUS_ACTIVE: case VIR_DOMAIN_JOB_STATUS_ACTIVE:
case QEMU_DOMAIN_JOB_STATUS_COMPLETED: case VIR_DOMAIN_JOB_STATUS_COMPLETED:
case QEMU_DOMAIN_JOB_STATUS_NONE: case VIR_DOMAIN_JOB_STATUS_NONE:
/* Impossible. */ /* Impossible. */
break; break;
} }
@ -1880,11 +1884,11 @@ qemuMigrationSrcWaitForCompletion(virQEMUDriver *driver,
unsigned int flags) unsigned int flags)
{ {
qemuDomainObjPrivate *priv = vm->privateData; qemuDomainObjPrivate *priv = vm->privateData;
qemuDomainJobInfo *jobInfo = priv->job.current; virDomainJobData *jobData = priv->job.current;
bool events = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_EVENT); bool events = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_EVENT);
int rv; int rv;
jobInfo->status = QEMU_DOMAIN_JOB_STATUS_MIGRATING; jobData->status = VIR_DOMAIN_JOB_STATUS_MIGRATING;
while ((rv = qemuMigrationAnyCompleted(driver, vm, asyncJob, while ((rv = qemuMigrationAnyCompleted(driver, vm, asyncJob,
dconn, flags)) != 1) { dconn, flags)) != 1) {
@ -1894,7 +1898,7 @@ qemuMigrationSrcWaitForCompletion(virQEMUDriver *driver,
if (events) { if (events) {
if (virDomainObjWait(vm) < 0) { if (virDomainObjWait(vm) < 0) {
if (virDomainObjIsActive(vm)) if (virDomainObjIsActive(vm))
jobInfo->status = QEMU_DOMAIN_JOB_STATUS_FAILED; jobData->status = VIR_DOMAIN_JOB_STATUS_FAILED;
return -2; return -2;
} }
} else { } else {
@ -1908,17 +1912,17 @@ qemuMigrationSrcWaitForCompletion(virQEMUDriver *driver,
} }
if (events) if (events)
ignore_value(qemuMigrationAnyFetchStats(driver, vm, asyncJob, jobInfo, NULL)); ignore_value(qemuMigrationAnyFetchStats(driver, vm, asyncJob, jobData, NULL));
qemuDomainJobInfoUpdateTime(jobInfo); qemuDomainJobDataUpdateTime(jobData);
qemuDomainJobInfoUpdateDowntime(jobInfo); qemuDomainJobDataUpdateDowntime(jobData);
g_clear_pointer(&priv->job.completed, qemuDomainJobInfoFree); g_clear_pointer(&priv->job.completed, virDomainJobDataFree);
priv->job.completed = qemuDomainJobInfoCopy(jobInfo); priv->job.completed = virDomainJobDataCopy(jobData);
priv->job.completed->status = QEMU_DOMAIN_JOB_STATUS_COMPLETED; priv->job.completed->status = VIR_DOMAIN_JOB_STATUS_COMPLETED;
if (asyncJob != QEMU_ASYNC_JOB_MIGRATION_OUT && if (asyncJob != QEMU_ASYNC_JOB_MIGRATION_OUT &&
jobInfo->status == QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED) jobData->status == VIR_DOMAIN_JOB_STATUS_HYPERVISOR_COMPLETED)
jobInfo->status = QEMU_DOMAIN_JOB_STATUS_COMPLETED; jobData->status = VIR_DOMAIN_JOB_STATUS_COMPLETED;
return 0; return 0;
} }
@ -3383,7 +3387,7 @@ qemuMigrationSrcConfirmPhase(virQEMUDriver *driver,
virObjectEvent *event; virObjectEvent *event;
qemuDomainObjPrivate *priv = vm->privateData; qemuDomainObjPrivate *priv = vm->privateData;
qemuDomainJobPrivate *jobPriv = priv->job.privateData; qemuDomainJobPrivate *jobPriv = priv->job.privateData;
qemuDomainJobInfo *jobInfo = NULL; virDomainJobData *jobData = NULL;
VIR_DEBUG("driver=%p, vm=%p, cookiein=%s, cookieinlen=%d, " VIR_DEBUG("driver=%p, vm=%p, cookiein=%s, cookieinlen=%d, "
"flags=0x%x, retcode=%d", "flags=0x%x, retcode=%d",
@ -3403,13 +3407,15 @@ qemuMigrationSrcConfirmPhase(virQEMUDriver *driver,
return -1; return -1;
if (retcode == 0) if (retcode == 0)
jobInfo = priv->job.completed; jobData = priv->job.completed;
else else
g_clear_pointer(&priv->job.completed, qemuDomainJobInfoFree); g_clear_pointer(&priv->job.completed, virDomainJobDataFree);
/* Update times with the values sent by the destination daemon */ /* Update times with the values sent by the destination daemon */
if (mig->jobInfo && jobInfo) { if (mig->jobData && jobData) {
int reason; int reason;
qemuDomainJobDataPrivate *privJob = jobData->privateData;
qemuDomainJobDataPrivate *privMigJob = mig->jobData->privateData;
/* We need to refresh migration statistics after a completed post-copy /* We need to refresh migration statistics after a completed post-copy
* migration since priv->job.completed contains obsolete data from the * migration since priv->job.completed contains obsolete data from the
@ -3418,14 +3424,14 @@ qemuMigrationSrcConfirmPhase(virQEMUDriver *driver,
if (virDomainObjGetState(vm, &reason) == VIR_DOMAIN_PAUSED && if (virDomainObjGetState(vm, &reason) == VIR_DOMAIN_PAUSED &&
reason == VIR_DOMAIN_PAUSED_POSTCOPY && reason == VIR_DOMAIN_PAUSED_POSTCOPY &&
qemuMigrationAnyFetchStats(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT, qemuMigrationAnyFetchStats(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
jobInfo, NULL) < 0) jobData, NULL) < 0)
VIR_WARN("Could not refresh migration statistics"); VIR_WARN("Could not refresh migration statistics");
qemuDomainJobInfoUpdateTime(jobInfo); qemuDomainJobDataUpdateTime(jobData);
jobInfo->timeDeltaSet = mig->jobInfo->timeDeltaSet; jobData->timeDeltaSet = mig->jobData->timeDeltaSet;
jobInfo->timeDelta = mig->jobInfo->timeDelta; jobData->timeDelta = mig->jobData->timeDelta;
jobInfo->stats.mig.downtime_set = mig->jobInfo->stats.mig.downtime_set; privJob->stats.mig.downtime_set = privMigJob->stats.mig.downtime_set;
jobInfo->stats.mig.downtime = mig->jobInfo->stats.mig.downtime; privJob->stats.mig.downtime = privMigJob->stats.mig.downtime;
} }
if (flags & VIR_MIGRATE_OFFLINE) if (flags & VIR_MIGRATE_OFFLINE)
@ -4194,7 +4200,7 @@ qemuMigrationSrcRun(virQEMUDriver *driver,
/* explicitly do this *after* we entered the monitor, /* explicitly do this *after* we entered the monitor,
* as this is a critical section so we are guaranteed * as this is a critical section so we are guaranteed
* priv->job.abortJob will not change */ * priv->job.abortJob will not change */
priv->job.current->status = QEMU_DOMAIN_JOB_STATUS_CANCELED; priv->job.current->status = VIR_DOMAIN_JOB_STATUS_CANCELED;
virReportError(VIR_ERR_OPERATION_ABORTED, _("%s: %s"), virReportError(VIR_ERR_OPERATION_ABORTED, _("%s: %s"),
qemuDomainAsyncJobTypeToString(priv->job.asyncJob), qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
_("canceled by client")); _("canceled by client"));
@ -4309,7 +4315,7 @@ qemuMigrationSrcRun(virQEMUDriver *driver,
* resume it now once we finished all block jobs and wait for the real * resume it now once we finished all block jobs and wait for the real
* end of the migration. * end of the migration.
*/ */
if (priv->job.current->status == QEMU_DOMAIN_JOB_STATUS_PAUSED) { if (priv->job.current->status == VIR_DOMAIN_JOB_STATUS_PAUSED) {
if (qemuMigrationSrcContinue(driver, vm, if (qemuMigrationSrcContinue(driver, vm,
QEMU_MONITOR_MIGRATION_STATUS_PRE_SWITCHOVER, QEMU_MONITOR_MIGRATION_STATUS_PRE_SWITCHOVER,
QEMU_ASYNC_JOB_MIGRATION_OUT) < 0) QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
@ -4339,8 +4345,8 @@ qemuMigrationSrcRun(virQEMUDriver *driver,
if (priv->job.completed) { if (priv->job.completed) {
priv->job.completed->stopped = priv->job.current->stopped; priv->job.completed->stopped = priv->job.current->stopped;
qemuDomainJobInfoUpdateTime(priv->job.completed); qemuDomainJobDataUpdateTime(priv->job.completed);
qemuDomainJobInfoUpdateDowntime(priv->job.completed); qemuDomainJobDataUpdateDowntime(priv->job.completed);
ignore_value(virTimeMillisNow(&priv->job.completed->sent)); ignore_value(virTimeMillisNow(&priv->job.completed->sent));
} }
@ -4370,7 +4376,7 @@ qemuMigrationSrcRun(virQEMUDriver *driver,
if (virDomainObjIsActive(vm)) { if (virDomainObjIsActive(vm)) {
if (cancel && if (cancel &&
priv->job.current->status != QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED && priv->job.current->status != VIR_DOMAIN_JOB_STATUS_HYPERVISOR_COMPLETED &&
qemuDomainObjEnterMonitorAsync(driver, vm, qemuDomainObjEnterMonitorAsync(driver, vm,
QEMU_ASYNC_JOB_MIGRATION_OUT) == 0) { QEMU_ASYNC_JOB_MIGRATION_OUT) == 0) {
qemuMonitorMigrateCancel(priv->mon); qemuMonitorMigrateCancel(priv->mon);
@ -4385,8 +4391,8 @@ qemuMigrationSrcRun(virQEMUDriver *driver,
qemuMigrationSrcCancelRemoveTempBitmaps(vm, QEMU_ASYNC_JOB_MIGRATION_OUT); qemuMigrationSrcCancelRemoveTempBitmaps(vm, QEMU_ASYNC_JOB_MIGRATION_OUT);
if (priv->job.current->status != QEMU_DOMAIN_JOB_STATUS_CANCELED) if (priv->job.current->status != VIR_DOMAIN_JOB_STATUS_CANCELED)
priv->job.current->status = QEMU_DOMAIN_JOB_STATUS_FAILED; priv->job.current->status = VIR_DOMAIN_JOB_STATUS_FAILED;
} }
if (iothread) if (iothread)
@ -5620,7 +5626,7 @@ qemuMigrationDstFinish(virQEMUDriver *driver,
unsigned short port; unsigned short port;
unsigned long long timeReceived = 0; unsigned long long timeReceived = 0;
virObjectEvent *event; virObjectEvent *event;
qemuDomainJobInfo *jobInfo = NULL; virDomainJobData *jobData = NULL;
bool inPostCopy = false; bool inPostCopy = false;
bool doKill = true; bool doKill = true;
@ -5644,7 +5650,7 @@ qemuMigrationDstFinish(virQEMUDriver *driver,
: QEMU_MIGRATION_PHASE_FINISH2); : QEMU_MIGRATION_PHASE_FINISH2);
qemuDomainCleanupRemove(vm, qemuMigrationDstPrepareCleanup); qemuDomainCleanupRemove(vm, qemuMigrationDstPrepareCleanup);
g_clear_pointer(&priv->job.completed, qemuDomainJobInfoFree); g_clear_pointer(&priv->job.completed, virDomainJobDataFree);
cookie_flags = QEMU_MIGRATION_COOKIE_NETWORK | cookie_flags = QEMU_MIGRATION_COOKIE_NETWORK |
QEMU_MIGRATION_COOKIE_STATS | QEMU_MIGRATION_COOKIE_STATS |
@ -5736,7 +5742,7 @@ qemuMigrationDstFinish(virQEMUDriver *driver,
goto endjob; goto endjob;
} }
if (priv->job.current->status == QEMU_DOMAIN_JOB_STATUS_POSTCOPY) if (priv->job.current->status == VIR_DOMAIN_JOB_STATUS_POSTCOPY)
inPostCopy = true; inPostCopy = true;
if (!(flags & VIR_MIGRATE_PAUSED)) { if (!(flags & VIR_MIGRATE_PAUSED)) {
@ -5772,16 +5778,16 @@ qemuMigrationDstFinish(virQEMUDriver *driver,
doKill = false; doKill = false;
} }
if (mig->jobInfo) { if (mig->jobData) {
jobInfo = g_steal_pointer(&mig->jobInfo); jobData = g_steal_pointer(&mig->jobData);
if (jobInfo->sent && timeReceived) { if (jobData->sent && timeReceived) {
jobInfo->timeDelta = timeReceived - jobInfo->sent; jobData->timeDelta = timeReceived - jobData->sent;
jobInfo->received = timeReceived; jobData->received = timeReceived;
jobInfo->timeDeltaSet = true; jobData->timeDeltaSet = true;
} }
qemuDomainJobInfoUpdateTime(jobInfo); qemuDomainJobDataUpdateTime(jobData);
qemuDomainJobInfoUpdateDowntime(jobInfo); qemuDomainJobDataUpdateDowntime(jobData);
} }
if (inPostCopy) { if (inPostCopy) {
@ -5846,10 +5852,12 @@ qemuMigrationDstFinish(virQEMUDriver *driver,
} }
if (dom) { if (dom) {
if (jobInfo) { if (jobData) {
priv->job.completed = g_steal_pointer(&jobInfo); qemuDomainJobDataPrivate *privJob = jobData->privateData;
priv->job.completed->status = QEMU_DOMAIN_JOB_STATUS_COMPLETED;
priv->job.completed->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION; priv->job.completed = g_steal_pointer(&jobData);
priv->job.completed->status = VIR_DOMAIN_JOB_STATUS_COMPLETED;
privJob->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION;
} }
if (qemuMigrationCookieFormat(mig, driver, vm, if (qemuMigrationCookieFormat(mig, driver, vm,
@ -5862,7 +5870,7 @@ qemuMigrationDstFinish(virQEMUDriver *driver,
* is obsolete anyway. * is obsolete anyway.
*/ */
if (inPostCopy) if (inPostCopy)
g_clear_pointer(&priv->job.completed, qemuDomainJobInfoFree); g_clear_pointer(&priv->job.completed, virDomainJobDataFree);
} }
qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN, qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
@ -5873,7 +5881,7 @@ qemuMigrationDstFinish(virQEMUDriver *driver,
qemuDomainRemoveInactiveJob(driver, vm); qemuDomainRemoveInactiveJob(driver, vm);
cleanup: cleanup:
g_clear_pointer(&jobInfo, qemuDomainJobInfoFree); g_clear_pointer(&jobData, virDomainJobDataFree);
virPortAllocatorRelease(port); virPortAllocatorRelease(port);
if (priv->mon) if (priv->mon)
qemuMonitorSetDomainLog(priv->mon, NULL, NULL, NULL); qemuMonitorSetDomainLog(priv->mon, NULL, NULL, NULL);
@ -6091,6 +6099,7 @@ qemuMigrationJobStart(virQEMUDriver *driver,
unsigned long apiFlags) unsigned long apiFlags)
{ {
qemuDomainObjPrivate *priv = vm->privateData; qemuDomainObjPrivate *priv = vm->privateData;
qemuDomainJobDataPrivate *privJob = priv->job.current->privateData;
virDomainJobOperation op; virDomainJobOperation op;
unsigned long long mask; unsigned long long mask;
@ -6107,7 +6116,7 @@ qemuMigrationJobStart(virQEMUDriver *driver,
if (qemuDomainObjBeginAsyncJob(driver, vm, job, op, apiFlags) < 0) if (qemuDomainObjBeginAsyncJob(driver, vm, job, op, apiFlags) < 0)
return -1; return -1;
priv->job.current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION; privJob->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION;
qemuDomainObjSetAsyncJobMask(vm, mask); qemuDomainObjSetAsyncJobMask(vm, mask);
return 0; return 0;
@ -6227,13 +6236,14 @@ int
qemuMigrationSrcFetchMirrorStats(virQEMUDriver *driver, qemuMigrationSrcFetchMirrorStats(virQEMUDriver *driver,
virDomainObj *vm, virDomainObj *vm,
qemuDomainAsyncJob asyncJob, qemuDomainAsyncJob asyncJob,
qemuDomainJobInfo *jobInfo) virDomainJobData *jobData)
{ {
size_t i; size_t i;
qemuDomainObjPrivate *priv = vm->privateData; qemuDomainObjPrivate *priv = vm->privateData;
qemuDomainJobDataPrivate *privJob = jobData->privateData;
bool nbd = false; bool nbd = false;
g_autoptr(GHashTable) blockinfo = NULL; g_autoptr(GHashTable) blockinfo = NULL;
qemuDomainMirrorStats *stats = &jobInfo->mirrorStats; qemuDomainMirrorStats *stats = &privJob->mirrorStats;
for (i = 0; i < vm->def->ndisks; i++) { for (i = 0; i < vm->def->ndisks; i++) {
virDomainDiskDef *disk = vm->def->disks[i]; virDomainDiskDef *disk = vm->def->disks[i];

View File

@ -221,7 +221,7 @@ int
qemuMigrationAnyFetchStats(virQEMUDriver *driver, qemuMigrationAnyFetchStats(virQEMUDriver *driver,
virDomainObj *vm, virDomainObj *vm,
qemuDomainAsyncJob asyncJob, qemuDomainAsyncJob asyncJob,
qemuDomainJobInfo *jobInfo, virDomainJobData *jobData,
char **error); char **error);
int int
@ -258,4 +258,4 @@ int
qemuMigrationSrcFetchMirrorStats(virQEMUDriver *driver, qemuMigrationSrcFetchMirrorStats(virQEMUDriver *driver,
virDomainObj *vm, virDomainObj *vm,
qemuDomainAsyncJob asyncJob, qemuDomainAsyncJob asyncJob,
qemuDomainJobInfo *jobInfo); virDomainJobData *jobData);

View File

@ -166,7 +166,7 @@ qemuMigrationCookieFree(qemuMigrationCookie *mig)
g_free(mig->name); g_free(mig->name);
g_free(mig->lockState); g_free(mig->lockState);
g_free(mig->lockDriver); g_free(mig->lockDriver);
g_clear_pointer(&mig->jobInfo, qemuDomainJobInfoFree); g_clear_pointer(&mig->jobData, virDomainJobDataFree);
virCPUDefFree(mig->cpu); virCPUDefFree(mig->cpu);
qemuMigrationCookieCapsFree(mig->caps); qemuMigrationCookieCapsFree(mig->caps);
if (mig->blockDirtyBitmaps) if (mig->blockDirtyBitmaps)
@ -539,8 +539,8 @@ qemuMigrationCookieAddStatistics(qemuMigrationCookie *mig,
if (!priv->job.completed) if (!priv->job.completed)
return 0; return 0;
g_clear_pointer(&mig->jobInfo, qemuDomainJobInfoFree); g_clear_pointer(&mig->jobData, virDomainJobDataFree);
mig->jobInfo = qemuDomainJobInfoCopy(priv->job.completed); mig->jobData = virDomainJobDataCopy(priv->job.completed);
mig->flags |= QEMU_MIGRATION_COOKIE_STATS; mig->flags |= QEMU_MIGRATION_COOKIE_STATS;
@ -640,22 +640,23 @@ qemuMigrationCookieNetworkXMLFormat(virBuffer *buf,
static void static void
qemuMigrationCookieStatisticsXMLFormat(virBuffer *buf, qemuMigrationCookieStatisticsXMLFormat(virBuffer *buf,
qemuDomainJobInfo *jobInfo) virDomainJobData *jobData)
{ {
qemuMonitorMigrationStats *stats = &jobInfo->stats.mig; qemuDomainJobDataPrivate *priv = jobData->privateData;
qemuMonitorMigrationStats *stats = &priv->stats.mig;
virBufferAddLit(buf, "<statistics>\n"); virBufferAddLit(buf, "<statistics>\n");
virBufferAdjustIndent(buf, 2); virBufferAdjustIndent(buf, 2);
virBufferAsprintf(buf, "<started>%llu</started>\n", jobInfo->started); virBufferAsprintf(buf, "<started>%llu</started>\n", jobData->started);
virBufferAsprintf(buf, "<stopped>%llu</stopped>\n", jobInfo->stopped); virBufferAsprintf(buf, "<stopped>%llu</stopped>\n", jobData->stopped);
virBufferAsprintf(buf, "<sent>%llu</sent>\n", jobInfo->sent); virBufferAsprintf(buf, "<sent>%llu</sent>\n", jobData->sent);
if (jobInfo->timeDeltaSet) if (jobData->timeDeltaSet)
virBufferAsprintf(buf, "<delta>%lld</delta>\n", jobInfo->timeDelta); virBufferAsprintf(buf, "<delta>%lld</delta>\n", jobData->timeDelta);
virBufferAsprintf(buf, "<%1$s>%2$llu</%1$s>\n", virBufferAsprintf(buf, "<%1$s>%2$llu</%1$s>\n",
VIR_DOMAIN_JOB_TIME_ELAPSED, VIR_DOMAIN_JOB_TIME_ELAPSED,
jobInfo->timeElapsed); jobData->timeElapsed);
if (stats->downtime_set) if (stats->downtime_set)
virBufferAsprintf(buf, "<%1$s>%2$llu</%1$s>\n", virBufferAsprintf(buf, "<%1$s>%2$llu</%1$s>\n",
VIR_DOMAIN_JOB_DOWNTIME, VIR_DOMAIN_JOB_DOWNTIME,
@ -892,8 +893,8 @@ qemuMigrationCookieXMLFormat(virQEMUDriver *driver,
if ((mig->flags & QEMU_MIGRATION_COOKIE_NBD) && mig->nbd) if ((mig->flags & QEMU_MIGRATION_COOKIE_NBD) && mig->nbd)
qemuMigrationCookieNBDXMLFormat(mig->nbd, buf); qemuMigrationCookieNBDXMLFormat(mig->nbd, buf);
if (mig->flags & QEMU_MIGRATION_COOKIE_STATS && mig->jobInfo) if (mig->flags & QEMU_MIGRATION_COOKIE_STATS && mig->jobData)
qemuMigrationCookieStatisticsXMLFormat(buf, mig->jobInfo); qemuMigrationCookieStatisticsXMLFormat(buf, mig->jobData);
if (mig->flags & QEMU_MIGRATION_COOKIE_CPU && mig->cpu) if (mig->flags & QEMU_MIGRATION_COOKIE_CPU && mig->cpu)
virCPUDefFormatBufFull(buf, mig->cpu, NULL); virCPUDefFormatBufFull(buf, mig->cpu, NULL);
@ -1039,29 +1040,30 @@ qemuMigrationCookieNBDXMLParse(xmlXPathContextPtr ctxt)
} }
static qemuDomainJobInfo * static virDomainJobData *
qemuMigrationCookieStatisticsXMLParse(xmlXPathContextPtr ctxt) qemuMigrationCookieStatisticsXMLParse(xmlXPathContextPtr ctxt)
{ {
qemuDomainJobInfo *jobInfo = NULL; virDomainJobData *jobData = NULL;
qemuMonitorMigrationStats *stats; qemuMonitorMigrationStats *stats;
qemuDomainJobDataPrivate *priv = NULL;
VIR_XPATH_NODE_AUTORESTORE(ctxt) VIR_XPATH_NODE_AUTORESTORE(ctxt)
if (!(ctxt->node = virXPathNode("./statistics", ctxt))) if (!(ctxt->node = virXPathNode("./statistics", ctxt)))
return NULL; return NULL;
jobInfo = g_new0(qemuDomainJobInfo, 1); jobData = virDomainJobDataInit(&qemuJobDataPrivateDataCallbacks);
priv = jobData->privateData;
stats = &priv->stats.mig;
jobData->status = VIR_DOMAIN_JOB_STATUS_COMPLETED;
stats = &jobInfo->stats.mig; virXPathULongLong("string(./started[1])", ctxt, &jobData->started);
jobInfo->status = QEMU_DOMAIN_JOB_STATUS_COMPLETED; virXPathULongLong("string(./stopped[1])", ctxt, &jobData->stopped);
virXPathULongLong("string(./sent[1])", ctxt, &jobData->sent);
virXPathULongLong("string(./started[1])", ctxt, &jobInfo->started); if (virXPathLongLong("string(./delta[1])", ctxt, &jobData->timeDelta) == 0)
virXPathULongLong("string(./stopped[1])", ctxt, &jobInfo->stopped); jobData->timeDeltaSet = true;
virXPathULongLong("string(./sent[1])", ctxt, &jobInfo->sent);
if (virXPathLongLong("string(./delta[1])", ctxt, &jobInfo->timeDelta) == 0)
jobInfo->timeDeltaSet = true;
virXPathULongLong("string(./" VIR_DOMAIN_JOB_TIME_ELAPSED "[1])", virXPathULongLong("string(./" VIR_DOMAIN_JOB_TIME_ELAPSED "[1])",
ctxt, &jobInfo->timeElapsed); ctxt, &jobData->timeElapsed);
if (virXPathULongLong("string(./" VIR_DOMAIN_JOB_DOWNTIME "[1])", if (virXPathULongLong("string(./" VIR_DOMAIN_JOB_DOWNTIME "[1])",
ctxt, &stats->downtime) == 0) ctxt, &stats->downtime) == 0)
@ -1121,7 +1123,7 @@ qemuMigrationCookieStatisticsXMLParse(xmlXPathContextPtr ctxt)
virXPathInt("string(./" VIR_DOMAIN_JOB_AUTO_CONVERGE_THROTTLE "[1])", virXPathInt("string(./" VIR_DOMAIN_JOB_AUTO_CONVERGE_THROTTLE "[1])",
ctxt, &stats->cpu_throttle_percentage); ctxt, &stats->cpu_throttle_percentage);
return jobInfo; return jobData;
} }
@ -1393,7 +1395,7 @@ qemuMigrationCookieXMLParse(qemuMigrationCookie *mig,
if (flags & QEMU_MIGRATION_COOKIE_STATS && if (flags & QEMU_MIGRATION_COOKIE_STATS &&
virXPathBoolean("boolean(./statistics)", ctxt) && virXPathBoolean("boolean(./statistics)", ctxt) &&
(!(mig->jobInfo = qemuMigrationCookieStatisticsXMLParse(ctxt)))) (!(mig->jobData = qemuMigrationCookieStatisticsXMLParse(ctxt))))
return -1; return -1;
if (flags & QEMU_MIGRATION_COOKIE_CPU && if (flags & QEMU_MIGRATION_COOKIE_CPU &&
@ -1554,8 +1556,8 @@ qemuMigrationCookieParse(virQEMUDriver *driver,
} }
} }
if (flags & QEMU_MIGRATION_COOKIE_STATS && mig->jobInfo && priv->job.current) if (flags & QEMU_MIGRATION_COOKIE_STATS && mig->jobData && priv->job.current)
mig->jobInfo->operation = priv->job.current->operation; mig->jobData->operation = priv->job.current->operation;
return g_steal_pointer(&mig); return g_steal_pointer(&mig);
} }

View File

@ -162,7 +162,7 @@ struct _qemuMigrationCookie {
qemuMigrationCookieNBD *nbd; qemuMigrationCookieNBD *nbd;
/* If (flags & QEMU_MIGRATION_COOKIE_STATS) */ /* If (flags & QEMU_MIGRATION_COOKIE_STATS) */
qemuDomainJobInfo *jobInfo; virDomainJobData *jobData;
/* If flags & QEMU_MIGRATION_COOKIE_CPU */ /* If flags & QEMU_MIGRATION_COOKIE_CPU */
virCPUDef *cpu; virCPUDef *cpu;

View File

@ -651,7 +651,7 @@ qemuProcessHandleStop(qemuMonitor *mon G_GNUC_UNUSED,
if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING && if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING &&
!priv->pausedShutdown) { !priv->pausedShutdown) {
if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT) { if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT) {
if (priv->job.current->status == QEMU_DOMAIN_JOB_STATUS_POSTCOPY) if (priv->job.current->status == VIR_DOMAIN_JOB_STATUS_POSTCOPY)
reason = VIR_DOMAIN_PAUSED_POSTCOPY; reason = VIR_DOMAIN_PAUSED_POSTCOPY;
else else
reason = VIR_DOMAIN_PAUSED_MIGRATION; reason = VIR_DOMAIN_PAUSED_MIGRATION;
@ -1545,6 +1545,7 @@ qemuProcessHandleMigrationStatus(qemuMonitor *mon G_GNUC_UNUSED,
void *opaque) void *opaque)
{ {
qemuDomainObjPrivate *priv; qemuDomainObjPrivate *priv;
qemuDomainJobDataPrivate *privJob = NULL;
virQEMUDriver *driver = opaque; virQEMUDriver *driver = opaque;
virObjectEvent *event = NULL; virObjectEvent *event = NULL;
int reason; int reason;
@ -1561,7 +1562,9 @@ qemuProcessHandleMigrationStatus(qemuMonitor *mon G_GNUC_UNUSED,
goto cleanup; goto cleanup;
} }
priv->job.current->stats.mig.status = status; privJob = priv->job.current->privateData;
privJob->stats.mig.status = status;
virDomainObjBroadcast(vm); virDomainObjBroadcast(vm);
if (status == QEMU_MONITOR_MIGRATION_STATUS_POSTCOPY && if (status == QEMU_MONITOR_MIGRATION_STATUS_POSTCOPY &&
@ -1623,6 +1626,7 @@ qemuProcessHandleDumpCompleted(qemuMonitor *mon G_GNUC_UNUSED,
{ {
qemuDomainObjPrivate *priv; qemuDomainObjPrivate *priv;
qemuDomainJobPrivate *jobPriv; qemuDomainJobPrivate *jobPriv;
qemuDomainJobDataPrivate *privJobCurrent = NULL;
virObjectLock(vm); virObjectLock(vm);
@ -1631,18 +1635,19 @@ qemuProcessHandleDumpCompleted(qemuMonitor *mon G_GNUC_UNUSED,
priv = vm->privateData; priv = vm->privateData;
jobPriv = priv->job.privateData; jobPriv = priv->job.privateData;
privJobCurrent = priv->job.current->privateData;
if (priv->job.asyncJob == QEMU_ASYNC_JOB_NONE) { if (priv->job.asyncJob == QEMU_ASYNC_JOB_NONE) {
VIR_DEBUG("got DUMP_COMPLETED event without a dump_completed job"); VIR_DEBUG("got DUMP_COMPLETED event without a dump_completed job");
goto cleanup; goto cleanup;
} }
jobPriv->dumpCompleted = true; jobPriv->dumpCompleted = true;
priv->job.current->stats.dump = *stats; privJobCurrent->stats.dump = *stats;
priv->job.error = g_strdup(error); priv->job.error = g_strdup(error);
/* Force error if extracting the DUMP_COMPLETED status failed */ /* Force error if extracting the DUMP_COMPLETED status failed */
if (!error && status < 0) { if (!error && status < 0) {
priv->job.error = g_strdup(virGetLastErrorMessage()); priv->job.error = g_strdup(virGetLastErrorMessage());
priv->job.current->stats.dump.status = QEMU_MONITOR_DUMP_STATUS_FAILED; privJobCurrent->stats.dump.status = QEMU_MONITOR_DUMP_STATUS_FAILED;
} }
virDomainObjBroadcast(vm); virDomainObjBroadcast(vm);
@ -3592,6 +3597,7 @@ qemuProcessRecoverJob(virQEMUDriver *driver,
unsigned int *stopFlags) unsigned int *stopFlags)
{ {
qemuDomainObjPrivate *priv = vm->privateData; qemuDomainObjPrivate *priv = vm->privateData;
qemuDomainJobDataPrivate *privDataJobCurrent = NULL;
virDomainState state; virDomainState state;
int reason; int reason;
unsigned long long now; unsigned long long now;
@ -3659,10 +3665,12 @@ qemuProcessRecoverJob(virQEMUDriver *driver,
/* We reset the job parameters for backup so that the job will look /* We reset the job parameters for backup so that the job will look
* active. This is possible because we are able to recover the state * active. This is possible because we are able to recover the state
* of blockjobs and also the backup job allows all sub-job types */ * of blockjobs and also the backup job allows all sub-job types */
priv->job.current = g_new0(qemuDomainJobInfo, 1); priv->job.current = virDomainJobDataInit(&qemuJobDataPrivateDataCallbacks);
privDataJobCurrent = priv->job.current->privateData;
priv->job.current->operation = VIR_DOMAIN_JOB_OPERATION_BACKUP; priv->job.current->operation = VIR_DOMAIN_JOB_OPERATION_BACKUP;
priv->job.current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP; privDataJobCurrent->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP;
priv->job.current->status = QEMU_DOMAIN_JOB_STATUS_ACTIVE; priv->job.current->status = VIR_DOMAIN_JOB_STATUS_ACTIVE;
priv->job.current->started = now; priv->job.current->started = now;
break; break;
@ -8311,7 +8319,7 @@ void qemuProcessStop(virQEMUDriver *driver,
/* clean up a possible backup job */ /* clean up a possible backup job */
if (priv->backup) if (priv->backup)
qemuBackupJobTerminate(vm, QEMU_DOMAIN_JOB_STATUS_CANCELED); qemuBackupJobTerminate(vm, VIR_DOMAIN_JOB_STATUS_CANCELED);
/* Do this explicitly after vm->pid is reset so that security drivers don't /* Do this explicitly after vm->pid is reset so that security drivers don't
* try to enter the domain's namespace which is non-existent by now as qemu * try to enter the domain's namespace which is non-existent by now as qemu

View File

@ -1414,11 +1414,13 @@ qemuSnapshotCreateActiveExternal(virQEMUDriver *driver,
/* do the memory snapshot if necessary */ /* do the memory snapshot if necessary */
if (memory) { if (memory) {
qemuDomainJobDataPrivate *privJobCurrent = priv->job.current->privateData;
/* check if migration is possible */ /* check if migration is possible */
if (!qemuMigrationSrcIsAllowed(driver, vm, false, 0)) if (!qemuMigrationSrcIsAllowed(driver, vm, false, 0))
goto cleanup; goto cleanup;
priv->job.current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP; privJobCurrent->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP;
/* allow the migration job to be cancelled or the domain to be paused */ /* allow the migration job to be cancelled or the domain to be paused */
qemuDomainObjSetAsyncJobMask(vm, (QEMU_JOB_DEFAULT_MASK | qemuDomainObjSetAsyncJobMask(vm, (QEMU_JOB_DEFAULT_MASK |