qemu: make separate function for setting statsType of privateData

We only need to set statsType in almost every case of setting
something from private data, so it seems unnecessary to pull
privateData out of current / completed job for just this one
thing every time. I think this patch keeps the code cleaner
without variables used just once.

Signed-off-by: Kristina Hanicova <khanicov@redhat.com>
Reviewed-by: Jiri Denemark <jdenemar@redhat.com>
This commit is contained in:
Kristina Hanicova 2022-02-11 14:49:06 +01:00 committed by Jiri Denemark
parent f304de0df6
commit 0301db44e2
7 changed files with 29 additions and 21 deletions

View File

@@ -745,7 +745,6 @@ qemuBackupBegin(virDomainObj *vm,
unsigned int flags) unsigned int flags)
{ {
qemuDomainObjPrivate *priv = vm->privateData; qemuDomainObjPrivate *priv = vm->privateData;
qemuDomainJobDataPrivate *privData = priv->job.current->privateData;
g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(priv->driver); g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(priv->driver);
g_autoptr(virDomainBackupDef) def = NULL; g_autoptr(virDomainBackupDef) def = NULL;
g_autofree char *suffix = NULL; g_autofree char *suffix = NULL;
@@ -799,7 +798,8 @@ qemuBackupBegin(virDomainObj *vm,
qemuDomainObjSetAsyncJobMask(vm, (QEMU_JOB_DEFAULT_MASK | qemuDomainObjSetAsyncJobMask(vm, (QEMU_JOB_DEFAULT_MASK |
JOB_MASK(QEMU_JOB_SUSPEND) | JOB_MASK(QEMU_JOB_SUSPEND) |
JOB_MASK(QEMU_JOB_MODIFY))); JOB_MASK(QEMU_JOB_MODIFY)));
privData->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP; qemuDomainJobSetStatsType(priv->job.current,
QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP);
if (!virDomainObjIsActive(vm)) { if (!virDomainObjIsActive(vm)) {
virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s", virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",

View File

@@ -95,6 +95,16 @@ virDomainJobDataPrivateDataCallbacks qemuJobDataPrivateDataCallbacks = {
}; };
/* qemuDomainJobSetStatsType:
 * @jobData: job data whose qemu-private payload is updated
 * @type: statistics type to record
 *
 * Store @type in the qemu driver's private data attached to @jobData,
 * so callers no longer need to fetch the privateData pointer themselves.
 */
void
qemuDomainJobSetStatsType(virDomainJobData *jobData,
                          qemuDomainJobStatsType type)
{
    ((qemuDomainJobDataPrivate *) jobData->privateData)->statsType = type;
}
const char * const char *
qemuDomainAsyncJobPhaseToString(qemuDomainAsyncJob job, qemuDomainAsyncJobPhaseToString(qemuDomainAsyncJob job,
int phase G_GNUC_UNUSED) int phase G_GNUC_UNUSED)

View File

@@ -174,6 +174,9 @@ struct _qemuDomainJobObj {
qemuDomainObjPrivateJobCallbacks *cb; qemuDomainObjPrivateJobCallbacks *cb;
}; };
void qemuDomainJobSetStatsType(virDomainJobData *jobData,
qemuDomainJobStatsType type);
const char *qemuDomainAsyncJobPhaseToString(qemuDomainAsyncJob job, const char *qemuDomainAsyncJobPhaseToString(qemuDomainAsyncJob job,
int phase); int phase);
int qemuDomainAsyncJobPhaseFromString(qemuDomainAsyncJob job, int qemuDomainAsyncJobPhaseFromString(qemuDomainAsyncJob job,

View File

@@ -2637,7 +2637,6 @@ qemuDomainSaveInternal(virQEMUDriver *driver,
int ret = -1; int ret = -1;
virObjectEvent *event = NULL; virObjectEvent *event = NULL;
qemuDomainObjPrivate *priv = vm->privateData; qemuDomainObjPrivate *priv = vm->privateData;
qemuDomainJobDataPrivate *privJobCurrent = priv->job.current->privateData;
virQEMUSaveData *data = NULL; virQEMUSaveData *data = NULL;
g_autoptr(qemuDomainSaveCookie) cookie = NULL; g_autoptr(qemuDomainSaveCookie) cookie = NULL;
@@ -2654,7 +2653,8 @@ qemuDomainSaveInternal(virQEMUDriver *driver,
goto endjob; goto endjob;
} }
privJobCurrent->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP; qemuDomainJobSetStatsType(priv->job.current,
QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP);
/* Pause */ /* Pause */
if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) { if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
@@ -2995,9 +2995,8 @@ qemuDumpToFd(virQEMUDriver *driver,
return -1; return -1;
if (detach) { if (detach) {
qemuDomainJobDataPrivate *privStats = priv->job.current->privateData; qemuDomainJobSetStatsType(priv->job.current,
QEMU_DOMAIN_JOB_STATS_TYPE_MEMDUMP);
privStats->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_MEMDUMP;
} else { } else {
g_clear_pointer(&priv->job.current, virDomainJobDataFree); g_clear_pointer(&priv->job.current, virDomainJobDataFree);
} }
@@ -3135,7 +3134,6 @@ qemuDomainCoreDumpWithFormat(virDomainPtr dom,
virQEMUDriver *driver = dom->conn->privateData; virQEMUDriver *driver = dom->conn->privateData;
virDomainObj *vm; virDomainObj *vm;
qemuDomainObjPrivate *priv = NULL; qemuDomainObjPrivate *priv = NULL;
qemuDomainJobDataPrivate *privJobCurrent = NULL;
bool resume = false, paused = false; bool resume = false, paused = false;
int ret = -1; int ret = -1;
virObjectEvent *event = NULL; virObjectEvent *event = NULL;
@@ -3160,8 +3158,8 @@ qemuDomainCoreDumpWithFormat(virDomainPtr dom,
goto endjob; goto endjob;
priv = vm->privateData; priv = vm->privateData;
privJobCurrent = priv->job.current->privateData; qemuDomainJobSetStatsType(priv->job.current,
privJobCurrent->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP; QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP);
/* Migrate will always stop the VM, so the resume condition is /* Migrate will always stop the VM, so the resume condition is
independent of whether the stop command is issued. */ independent of whether the stop command is issued. */

View File

@@ -5853,11 +5853,10 @@ qemuMigrationDstFinish(virQEMUDriver *driver,
if (dom) { if (dom) {
if (jobData) { if (jobData) {
qemuDomainJobDataPrivate *privJob = jobData->privateData;
priv->job.completed = g_steal_pointer(&jobData); priv->job.completed = g_steal_pointer(&jobData);
priv->job.completed->status = VIR_DOMAIN_JOB_STATUS_COMPLETED; priv->job.completed->status = VIR_DOMAIN_JOB_STATUS_COMPLETED;
privJob->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION; qemuDomainJobSetStatsType(jobData,
QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION);
} }
if (qemuMigrationCookieFormat(mig, driver, vm, if (qemuMigrationCookieFormat(mig, driver, vm,
@@ -6099,7 +6098,6 @@ qemuMigrationJobStart(virQEMUDriver *driver,
unsigned long apiFlags) unsigned long apiFlags)
{ {
qemuDomainObjPrivate *priv = vm->privateData; qemuDomainObjPrivate *priv = vm->privateData;
qemuDomainJobDataPrivate *privJob = priv->job.current->privateData;
virDomainJobOperation op; virDomainJobOperation op;
unsigned long long mask; unsigned long long mask;
@@ -6116,7 +6114,8 @@ qemuMigrationJobStart(virQEMUDriver *driver,
if (qemuDomainObjBeginAsyncJob(driver, vm, job, op, apiFlags) < 0) if (qemuDomainObjBeginAsyncJob(driver, vm, job, op, apiFlags) < 0)
return -1; return -1;
privJob->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION; qemuDomainJobSetStatsType(priv->job.current,
QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION);
qemuDomainObjSetAsyncJobMask(vm, mask); qemuDomainObjSetAsyncJobMask(vm, mask);
return 0; return 0;

View File

@@ -3597,7 +3597,6 @@ qemuProcessRecoverJob(virQEMUDriver *driver,
unsigned int *stopFlags) unsigned int *stopFlags)
{ {
qemuDomainObjPrivate *priv = vm->privateData; qemuDomainObjPrivate *priv = vm->privateData;
qemuDomainJobDataPrivate *privDataJobCurrent = NULL;
virDomainState state; virDomainState state;
int reason; int reason;
unsigned long long now; unsigned long long now;
@@ -3666,10 +3665,10 @@ qemuProcessRecoverJob(virQEMUDriver *driver,
* active. This is possible because we are able to recover the state * active. This is possible because we are able to recover the state
* of blockjobs and also the backup job allows all sub-job types */ * of blockjobs and also the backup job allows all sub-job types */
priv->job.current = virDomainJobDataInit(&qemuJobDataPrivateDataCallbacks); priv->job.current = virDomainJobDataInit(&qemuJobDataPrivateDataCallbacks);
privDataJobCurrent = priv->job.current->privateData;
qemuDomainJobSetStatsType(priv->job.current,
QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP);
priv->job.current->operation = VIR_DOMAIN_JOB_OPERATION_BACKUP; priv->job.current->operation = VIR_DOMAIN_JOB_OPERATION_BACKUP;
privDataJobCurrent->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP;
priv->job.current->status = VIR_DOMAIN_JOB_STATUS_ACTIVE; priv->job.current->status = VIR_DOMAIN_JOB_STATUS_ACTIVE;
priv->job.current->started = now; priv->job.current->started = now;
break; break;

View File

@@ -1414,13 +1414,12 @@ qemuSnapshotCreateActiveExternal(virQEMUDriver *driver,
/* do the memory snapshot if necessary */ /* do the memory snapshot if necessary */
if (memory) { if (memory) {
qemuDomainJobDataPrivate *privJobCurrent = priv->job.current->privateData;
/* check if migration is possible */ /* check if migration is possible */
if (!qemuMigrationSrcIsAllowed(driver, vm, false, 0)) if (!qemuMigrationSrcIsAllowed(driver, vm, false, 0))
goto cleanup; goto cleanup;
privJobCurrent->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP; qemuDomainJobSetStatsType(priv->job.current,
QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP);
/* allow the migration job to be cancelled or the domain to be paused */ /* allow the migration job to be cancelled or the domain to be paused */
qemuDomainObjSetAsyncJobMask(vm, (QEMU_JOB_DEFAULT_MASK | qemuDomainObjSetAsyncJobMask(vm, (QEMU_JOB_DEFAULT_MASK |