qemu: Restore async job start timestamp on reconnect

Jobs that are supposed to remain active across a libvirt daemon
restart were reported as started at the time the daemon was restarted.
This is not very helpful; we should restore the original timestamp.

Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
Reviewed-by: Peter Krempa <pkrempa@redhat.com>
Author: Jiri Denemark
Date:   2022-05-13 14:15:00 +02:00
parent 013d3091e0
commit 7c36e5004c
8 changed files with 25 additions and 11 deletions
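
The heart of the change is the new contract of qemuDomainObjRestoreAsyncJob():
the caller now passes in the start timestamp recovered from the status XML, and
a value of 0 (the asyncStarted attribute was absent, i.e. the status XML was
written by an older libvirtd) makes the function fall back to the current time,
which was previously the only behavior. Below is a minimal standalone sketch of
that fallback pattern; it is not libvirt code, and the helper names and sample
timestamp are made up for illustration:

/* restore_started.c — sketch of "restore saved timestamp, fall back to now".
 * Build: cc -o restore_started restore_started.c */
#include <stdio.h>
#include <time.h>

/* Current wall-clock time in milliseconds, the unit libvirt job
 * timestamps use. */
static unsigned long long
time_millis_now(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_REALTIME, &ts);
    return (unsigned long long)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

/* Mirrors the new qemuDomainObjRestoreAsyncJob() contract: started == 0
 * means "no timestamp was recorded", so report the job as started now;
 * any other value is the original start time and is kept as-is. */
static unsigned long long
restore_job_started(unsigned long long started)
{
    if (started == 0)
        started = time_millis_now();
    return started;
}

int main(void)
{
    unsigned long long from_xml = 1652444100000ULL; /* hypothetical asyncStarted='...' value */
    unsigned long long legacy = 0;                  /* attribute missing in old status XML */

    printf("restored: %llu\n", restore_job_started(from_xml));
    printf("fallback: %llu\n", restore_job_started(legacy));
    return 0;
}

This is also why the updated test expectations below carry asyncStarted='0':
they cover status XML that predates the new attribute.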

src/qemu/qemu_domainjob.c

@@ -235,6 +235,7 @@ qemuDomainObjPreserveJob(virDomainObj *obj,
     job->owner = priv->job.owner;
     job->asyncJob = priv->job.asyncJob;
     job->asyncOwner = priv->job.asyncOwner;
+    job->asyncStarted = priv->job.asyncStarted;
     job->phase = priv->job.phase;
     job->privateData = g_steal_pointer(&priv->job.privateData);
     job->apiFlags = priv->job.apiFlags;
@@ -254,6 +255,7 @@ void
 qemuDomainObjRestoreAsyncJob(virDomainObj *vm,
                              virDomainAsyncJob asyncJob,
                              int phase,
+                             unsigned long long started,
                              virDomainJobOperation operation,
                              qemuDomainJobStatsType statsType,
                              virDomainJobStatus status,
@@ -261,18 +263,18 @@ qemuDomainObjRestoreAsyncJob(virDomainObj *vm,
 {
     qemuDomainObjPrivate *priv = vm->privateData;
     qemuDomainJobObj *job = &priv->job;
-    unsigned long long now;

     VIR_DEBUG("Restoring %s async job for domain %s",
               virDomainAsyncJobTypeToString(asyncJob), vm->def->name);

-    ignore_value(virTimeMillisNow(&now));
+    if (started == 0)
+        ignore_value(virTimeMillisNow(&started));

     job->jobsQueued++;
     job->asyncJob = asyncJob;
     job->phase = phase;
     job->asyncOwnerAPI = g_strdup(virThreadJobGet());
-    job->asyncStarted = now;
+    job->asyncStarted = started;

     qemuDomainObjSetAsyncJobMask(vm, allowedJobs);
@@ -280,7 +282,7 @@ qemuDomainObjRestoreAsyncJob(virDomainObj *vm,
     qemuDomainJobSetStatsType(priv->job.current, statsType);
     job->current->operation = operation;
     job->current->status = status;
-    job->current->started = now;
+    job->current->started = started;
 }
@@ -1250,8 +1252,10 @@ qemuDomainObjPrivateXMLFormatJob(virBuffer *buf,
                           priv->job.phase));
     }

-    if (priv->job.asyncJob != VIR_ASYNC_JOB_NONE)
+    if (priv->job.asyncJob != VIR_ASYNC_JOB_NONE) {
         virBufferAsprintf(&attrBuf, " flags='0x%lx'", priv->job.apiFlags);
+        virBufferAsprintf(&attrBuf, " asyncStarted='%llu'", priv->job.asyncStarted);
+    }

     if (priv->job.cb &&
         priv->job.cb->formatJob(&childBuf, &priv->job, vm) < 0)
@@ -1307,6 +1311,13 @@ qemuDomainObjPrivateXMLParseJob(virDomainObj *vm,
             }
             VIR_FREE(tmp);
         }
+
+        if (virXPathULongLong("string(@asyncStarted)", ctxt,
+                              &priv->job.asyncStarted) == -2) {
+            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+                           _("Invalid async job start"));
+            return -1;
+        }
     }

     if (virXPathULongHex("string(@flags)", ctxt, &priv->job.apiFlags) == -2) {

src/qemu/qemu_domainjob.h

@@ -164,6 +164,7 @@ void
 qemuDomainObjRestoreAsyncJob(virDomainObj *vm,
                              virDomainAsyncJob asyncJob,
                              int phase,
+                             unsigned long long started,
                              virDomainJobOperation operation,
                              qemuDomainJobStatsType statsType,
                              virDomainJobStatus status,

src/qemu/qemu_process.c

@@ -3402,7 +3402,8 @@ qemuProcessRestoreMigrationJob(virDomainObj *vm,
         allowedJobs = VIR_JOB_DEFAULT_MASK | JOB_MASK(VIR_JOB_MIGRATION_OP);
     }

-    qemuDomainObjRestoreAsyncJob(vm, job->asyncJob, job->phase, op,
+    qemuDomainObjRestoreAsyncJob(vm, job->asyncJob, job->phase,
+                                 job->asyncStarted, op,
                                  QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION,
                                  VIR_DOMAIN_JOB_STATUS_PAUSED,
                                  allowedJobs);
@@ -3675,6 +3676,7 @@ qemuProcessRecoverJob(virQEMUDriver *driver,
     case VIR_ASYNC_JOB_BACKUP:
         /* Restore the config of the async job which is not persisted */
         qemuDomainObjRestoreAsyncJob(vm, VIR_ASYNC_JOB_BACKUP, 0,
+                                     job->asyncStarted,
                                      VIR_DOMAIN_JOB_OPERATION_BACKUP,
                                      QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP,
                                      VIR_DOMAIN_JOB_STATUS_ACTIVE,

tests/qemustatusxml2xmldata/migration-in-params-in.xml

@@ -238,7 +238,7 @@
     <flag name='dump-completed'/>
     <flag name='hda-output'/>
   </qemuCaps>
-  <job type='none' async='migration in' phase='prepare' flags='0x900'>
+  <job type='none' async='migration in' phase='prepare' flags='0x900' asyncStarted='0'>
     <migParams>
       <param name='compress-level' value='1'/>
       <param name='compress-threads' value='8'/>

tests/qemustatusxml2xmldata/migration-out-nbd-bitmaps-in.xml

@@ -262,7 +262,7 @@
     <flag name='cpu-max'/>
     <flag name='migration-param.block-bitmap-mapping'/>
   </qemuCaps>
-  <job type='none' async='migration out' phase='perform3' flags='0x42'>
+  <job type='none' async='migration out' phase='perform3' flags='0x42' asyncStarted='0'>
     <disk dev='hda' migrating='no'/>
     <disk dev='vda' migrating='yes'>
       <migrationSource type='network' format='raw'>

tests/qemustatusxml2xmldata/migration-out-nbd-out.xml

@@ -231,7 +231,7 @@
     <flag name='dump-completed'/>
     <flag name='hda-output'/>
   </qemuCaps>
-  <job type='none' async='migration out' phase='perform3' flags='0x0'>
+  <job type='none' async='migration out' phase='perform3' flags='0x0' asyncStarted='0'>
     <disk dev='vdb' migrating='yes'/>
     <disk dev='hda' migrating='no'/>
   </job>

tests/qemustatusxml2xmldata/migration-out-nbd-tls-out.xml

@@ -235,7 +235,7 @@
     <flag name='nbd-tls'/>
     <flag name='blockdev-del'/>
   </qemuCaps>
-  <job type='none' async='migration out' phase='perform3' flags='0x0'>
+  <job type='none' async='migration out' phase='perform3' flags='0x0' asyncStarted='0'>
     <disk dev='vdb' migrating='yes'>
       <migrationSource type='network' format='raw'>
         <source protocol='nbd' name='drive-virtio-disk1' tlsFromConfig='0'>

tests/qemustatusxml2xmldata/migration-out-params-in.xml

@@ -238,7 +238,7 @@
     <flag name='dump-completed'/>
     <flag name='hda-output'/>
   </qemuCaps>
-  <job type='none' async='migration out' phase='perform3' flags='0x802'>
+  <job type='none' async='migration out' phase='perform3' flags='0x802' asyncStarted='0'>
     <disk dev='vda' migrating='no'/>
     <migParams>
       <param name='compress-level' value='1'/>