qemu: Restore async job start timestamp on reconnect

Jobs that are supposed to remain active even when the libvirt daemon
restarts were reported as started at the time the daemon restarted.
This is not very helpful; we should restore the original timestamp
instead.
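
The start time is now recorded in the per-domain status XML as an
asyncStarted attribute on the <job> element (milliseconds since the
epoch, as returned by virTimeMillisNow) and read back when the daemon
reconnects to the running domain; a missing or zero value falls back
to the current time. For example (the timestamp below is illustrative):

  <job type='none' async='migration out' phase='perform3' flags='0x42'
       asyncStarted='1652443200000'>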

Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
Reviewed-by: Peter Krempa <pkrempa@redhat.com>
Jiri Denemark 2022-05-13 14:15:00 +02:00
parent 013d3091e0
commit 7c36e5004c
8 changed files with 25 additions and 11 deletions

View File

@@ -235,6 +235,7 @@ qemuDomainObjPreserveJob(virDomainObj *obj,
     job->owner = priv->job.owner;
     job->asyncJob = priv->job.asyncJob;
     job->asyncOwner = priv->job.asyncOwner;
+    job->asyncStarted = priv->job.asyncStarted;
     job->phase = priv->job.phase;
     job->privateData = g_steal_pointer(&priv->job.privateData);
     job->apiFlags = priv->job.apiFlags;
@@ -254,6 +255,7 @@ void
 qemuDomainObjRestoreAsyncJob(virDomainObj *vm,
                              virDomainAsyncJob asyncJob,
                              int phase,
+                             unsigned long long started,
                              virDomainJobOperation operation,
                              qemuDomainJobStatsType statsType,
                              virDomainJobStatus status,
@@ -261,18 +263,18 @@ qemuDomainObjRestoreAsyncJob(virDomainObj *vm,
 {
     qemuDomainObjPrivate *priv = vm->privateData;
     qemuDomainJobObj *job = &priv->job;
-    unsigned long long now;

     VIR_DEBUG("Restoring %s async job for domain %s",
               virDomainAsyncJobTypeToString(asyncJob), vm->def->name);

-    ignore_value(virTimeMillisNow(&now));
+    if (started == 0)
+        ignore_value(virTimeMillisNow(&started));

     job->jobsQueued++;
     job->asyncJob = asyncJob;
     job->phase = phase;
     job->asyncOwnerAPI = g_strdup(virThreadJobGet());
-    job->asyncStarted = now;
+    job->asyncStarted = started;

     qemuDomainObjSetAsyncJobMask(vm, allowedJobs);
@@ -280,7 +282,7 @@ qemuDomainObjRestoreAsyncJob(virDomainObj *vm,
     qemuDomainJobSetStatsType(priv->job.current, statsType);
     job->current->operation = operation;
     job->current->status = status;
-    job->current->started = now;
+    job->current->started = started;
 }
@@ -1250,8 +1252,10 @@ qemuDomainObjPrivateXMLFormatJob(virBuffer *buf,
                                                       priv->job.phase));
     }

-    if (priv->job.asyncJob != VIR_ASYNC_JOB_NONE)
+    if (priv->job.asyncJob != VIR_ASYNC_JOB_NONE) {
         virBufferAsprintf(&attrBuf, " flags='0x%lx'", priv->job.apiFlags);
+        virBufferAsprintf(&attrBuf, " asyncStarted='%llu'", priv->job.asyncStarted);
+    }

     if (priv->job.cb &&
         priv->job.cb->formatJob(&childBuf, &priv->job, vm) < 0)
@@ -1307,6 +1311,13 @@ qemuDomainObjPrivateXMLParseJob(virDomainObj *vm,
             }
             VIR_FREE(tmp);
         }
+
+        if (virXPathULongLong("string(@asyncStarted)", ctxt,
+                              &priv->job.asyncStarted) == -2) {
+            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+                           _("Invalid async job start"));
+            return -1;
+        }
     }

     if (virXPathULongHex("string(@flags)", ctxt, &priv->job.apiFlags) == -2) {

View File

@@ -164,6 +164,7 @@ void
 qemuDomainObjRestoreAsyncJob(virDomainObj *vm,
                              virDomainAsyncJob asyncJob,
                              int phase,
+                             unsigned long long started,
                              virDomainJobOperation operation,
                              qemuDomainJobStatsType statsType,
                              virDomainJobStatus status,

View File

@@ -3402,7 +3402,8 @@ qemuProcessRestoreMigrationJob(virDomainObj *vm,
         allowedJobs = VIR_JOB_DEFAULT_MASK | JOB_MASK(VIR_JOB_MIGRATION_OP);
     }

-    qemuDomainObjRestoreAsyncJob(vm, job->asyncJob, job->phase, op,
+    qemuDomainObjRestoreAsyncJob(vm, job->asyncJob, job->phase,
+                                 job->asyncStarted, op,
                                  QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION,
                                  VIR_DOMAIN_JOB_STATUS_PAUSED,
                                  allowedJobs);
@@ -3675,6 +3676,7 @@ qemuProcessRecoverJob(virQEMUDriver *driver,
     case VIR_ASYNC_JOB_BACKUP:
         /* Restore the config of the async job which is not persisted */
         qemuDomainObjRestoreAsyncJob(vm, VIR_ASYNC_JOB_BACKUP, 0,
+                                     job->asyncStarted,
                                      VIR_DOMAIN_JOB_OPERATION_BACKUP,
                                      QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP,
                                      VIR_DOMAIN_JOB_STATUS_ACTIVE,

View File

@@ -238,7 +238,7 @@
     <flag name='dump-completed'/>
     <flag name='hda-output'/>
   </qemuCaps>
-  <job type='none' async='migration in' phase='prepare' flags='0x900'>
+  <job type='none' async='migration in' phase='prepare' flags='0x900' asyncStarted='0'>
     <migParams>
       <param name='compress-level' value='1'/>
       <param name='compress-threads' value='8'/>

View File

@@ -262,7 +262,7 @@
     <flag name='cpu-max'/>
     <flag name='migration-param.block-bitmap-mapping'/>
   </qemuCaps>
-  <job type='none' async='migration out' phase='perform3' flags='0x42'>
+  <job type='none' async='migration out' phase='perform3' flags='0x42' asyncStarted='0'>
     <disk dev='hda' migrating='no'/>
     <disk dev='vda' migrating='yes'>
       <migrationSource type='network' format='raw'>

View File

@@ -231,7 +231,7 @@
     <flag name='dump-completed'/>
     <flag name='hda-output'/>
   </qemuCaps>
-  <job type='none' async='migration out' phase='perform3' flags='0x0'>
+  <job type='none' async='migration out' phase='perform3' flags='0x0' asyncStarted='0'>
     <disk dev='vdb' migrating='yes'/>
     <disk dev='hda' migrating='no'/>
   </job>

View File

@@ -235,7 +235,7 @@
     <flag name='nbd-tls'/>
     <flag name='blockdev-del'/>
   </qemuCaps>
-  <job type='none' async='migration out' phase='perform3' flags='0x0'>
+  <job type='none' async='migration out' phase='perform3' flags='0x0' asyncStarted='0'>
     <disk dev='vdb' migrating='yes'>
       <migrationSource type='network' format='raw'>
         <source protocol='nbd' name='drive-virtio-disk1' tlsFromConfig='0'>

View File

@@ -238,7 +238,7 @@
     <flag name='dump-completed'/>
     <flag name='hda-output'/>
   </qemuCaps>
-  <job type='none' async='migration out' phase='perform3' flags='0x802'>
+  <job type='none' async='migration out' phase='perform3' flags='0x802' asyncStarted='0'>
     <disk dev='vda' migrating='no'/>
     <migParams>
       <param name='compress-level' value='1'/>