qemu: Store API flags for async jobs in qemuDomainJobObj

When an async job is running, we sometimes need to know how it was
started to distinguish between several types of the job, e.g., post-copy
vs. normal migration. So far we have been adding a dedicated bool to
qemuDomainJobObj for each such case, which doesn't scale very well, and
storing all these bools in the status XML would be painful, so we didn't
do it.

A better approach is to remember the flags passed to the API which
started the async job; a single flags value is also easy to store in the
status XML.

Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
Reviewed-by: Ján Tomko <jtomko@redhat.com>
Author: Jiri Denemark, 2018-03-21 13:01:59 +01:00
commit d634f7d759, parent f3a89d7c01
6 changed files with 48 additions and 25 deletions
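To illustrate the motivation (not part of this diff): with the starting
API's flags preserved in the job object, a variant such as post-copy
migration can be derived on demand instead of being tracked in yet
another bool. The helper below is a hypothetical sketch, not code from
this commit:

/* Hypothetical sketch: derive a job variant from the stored API flags
 * instead of keeping a dedicated bool in qemuDomainJobObj. */
static bool
qemuMigrationJobIsPostcopy(qemuDomainObjPrivatePtr priv)
{
    /* VIR_MIGRATE_POSTCOPY is the public flag accepted by the migration
     * APIs; priv->job.apiFlags preserves it for the job's lifetime. */
    return priv->job.asyncJob != QEMU_ASYNC_JOB_NONE &&
           (priv->job.apiFlags & VIR_MIGRATE_POSTCOPY) != 0;
}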

diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c

@@ -339,6 +339,7 @@ qemuDomainObjResetAsyncJob(qemuDomainObjPrivatePtr priv)
     VIR_FREE(job->current);
     qemuMigrationParamsFree(job->migParams);
     job->migParams = NULL;
+    job->apiFlags = 0;
 }

 void
@@ -354,6 +355,7 @@ qemuDomainObjRestoreJob(virDomainObjPtr obj,
     job->asyncOwner = priv->job.asyncOwner;
     job->phase = priv->job.phase;
     VIR_STEAL_PTR(job->migParams, priv->job.migParams);
+    job->apiFlags = priv->job.apiFlags;

     qemuDomainObjResetJob(priv);
     qemuDomainObjResetAsyncJob(priv);
@@ -5805,7 +5807,8 @@ qemuDomainObjBeginJobInternal(virQEMUDriverPtr driver,
         asyncDuration = now - priv->job.asyncStarted;

     VIR_WARN("Cannot start job (%s, %s) for domain %s; "
-             "current job is (%s, %s) owned by (%llu %s, %llu %s) "
+             "current job is (%s, %s) "
+             "owned by (%llu %s, %llu %s (flags=0x%lx)) "
              "for (%llus, %llus)",
              qemuDomainJobTypeToString(job),
              qemuDomainAsyncJobTypeToString(asyncJob),
@@ -5814,6 +5817,7 @@ qemuDomainObjBeginJobInternal(virQEMUDriverPtr driver,
              qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
              priv->job.owner, NULLSTR(priv->job.ownerAPI),
              priv->job.asyncOwner, NULLSTR(priv->job.asyncOwnerAPI),
+             priv->job.apiFlags,
              duration / 1000, asyncDuration / 1000);

     if (nested || qemuDomainNestedJobAllowed(priv, job))
@@ -5877,7 +5881,8 @@ int qemuDomainObjBeginJob(virQEMUDriverPtr driver,
 int qemuDomainObjBeginAsyncJob(virQEMUDriverPtr driver,
                                virDomainObjPtr obj,
                                qemuDomainAsyncJob asyncJob,
-                               virDomainJobOperation operation)
+                               virDomainJobOperation operation,
+                               unsigned long apiFlags)
 {
     qemuDomainObjPrivatePtr priv;
@@ -5887,6 +5892,7 @@ int qemuDomainObjBeginAsyncJob(virQEMUDriverPtr driver,
     priv = obj->privateData;
     priv->job.current->operation = operation;
+    priv->job.apiFlags = apiFlags;

     return 0;
 }

diff --git a/src/qemu/qemu_domain.h b/src/qemu/qemu_domain.h

@@ -182,6 +182,7 @@ struct _qemuDomainJobObj {
     bool dumpCompleted;                 /* dump completed */

     qemuMigrationParamsPtr migParams;
+    unsigned long apiFlags; /* flags passed to the API which started the async job */
 };

 typedef void (*qemuDomainCleanupCallback)(virQEMUDriverPtr driver,
@@ -493,7 +494,8 @@ int qemuDomainObjBeginJob(virQEMUDriverPtr driver,
 int qemuDomainObjBeginAsyncJob(virQEMUDriverPtr driver,
                                virDomainObjPtr obj,
                                qemuDomainAsyncJob asyncJob,
-                               virDomainJobOperation operation)
+                               virDomainJobOperation operation,
+                               unsigned long apiFlags)
     ATTRIBUTE_RETURN_CHECK;
 int qemuDomainObjBeginNestedJob(virQEMUDriverPtr driver,
                                 virDomainObjPtr obj,

diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c

@@ -267,7 +267,7 @@ qemuAutostartDomain(virDomainObjPtr vm,
     if (vm->autostart &&
         !virDomainObjIsActive(vm)) {
         if (qemuProcessBeginJob(driver, vm,
-                                VIR_DOMAIN_JOB_OPERATION_START) < 0) {
+                                VIR_DOMAIN_JOB_OPERATION_START, flags) < 0) {
             virReportError(VIR_ERR_INTERNAL_ERROR,
                            _("Failed to start job on VM '%s': %s"),
                            vm->def->name, virGetLastErrorMessage());
@@ -1767,7 +1767,8 @@ static virDomainPtr qemuDomainCreateXML(virConnectPtr conn,
     virObjectRef(vm);
     def = NULL;

-    if (qemuProcessBeginJob(driver, vm, VIR_DOMAIN_JOB_OPERATION_START) < 0) {
+    if (qemuProcessBeginJob(driver, vm, VIR_DOMAIN_JOB_OPERATION_START,
+                            flags) < 0) {
         qemuDomainRemoveInactiveJob(driver, vm);
         goto cleanup;
     }
@@ -3323,7 +3324,7 @@ qemuDomainSaveInternal(virQEMUDriverPtr driver,
         goto cleanup;

     if (qemuDomainObjBeginAsyncJob(driver, vm, QEMU_ASYNC_JOB_SAVE,
-                                   VIR_DOMAIN_JOB_OPERATION_SAVE) < 0)
+                                   VIR_DOMAIN_JOB_OPERATION_SAVE, flags) < 0)
         goto cleanup;

     if (!virDomainObjIsActive(vm)) {
@@ -3931,7 +3932,8 @@ qemuDomainCoreDumpWithFormat(virDomainPtr dom,
     if (qemuDomainObjBeginAsyncJob(driver, vm,
                                    QEMU_ASYNC_JOB_DUMP,
-                                   VIR_DOMAIN_JOB_OPERATION_DUMP) < 0)
+                                   VIR_DOMAIN_JOB_OPERATION_DUMP,
+                                   flags) < 0)
         goto cleanup;

     if (!virDomainObjIsActive(vm)) {
@@ -4156,7 +4158,8 @@ processWatchdogEvent(virQEMUDriverPtr driver,
     case VIR_DOMAIN_WATCHDOG_ACTION_DUMP:
         if (qemuDomainObjBeginAsyncJob(driver, vm,
                                        QEMU_ASYNC_JOB_DUMP,
-                                       VIR_DOMAIN_JOB_OPERATION_DUMP) < 0) {
+                                       VIR_DOMAIN_JOB_OPERATION_DUMP,
+                                       flags) < 0) {
             goto cleanup;
         }
@@ -4242,9 +4245,10 @@ processGuestPanicEvent(virQEMUDriverPtr driver,
     virObjectEventPtr event = NULL;
     virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
     bool removeInactive = false;
+    unsigned long flags = VIR_DUMP_MEMORY_ONLY;

     if (qemuDomainObjBeginAsyncJob(driver, vm, QEMU_ASYNC_JOB_DUMP,
-                                   VIR_DOMAIN_JOB_OPERATION_DUMP) < 0)
+                                   VIR_DOMAIN_JOB_OPERATION_DUMP, flags) < 0)
         goto cleanup;

     if (!virDomainObjIsActive(vm)) {
@@ -4275,7 +4279,7 @@ processGuestPanicEvent(virQEMUDriverPtr driver,
     switch (action) {
     case VIR_DOMAIN_LIFECYCLE_ACTION_COREDUMP_DESTROY:
-        if (doCoreDumpToAutoDumpPath(driver, vm, VIR_DUMP_MEMORY_ONLY) < 0)
+        if (doCoreDumpToAutoDumpPath(driver, vm, flags) < 0)
             goto endjob;
         ATTRIBUTE_FALLTHROUGH;
@@ -4292,7 +4296,7 @@ processGuestPanicEvent(virQEMUDriverPtr driver,
         break;

     case VIR_DOMAIN_LIFECYCLE_ACTION_COREDUMP_RESTART:
-        if (doCoreDumpToAutoDumpPath(driver, vm, VIR_DUMP_MEMORY_ONLY) < 0)
+        if (doCoreDumpToAutoDumpPath(driver, vm, flags) < 0)
             goto endjob;
         ATTRIBUTE_FALLTHROUGH;
@@ -6747,7 +6751,8 @@ qemuDomainRestoreFlags(virConnectPtr conn,
         priv->hookRun = true;
     }

-    if (qemuProcessBeginJob(driver, vm, VIR_DOMAIN_JOB_OPERATION_RESTORE) < 0)
+    if (qemuProcessBeginJob(driver, vm, VIR_DOMAIN_JOB_OPERATION_RESTORE,
+                            flags) < 0)
         goto cleanup;

     ret = qemuDomainSaveImageStartVM(conn, driver, vm, &fd, data, path,
@@ -7335,7 +7340,8 @@ qemuDomainCreateWithFlags(virDomainPtr dom, unsigned int flags)
     if (virDomainCreateWithFlagsEnsureACL(dom->conn, vm->def) < 0)
         goto cleanup;

-    if (qemuProcessBeginJob(driver, vm, VIR_DOMAIN_JOB_OPERATION_START) < 0)
+    if (qemuProcessBeginJob(driver, vm, VIR_DOMAIN_JOB_OPERATION_START,
+                            flags) < 0)
         goto cleanup;

     if (virDomainObjIsActive(vm)) {
@@ -15186,7 +15192,7 @@ qemuDomainSnapshotCreateXML(virDomainPtr domain,
      * 'savevm' blocks the monitor. External snapshot will then modify the
      * job mask appropriately. */
     if (qemuDomainObjBeginAsyncJob(driver, vm, QEMU_ASYNC_JOB_SNAPSHOT,
-                                   VIR_DOMAIN_JOB_OPERATION_SNAPSHOT) < 0)
+                                   VIR_DOMAIN_JOB_OPERATION_SNAPSHOT, flags) < 0)
         goto cleanup;

     qemuDomainObjSetAsyncJobMask(vm, QEMU_JOB_NONE);
@@ -15785,7 +15791,8 @@ qemuDomainRevertToSnapshot(virDomainSnapshotPtr snapshot,
     }

     if (qemuProcessBeginJob(driver, vm,
-                            VIR_DOMAIN_JOB_OPERATION_SNAPSHOT_REVERT) < 0)
+                            VIR_DOMAIN_JOB_OPERATION_SNAPSHOT_REVERT,
+                            flags) < 0)
         goto cleanup;

     if (!(snap = qemuSnapObjFromSnapshot(vm, snapshot)))

diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c

@@ -80,7 +80,8 @@ VIR_ENUM_IMPL(qemuMigrationJobPhase, QEMU_MIGRATION_PHASE_LAST,
 static int
 qemuMigrationJobStart(virQEMUDriverPtr driver,
                       virDomainObjPtr vm,
-                      qemuDomainAsyncJob job)
+                      qemuDomainAsyncJob job,
+                      unsigned long apiFlags)
     ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2) ATTRIBUTE_RETURN_CHECK;

 static void
@@ -1985,7 +1986,8 @@ qemuMigrationSrcBegin(virConnectPtr conn,
     qemuDomainAsyncJob asyncJob;

     if ((flags & VIR_MIGRATE_CHANGE_PROTECTION)) {
-        if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
+        if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+                                  flags) < 0)
             goto cleanup;
         asyncJob = QEMU_ASYNC_JOB_MIGRATION_OUT;
     } else {
@@ -2320,7 +2322,8 @@ qemuMigrationDstPrepareAny(virQEMUDriverPtr driver,
                                !!(flags & VIR_MIGRATE_NON_SHARED_INC)) < 0)
         goto cleanup;

-    if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
+    if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
+                              flags) < 0)
         goto cleanup;
     qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_PREPARE);
@@ -4440,7 +4443,8 @@ qemuMigrationSrcPerformJob(virQEMUDriverPtr driver,
     virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
     qemuDomainObjPrivatePtr priv = vm->privateData;

-    if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
+    if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+                              flags) < 0)
         goto cleanup;

     if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) {
@@ -4552,7 +4556,8 @@ qemuMigrationSrcPerformPhase(virQEMUDriverPtr driver,
     /* If we didn't start the job in the begin phase, start it now. */
     if (!(flags & VIR_MIGRATE_CHANGE_PROTECTION)) {
-        if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
+        if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+                                  flags) < 0)
             goto cleanup;
     } else if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_OUT)) {
         goto cleanup;
@@ -5291,7 +5296,8 @@ qemuMigrationSrcCancel(virQEMUDriverPtr driver,
 static int
 qemuMigrationJobStart(virQEMUDriverPtr driver,
                       virDomainObjPtr vm,
-                      qemuDomainAsyncJob job)
+                      qemuDomainAsyncJob job,
+                      unsigned long apiFlags)
 {
     qemuDomainObjPrivatePtr priv = vm->privateData;
     virDomainJobOperation op;
@@ -5307,7 +5313,7 @@ qemuMigrationJobStart(virQEMUDriverPtr driver,
                    JOB_MASK(QEMU_JOB_MIGRATION_OP);
     }

-    if (qemuDomainObjBeginAsyncJob(driver, vm, job, op) < 0)
+    if (qemuDomainObjBeginAsyncJob(driver, vm, job, op, apiFlags) < 0)
         return -1;

     priv->job.current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION;

diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c

@@ -4183,10 +4183,11 @@ qemuProcessIncomingDefNew(virQEMUCapsPtr qemuCaps,
 int
 qemuProcessBeginJob(virQEMUDriverPtr driver,
                     virDomainObjPtr vm,
-                    virDomainJobOperation operation)
+                    virDomainJobOperation operation,
+                    unsigned long apiFlags)
 {
     if (qemuDomainObjBeginAsyncJob(driver, vm, QEMU_ASYNC_JOB_START,
-                                   operation) < 0)
+                                   operation, apiFlags) < 0)
         return -1;

     qemuDomainObjSetAsyncJobMask(vm, QEMU_JOB_NONE);

diff --git a/src/qemu/qemu_process.h b/src/qemu/qemu_process.h

@@ -68,7 +68,8 @@ void qemuProcessIncomingDefFree(qemuProcessIncomingDefPtr inc);
 int qemuProcessBeginJob(virQEMUDriverPtr driver,
                         virDomainObjPtr vm,
-                        virDomainJobOperation operation);
+                        virDomainJobOperation operation,
+                        unsigned long apiFlags);
 void qemuProcessEndJob(virQEMUDriverPtr driver,
                        virDomainObjPtr vm);
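
The commit message notes the flags can easily be stored in the status
XML; that serialization is not part of this diff. Purely as a hedged
sketch of what it could look like, assuming libvirt's virBufferAsprintf
and virXPathULongHex helpers and using hypothetical function names:

/* Hypothetical sketch: format the stored flags as a hex attribute of a
 * job element in the domain status XML. */
static void
qemuDomainObjPrivateXMLFormatJobAPIFlags(virBufferPtr buf,
                                         qemuDomainObjPrivatePtr priv)
{
    virBufferAsprintf(buf, "<job flags='0x%lx'/>\n", priv->job.apiFlags);
}

/* Hypothetical sketch: restore the flags when parsing status XML after
 * a libvirtd restart. */
static void
qemuDomainObjPrivateXMLParseJobAPIFlags(xmlXPathContextPtr ctxt,
                                        qemuDomainObjPrivatePtr priv)
{
    unsigned long flags = 0;

    if (virXPathULongHex("string(./job/@flags)", ctxt, &flags) == 0)
        priv->job.apiFlags = flags;
}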