Mirror of https://gitlab.com/libvirt/libvirt.git
qemu: Reset all migration parameters
Restore the original values of all migration parameters we store in qemuDomainJobObj instead of explicitly resetting only a limited set of them. The result is not strictly equivalent to the previous code with respect to resetting TLS state, because the previous code would only reset it if we had changed it before, while the new code resets it whenever QEMU supports TLS migration. This is not a problem for the parameters themselves, but it can cause spurious errors about missing TLS objects to be logged at the end of a non-TLS migration. That issue will be fixed about 50 patches later.

Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
Reviewed-by: Ján Tomko <jtomko@redhat.com>
parent 71cc5d3283
commit eb54cb473a
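In short, qemuMigrationParamsReset() now receives the parameter values that were saved in the job object when the migration started and restores all of them, rather than resetting a hand-picked subset. A minimal, self-contained sketch of that save-everything/restore-everything pattern (plain C with invented stand-in fields, not libvirt code):

/* Standalone illustration (not libvirt code) of the approach this commit
 * switches to: snapshot every migration tunable before changing anything,
 * then restore the whole snapshot on reset instead of resetting only a
 * hand-picked subset.  Field names here are invented stand-ins. */
#include <stdio.h>

struct mig_params {
    int downtime_limit;   /* stand-in for one QEMU migration parameter */
    int compress_level;   /* stand-in for another parameter */
};

/* In libvirt this would issue monitor commands; here it just copies. */
static void mig_params_apply(struct mig_params *live,
                             const struct mig_params *wanted)
{
    *live = *wanted;
}

int main(void)
{
    struct mig_params live = { 300, 1 };
    struct mig_params orig = live;          /* saved when the job starts */
    struct mig_params tuned = { 5000, 9 };  /* values used for migration */

    mig_params_apply(&live, &tuned);        /* migration changes them all */
    mig_params_apply(&live, &orig);         /* reset restores every value  */

    printf("downtime=%d compress=%d\n", live.downtime_limit, live.compress_level);
    return 0;
}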
src/qemu/qemu_migration.c
@@ -1834,7 +1834,8 @@ qemuMigrationSrcCleanup(virDomainObjPtr vm,
         VIR_WARN("Migration of domain %s finished but we don't know if the"
                  " domain was successfully started on destination or not",
                  vm->def->name);
-        qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT);
+        qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+                                 priv->job.migParams);
         /* clear the job and let higher levels decide what to do */
         qemuDomainObjDiscardAsyncJob(driver, vm);
         break;
@@ -2593,7 +2594,8 @@ qemuMigrationDstPrepareAny(virQEMUDriverPtr driver,
     return ret;

 stopjob:
-    qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN);
+    qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
+                             priv->job.migParams);

     if (stopProcess) {
         unsigned int stopFlags = VIR_QEMU_PROCESS_STOP_MIGRATED;
@@ -2969,7 +2971,8 @@ qemuMigrationSrcConfirmPhase(virQEMUDriverPtr driver,
         qemuDomainEventQueue(driver, event);
     }

-    qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT);
+    qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+                             priv->job.migParams);

     if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm, driver->caps) < 0)
         VIR_WARN("Failed to save status on vm %s", vm->def->name);
@@ -4581,6 +4584,7 @@ qemuMigrationSrcPerformJob(virQEMUDriverPtr driver,
     int ret = -1;
     virErrorPtr orig_err = NULL;
     virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
+    qemuDomainObjPrivatePtr priv = vm->privateData;

     if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
         goto cleanup;
@@ -4641,7 +4645,8 @@ qemuMigrationSrcPerformJob(virQEMUDriverPtr driver,
      * here
      */
     if (!v3proto && ret < 0)
-        qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT);
+        qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+                                 priv->job.migParams);

     if (qemuMigrationSrcRestoreDomainState(driver, vm)) {
         event = virDomainEventLifecycleNewFromObj(vm,
@@ -4691,6 +4696,7 @@ qemuMigrationSrcPerformPhase(virQEMUDriverPtr driver,
                              unsigned long flags,
                              unsigned long resource)
 {
+    qemuDomainObjPrivatePtr priv = vm->privateData;
     virObjectEventPtr event = NULL;
     int ret = -1;

@@ -4731,7 +4737,8 @@ qemuMigrationSrcPerformPhase(virQEMUDriverPtr driver,

 endjob:
     if (ret < 0) {
-        qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT);
+        qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+                                 priv->job.migParams);
         qemuMigrationJobFinish(driver, vm);
     } else {
         qemuMigrationJobContinue(vm);
@@ -5187,7 +5194,8 @@ qemuMigrationDstFinish(virQEMUDriverPtr driver,
         VIR_FREE(priv->job.completed);
     }

-    qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN);
+    qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
+                             priv->job.migParams);

     qemuMigrationJobFinish(driver, vm);
     if (!virDomainObjIsActive(vm))
src/qemu/qemu_migration_params.c
@@ -378,30 +378,19 @@ qemuMigrationParamsSetCompression(virQEMUDriverPtr driver,
  *
  * Deconstruct all the setup possibly done for TLS - delete the TLS and
  * security objects, free the secinfo, and reset the migration params to "".
- *
- * Returns 0 on success, -1 on failure
  */
-static int
+static void
 qemuMigrationParamsResetTLS(virQEMUDriverPtr driver,
                             virDomainObjPtr vm,
-                            int asyncJob)
+                            int asyncJob,
+                            qemuMigrationParamsPtr origParams)
 {
-    qemuDomainObjPrivatePtr priv = vm->privateData;
     char *tlsAlias = NULL;
     char *secAlias = NULL;
-    qemuMigrationParamsPtr migParams = NULL;
-    int ret = -1;

-    if (qemuMigrationParamsCheckTLSCreds(driver, vm, asyncJob) < 0)
-        return -1;
-
-    /* If the tls-creds doesn't exist or if they're set to "" then there's
-     * nothing to do since we never set anything up */
-    if (!priv->migTLSAlias || !*priv->migTLSAlias)
-        return 0;
-
-    if (!(migParams = qemuMigrationParamsNew()))
-        goto cleanup;
+    /* If QEMU does not support TLS migration we didn't set the aliases. */
+    if (!origParams->params.tlsCreds)
+        return;

     /* NB: If either or both fail to allocate memory we can still proceed
      * since the next time we migrate another deletion attempt will be
@@ -410,21 +399,10 @@ qemuMigrationParamsResetTLS(virQEMUDriverPtr driver,
     secAlias = qemuDomainGetSecretAESAlias(QEMU_MIGRATION_TLS_ALIAS_BASE, false);

     qemuDomainDelTLSObjects(driver, vm, asyncJob, secAlias, tlsAlias);
-    qemuDomainSecretInfoFree(&priv->migSecinfo);
+    qemuDomainSecretInfoFree(&QEMU_DOMAIN_PRIVATE(vm)->migSecinfo);

-    if (VIR_STRDUP(migParams->params.tlsCreds, "") < 0 ||
-        VIR_STRDUP(migParams->params.tlsHostname, "") < 0 ||
-        qemuMigrationParamsSet(driver, vm, asyncJob, migParams) < 0)
-        goto cleanup;
-
-    ret = 0;
-
- cleanup:
     VIR_FREE(tlsAlias);
     VIR_FREE(secAlias);
-    qemuMigrationParamsFree(migParams);
-
-    return ret;
 }


@@ -475,16 +453,22 @@ qemuMigrationParamsCheck(virQEMUDriverPtr driver,
 void
 qemuMigrationParamsReset(virQEMUDriverPtr driver,
                          virDomainObjPtr vm,
-                         int asyncJob)
+                         int asyncJob,
+                         qemuMigrationParamsPtr origParams)
 {
     qemuMonitorMigrationCaps cap;
     virErrorPtr err = virSaveLastError();

+    VIR_DEBUG("Resetting migration parameters %p", origParams);
+
     if (!virDomainObjIsActive(vm))
         goto cleanup;

-    if (qemuMigrationParamsResetTLS(driver, vm, asyncJob) < 0)
-        goto cleanup;
+    if (origParams) {
+        if (qemuMigrationParamsSet(driver, vm, asyncJob, origParams) < 0)
+            goto cleanup;
+        qemuMigrationParamsResetTLS(driver, vm, asyncJob, origParams);
+    }

     for (cap = 0; cap < QEMU_MONITOR_MIGRATION_CAPS_LAST; cap++) {
         if (qemuMigrationCapsGet(vm, cap) &&
src/qemu/qemu_migration_params.h
@@ -108,7 +108,8 @@ qemuMigrationParamsCheck(virQEMUDriverPtr driver,
 void
 qemuMigrationParamsReset(virQEMUDriverPtr driver,
                          virDomainObjPtr vm,
-                         int asyncJob);
+                         int asyncJob,
+                         qemuMigrationParamsPtr origParams);

 int
 qemuMigrationCapsCheck(virQEMUDriverPtr driver,
src/qemu/qemu_process.c
@@ -3079,7 +3079,7 @@ qemuProcessRecoverMigrationIn(virQEMUDriverPtr driver,
         break;
     }

-    qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_NONE);
+    qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_NONE, job->migParams);
     return 0;
 }

@@ -3173,7 +3173,7 @@ qemuProcessRecoverMigrationOut(virQEMUDriverPtr driver,
         }
     }

-    qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_NONE);
+    qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_NONE, job->migParams);
     return 0;
 }
