qemu: Do not crash when canceling migration on reconnect

When libvirtd is restarted during an active outgoing migration (or a
snapshot, save, or dump operation, all of which are internally
implemented as migration), it wants to cancel that migration. But due to
a mistake in commit v8.7.0-57-g2d7b22b561, the qemuMigrationSrcCancel
function is called with wait == true, which leads to an instant crash by
dereferencing a NULL pointer stored in priv->job.current.
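
A minimal stand-alone model of the failing path (toy types and names for
illustration only, not libvirt's actual code): the wait loop
unconditionally dereferences the current job data, which is still NULL
right after reconnect.

#include <stdio.h>

/* Stands in for virDomainJobData. */
typedef struct {
    int canceled;
} JobData;

/* Stands in for the job state referenced by priv->job; "current" stays
 * NULL after the daemon reconnects until the async job is restored. */
typedef struct {
    JobData *current;
} Job;

/* Models qemuMigrationSrcIsCanceled(): it dereferences job->current
 * without checking it, which is what made wait == true fatal. */
static int
migration_is_canceled(Job *job)
{
    return job->current->canceled;      /* SIGSEGV when current == NULL */
}

/* Models qemuMigrationSrcCancel(vm, asyncJob, wait). */
static void
migration_cancel(Job *job, int wait)
{
    /* ... the cancel request would be sent to QEMU here ... */
    while (wait && !migration_is_canceled(job))
        ;                               /* poll until canceled */
}

int
main(void)
{
    Job job = { .current = NULL };      /* state right after reconnect */
    migration_cancel(&job, 1);          /* wait == true: NULL dereference */
    printf("not reached\n");
    return 0;
}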

When canceling migration to file (snapshot, save, dump), we don't need
to wait until it is actually canceled, since no migration capabilities
or parameters need to be restored.
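
Under the same toy model, the safe call for these cases is a
non-waiting cancel, which never touches the job data at all (this is
what the wait == false change in qemuProcessRecoverJob below amounts
to):

    migration_cancel(&job, 0);  /* request cancellation, return at once */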

On the other hand, we do need to wait when canceling an outgoing
migration, and since we don't have any virDomainJobData at this point,
we have to temporarily restore the migration job to make sure we can
process MIGRATION events from QEMU.
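
Continuing the toy model above, the fix has roughly this shape
(migration_cancel_unattended is a made-up name; the real code uses
qemuDomainObjRestoreAsyncJob() and virDomainObjEndAsyncJob() as shown
in the diff below):

/* Restore job data before waiting, drop it again afterwards. */
static void
migration_cancel_unattended(Job *job, JobData *old_job)
{
    /* Make sure the wait loop (and, in libvirt, the MIGRATION event
     * handler) has job data to read and update. */
    if (!job->current)
        job->current = old_job;

    migration_cancel(job, 1);   /* waiting is safe now; in libvirt the
                                 * MIGRATION event from QEMU flips the
                                 * state this loop is checking */

    job->current = NULL;        /* end the temporary async job */
}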

Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
Reviewed-by: Ján Tomko <jtomko@redhat.com>
Jiri Denemark 2022-10-12 17:45:38 +02:00
parent 4dd86f334b
commit 1a570f9712
3 changed files with 27 additions and 8 deletions

src/qemu/qemu_migration.c

@@ -4633,8 +4633,7 @@ qemuMigrationSrcIsCanceled(virDomainObj *vm)
  * cancellation to complete.
  *
  * The thread (the caller itself in most cases) which is watching the migration
- * will do all the cleanup once migration is canceled. If no thread is watching
- * the migration, use qemuMigrationSrcCancelUnattended instead.
+ * will do all the cleanup once migration is canceled.
  */
 int
 qemuMigrationSrcCancel(virDomainObj *vm,
@@ -6979,11 +6978,12 @@ qemuMigrationSrcToFile(virQEMUDriver *driver, virDomainObj *vm,
 /**
- * This function is supposed to be used only when no other thread is watching
- * the migration.
+ * This function is supposed to be used only while reconnecting to a domain
+ * with an active migration job.
  */
 int
-qemuMigrationSrcCancelUnattended(virDomainObj *vm)
+qemuMigrationSrcCancelUnattended(virDomainObj *vm,
+                                 virDomainJobObj *oldJob)
 {
     bool storage = false;
     size_t i;
@@ -6991,8 +6991,26 @@ qemuMigrationSrcCancelUnattended(virDomainObj *vm)
     VIR_DEBUG("Canceling unfinished outgoing migration of domain %s",
               vm->def->name);
 
+    /* Make sure MIGRATION event handler can store the current migration state
+     * in the job.
+     */
+    if (!vm->job->current) {
+        qemuDomainObjRestoreAsyncJob(vm, VIR_ASYNC_JOB_MIGRATION_OUT,
+                                     oldJob->phase, oldJob->asyncStarted,
+                                     VIR_DOMAIN_JOB_OPERATION_MIGRATION_OUT,
+                                     QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION,
+                                     VIR_DOMAIN_JOB_STATUS_FAILED,
+                                     VIR_JOB_NONE);
+    }
+
+    /* We're inside a MODIFY job and the restored MIGRATION_OUT async job is
+     * used only for processing migration events from QEMU. Thus we don't want
+     * to start a nested job for talking to QEMU.
+     */
     qemuMigrationSrcCancel(vm, VIR_ASYNC_JOB_NONE, true);
 
+    virDomainObjEndAsyncJob(vm);
+
     for (i = 0; i < vm->def->ndisks; i++) {
         virDomainDiskDef *disk = vm->def->disks[i];
         qemuDomainDiskPrivate *diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk);

src/qemu/qemu_migration.h

@@ -241,7 +241,8 @@ qemuMigrationSrcToFile(virQEMUDriver *driver,
     ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2) G_GNUC_WARN_UNUSED_RESULT;
 
 int
-qemuMigrationSrcCancelUnattended(virDomainObj *vm);
+qemuMigrationSrcCancelUnattended(virDomainObj *vm,
+                                 virDomainJobObj *oldJob);
 
 int
 qemuMigrationSrcCancel(virDomainObj *vm,

src/qemu/qemu_process.c

@@ -3541,7 +3541,7 @@ qemuProcessRecoverMigrationOut(virQEMUDriver *driver,
      */
     VIR_DEBUG("Cancelling unfinished migration of domain %s",
               vm->def->name);
-    if (qemuMigrationSrcCancelUnattended(vm) < 0) {
+    if (qemuMigrationSrcCancelUnattended(vm, job) < 0) {
         VIR_WARN("Could not cancel ongoing migration of domain %s",
                  vm->def->name);
     }
@@ -3700,7 +3700,7 @@ qemuProcessRecoverJob(virQEMUDriver *driver,
     case VIR_ASYNC_JOB_SAVE:
     case VIR_ASYNC_JOB_DUMP:
     case VIR_ASYNC_JOB_SNAPSHOT:
-        qemuMigrationSrcCancel(vm, VIR_ASYNC_JOB_NONE, true);
+        qemuMigrationSrcCancel(vm, VIR_ASYNC_JOB_NONE, false);
         /* resume the domain but only if it was paused as a result of
          * running a migration-to-file operation. Although we are
          * recovering an async job, this function is run at startup