qemu: Cancel storage migration in parallel

Instead of cancelling disk mirrors sequentially, let's just call
block-job-cancel for all migrating disks and then wait until all
disappear.

In case we cancel disk mirrors at the end of a successful migration, we
also need to check that all block jobs completed successfully. Otherwise
we have to abort the migration.

Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
This commit is contained in:
Jiri Denemark 2015-06-09 23:50:36 +02:00
parent 4172b96a3e
commit cebb110f73

View File

@ -1793,76 +1793,122 @@ qemuMigrationDriveMirrorReady(virQEMUDriverPtr driver,
} }
/*
 * If @check is true, the function will report an error and return a different
 * code in case a block job fails. This way we can properly abort migration in
 * case some block jobs failed once all memory has already been transferred.
 *
 * Returns 1 if all mirrors are gone,
 *         0 if some mirrors are still active,
 *         -1 some mirrors failed but some are still active,
 *         -2 all mirrors are gone but some of them failed.
 */
static int
qemuMigrationDriveMirrorCancelled(virQEMUDriverPtr driver,
                                  virDomainObjPtr vm,
                                  bool check)
{
    size_t i;
    size_t active = 0;
    bool failed = false;

    /* Walk every disk that still has a migration block job and fold any
     * pending job events into its state. */
    for (i = 0; i < vm->def->ndisks; i++) {
        virDomainDiskDefPtr disk = vm->def->disks[i];
        qemuDomainDiskPrivatePtr diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk);
        int status;

        if (!diskPriv->migrating)
            continue;

        status = qemuBlockJobUpdate(driver, vm, disk);
        switch (status) {
        case VIR_DOMAIN_BLOCK_JOB_FAILED:
            /* Only report the failure when the caller asked for strict
             * checking; either way the job is gone, so fall through and
             * tear down the sync state. */
            if (check) {
                virReportError(VIR_ERR_OPERATION_FAILED,
                               _("migration of disk %s failed"),
                               disk->dst);
                failed = true;
            }
            /* fallthrough */
        case VIR_DOMAIN_BLOCK_JOB_CANCELED:
        case VIR_DOMAIN_BLOCK_JOB_COMPLETED:
            qemuBlockJobSyncEnd(driver, vm, disk);
            diskPriv->migrating = false;
            break;

        default:
            /* The job has not finished yet. */
            active++;
        }
    }

    if (active) {
        if (failed) {
            VIR_DEBUG("Some disk mirrors failed; still waiting for %zu "
                      "disk mirrors to finish", active);
            return -1;
        }
        VIR_DEBUG("Waiting for %zu disk mirrors to finish", active);
        return 0;
    }

    if (failed) {
        VIR_DEBUG("All disk mirrors are gone; some of them failed");
        return -2;
    }
    VIR_DEBUG("All disk mirrors are gone");
    return 1;
}
/*
* Returns 0 on success,
* 1 when job is already completed or it failed and failNoJob is false,
* -1 on error or when job failed and failNoJob is true.
*/ */
static int static int
qemuMigrationCancelOneDriveMirror(virQEMUDriverPtr driver, qemuMigrationCancelOneDriveMirror(virQEMUDriverPtr driver,
virDomainObjPtr vm, virDomainObjPtr vm,
virDomainDiskDefPtr disk) virDomainDiskDefPtr disk,
bool failNoJob)
{ {
qemuDomainObjPrivatePtr priv = vm->privateData; qemuDomainObjPrivatePtr priv = vm->privateData;
char *diskAlias = NULL; char *diskAlias = NULL;
int ret = -1; int ret = -1;
int status;
int rv;
/* No need to cancel if mirror already aborted */ status = qemuBlockJobUpdate(driver, vm, disk);
if (disk->mirrorState == VIR_DOMAIN_DISK_MIRROR_STATE_ABORT) { switch (status) {
ret = 0; case VIR_DOMAIN_BLOCK_JOB_FAILED:
} else { case VIR_DOMAIN_BLOCK_JOB_CANCELED:
virConnectDomainEventBlockJobStatus status = -1; if (failNoJob) {
virReportError(VIR_ERR_OPERATION_FAILED,
if (virAsprintf(&diskAlias, "%s%s", _("migration of disk %s failed"),
QEMU_DRIVE_HOST_PREFIX, disk->info.alias) < 0) disk->dst);
goto cleanup; return -1;
if (qemuDomainObjEnterMonitorAsync(driver, vm,
QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
goto endjob;
ret = qemuMonitorBlockJobCancel(priv->mon, diskAlias, true);
if (qemuDomainObjExitMonitor(driver, vm) < 0)
goto endjob;
if (ret < 0) {
virDomainBlockJobInfo info;
/* block-job-cancel can fail if QEMU simultaneously
* aborted the job; probe for it again to detect this */
if (qemuMonitorBlockJobInfo(priv->mon, diskAlias,
&info, NULL) == 0) {
ret = 0;
} else {
virReportError(VIR_ERR_OPERATION_FAILED,
_("could not cancel migration of disk %s"),
disk->dst);
}
goto endjob;
} }
return 1;
/* Mirror may become ready before cancellation takes case VIR_DOMAIN_BLOCK_JOB_COMPLETED:
* effect; loop if we get that event first */ return 1;
while (1) {
status = qemuBlockJobUpdate(driver, vm, disk);
if (status != -1 && status != VIR_DOMAIN_BLOCK_JOB_READY)
break;
if ((ret = virDomainObjWait(vm)) < 0)
goto endjob;
}
} }
endjob: if (virAsprintf(&diskAlias, "%s%s",
qemuBlockJobSyncEnd(driver, vm, disk); QEMU_DRIVE_HOST_PREFIX, disk->info.alias) < 0)
return -1;
if (disk->mirrorState == VIR_DOMAIN_DISK_MIRROR_STATE_ABORT) if (qemuDomainObjEnterMonitorAsync(driver, vm,
disk->mirrorState = VIR_DOMAIN_DISK_MIRROR_STATE_NONE; QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
goto cleanup;
rv = qemuMonitorBlockJobCancel(priv->mon, diskAlias, true);
if (qemuDomainObjExitMonitor(driver, vm) < 0 || rv < 0)
goto cleanup;
ret = 0;
cleanup: cleanup:
VIR_FREE(diskAlias); VIR_FREE(diskAlias);
@ -1874,6 +1920,7 @@ qemuMigrationCancelOneDriveMirror(virQEMUDriverPtr driver,
* qemuMigrationCancelDriveMirror: * qemuMigrationCancelDriveMirror:
* @driver: qemu driver * @driver: qemu driver
* @vm: domain * @vm: domain
* @check: if true report an error when some of the mirrors fails
* *
* Cancel all drive-mirrors started by qemuMigrationDriveMirror. * Cancel all drive-mirrors started by qemuMigrationDriveMirror.
* Any pending block job events for the affected disks will be * Any pending block job events for the affected disks will be
@ -1883,28 +1930,53 @@ qemuMigrationCancelOneDriveMirror(virQEMUDriverPtr driver,
*/ */
static int static int
qemuMigrationCancelDriveMirror(virQEMUDriverPtr driver, qemuMigrationCancelDriveMirror(virQEMUDriverPtr driver,
virDomainObjPtr vm) virDomainObjPtr vm,
bool check)
{ {
virErrorPtr err = NULL; virErrorPtr err = NULL;
int ret = 0; int ret = -1;
size_t i; size_t i;
int rv;
bool failed = false;
VIR_DEBUG("Cancelling drive mirrors for domain %s", vm->def->name);
for (i = 0; i < vm->def->ndisks; i++) { for (i = 0; i < vm->def->ndisks; i++) {
virDomainDiskDefPtr disk = vm->def->disks[i]; virDomainDiskDefPtr disk = vm->def->disks[i];
qemuDomainDiskPrivatePtr diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk); qemuDomainDiskPrivatePtr diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk);
if (!diskPriv->migrating || !diskPriv->blockJobSync) if (!diskPriv->migrating)
continue; continue;
if (qemuMigrationCancelOneDriveMirror(driver, vm, disk) < 0) { rv = qemuMigrationCancelOneDriveMirror(driver, vm, disk, check);
ret = -1; if (rv != 0) {
if (!err) if (rv < 0) {
err = virSaveLastError(); if (!err)
err = virSaveLastError();
failed = true;
}
qemuBlockJobSyncEnd(driver, vm, disk);
diskPriv->migrating = false;
} }
diskPriv->migrating = false;
} }
while ((rv = qemuMigrationDriveMirrorCancelled(driver, vm, check)) != 1) {
if (rv < 0) {
failed = true;
if (rv == -2)
break;
}
if (failed && !err)
err = virSaveLastError();
if (virDomainObjWait(vm) < 0)
goto cleanup;
}
ret = failed ? -1 : 0;
cleanup:
if (err) { if (err) {
virSetError(err); virSetError(err);
virFreeError(err); virFreeError(err);
@ -3610,7 +3682,7 @@ qemuMigrationConfirmPhase(virQEMUDriverPtr driver,
virErrorPtr orig_err = virSaveLastError(); virErrorPtr orig_err = virSaveLastError();
/* cancel any outstanding NBD jobs */ /* cancel any outstanding NBD jobs */
qemuMigrationCancelDriveMirror(driver, vm); qemuMigrationCancelDriveMirror(driver, vm, false);
virSetError(orig_err); virSetError(orig_err);
virFreeError(orig_err); virFreeError(orig_err);
@ -4196,7 +4268,7 @@ qemuMigrationRun(virQEMUDriverPtr driver,
/* cancel any outstanding NBD jobs */ /* cancel any outstanding NBD jobs */
if (mig && mig->nbd) { if (mig && mig->nbd) {
if (qemuMigrationCancelDriveMirror(driver, vm) < 0) if (qemuMigrationCancelDriveMirror(driver, vm, ret == 0) < 0)
ret = -1; ret = -1;
} }