Mirror of https://gitlab.com/libvirt/libvirt.git
qemu: Send migrate_cancel when aborting migration
When QEMU reports a failed or cancelled migration, we don't need to send it the migrate_cancel QMP command. But on all other error paths, such as when we detect a broken connection to the destination daemon or something else fails inside libvirt, we need to send migrate_cancel explicitly instead of relying on the migration being implicitly cancelled when the destination QEMU is killed. Because we were not doing so, one could end up with a paused domain after a failed migration.

https://bugzilla.redhat.com/show_bug.cgi?id=1098833
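The contract this patch establishes between qemuMigrationWaitForCompletion and its caller can be modeled in a small standalone sketch; all demo* names below are hypothetical stand-ins, not libvirt APIs. Return 0 means the migration completed, -1 means QEMU itself reported failure or cancellation (so no migrate_cancel is needed), and -2 means libvirt aborted the migration and must tell QEMU to stop:

/* Standalone sketch of the new contract; all demo* names are hypothetical
 * stand-ins, not libvirt APIs. Compile with: cc -o demo demo.c */
#include <stdio.h>

/* Models qemuMigrationWaitForCompletion's return values:
 *   0  migration completed
 *  -1  QEMU itself reported failure/cancellation
 *  -2  libvirt aborted; QEMU must still be told to stop */
static int
demoWaitForCompletion(int simulated_rc)
{
    return simulated_rc;
}

static void
demoMigrateCancel(void)
{
    /* stands in for qemuMonitorMigrateCancel(priv->mon) */
    puts("QMP: migrate_cancel");
}

static void
demoMigrationRun(int simulated_rc)
{
    int rc = demoWaitForCompletion(simulated_rc);

    if (rc == -2) {
        /* libvirt-side abort: cancel explicitly so the source domain
         * resumes instead of staying paused */
        demoMigrateCancel();
    } else if (rc == -1) {
        /* QEMU already failed/cancelled; nothing to cancel */
        puts("cleanup only");
    } else {
        puts("migration completed");
    }
}

int
main(void)
{
    demoMigrationRun(-2);
    demoMigrationRun(-1);
    demoMigrationRun(0);
    return 0;
}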
This commit is contained in:
parent 7bdc7702f3
commit e27d28970f
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -1724,10 +1724,9 @@ qemuMigrationUpdateJobStatus(virQEMUDriverPtr driver,
 
     priv->job.status = status;
 
-    if (ret < 0 || virTimeMillisNow(&priv->job.info.timeElapsed) < 0) {
-        priv->job.info.type = VIR_DOMAIN_JOB_FAILED;
+    if (ret < 0 || virTimeMillisNow(&priv->job.info.timeElapsed) < 0)
         return -1;
-    }
+
     priv->job.info.timeElapsed -= priv->job.start;
 
     ret = -1;
@@ -1784,6 +1783,9 @@ qemuMigrationUpdateJobStatus(virQEMUDriverPtr driver,
 }
 
 
+/* Returns 0 on success, -2 when migration needs to be cancelled, or -1 when
+ * QEMU reports failed migration.
+ */
 static int
 qemuMigrationWaitForCompletion(virQEMUDriverPtr driver, virDomainObjPtr vm,
                                enum qemuDomainAsyncJob asyncJob,
@@ -1814,18 +1816,21 @@ qemuMigrationWaitForCompletion(virQEMUDriverPtr driver, virDomainObjPtr vm,
         struct timespec ts = { .tv_sec = 0, .tv_nsec = 50 * 1000 * 1000ull };
 
         if (qemuMigrationUpdateJobStatus(driver, vm, job, asyncJob) == -1)
-            goto cleanup;
+            break;
 
         /* cancel migration if disk I/O error is emitted while migrating */
         if (abort_on_error &&
             virDomainObjGetState(vm, &pauseReason) == VIR_DOMAIN_PAUSED &&
-            pauseReason == VIR_DOMAIN_PAUSED_IOERROR)
-            goto cancel;
+            pauseReason == VIR_DOMAIN_PAUSED_IOERROR) {
+            virReportError(VIR_ERR_OPERATION_FAILED,
+                           _("%s: %s"), job, _("failed due to I/O error"));
+            break;
+        }
 
         if (dconn && virConnectIsAlive(dconn) <= 0) {
             virReportError(VIR_ERR_OPERATION_FAILED, "%s",
                            _("Lost connection to destination host"));
-            goto cleanup;
+            break;
         }
 
         virObjectUnlock(vm);
@@ -1835,25 +1840,17 @@ qemuMigrationWaitForCompletion(virQEMUDriverPtr driver, virDomainObjPtr vm,
         virObjectLock(vm);
     }
 
- cleanup:
-    if (priv->job.info.type == VIR_DOMAIN_JOB_COMPLETED)
+    if (priv->job.info.type == VIR_DOMAIN_JOB_COMPLETED) {
         return 0;
-    else
+    } else if (priv->job.info.type == VIR_DOMAIN_JOB_UNBOUNDED) {
+        /* The migration was aborted by us rather than QEMU itself so let's
+         * update the job type and notify the caller to send migrate_cancel.
+         */
+        priv->job.info.type = VIR_DOMAIN_JOB_FAILED;
+        return -2;
+    } else {
         return -1;
-
- cancel:
-    if (virDomainObjIsActive(vm)) {
-        if (qemuDomainObjEnterMonitorAsync(driver, vm,
-                                           priv->job.asyncJob) == 0) {
-            qemuMonitorMigrateCancel(priv->mon);
-            qemuDomainObjExitMonitor(driver, vm);
-        }
-    }
-
-    priv->job.info.type = VIR_DOMAIN_JOB_FAILED;
-    virReportError(VIR_ERR_OPERATION_FAILED,
-                   _("%s: %s"), job, _("failed due to I/O error"));
-    return -1;
+    }
 }
 
 
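The hunk above replaces the cleanup:/cancel: labels with a single classification after the loop. The key observation: if the job record still says VIR_DOMAIN_JOB_UNBOUNDED once the loop has exited, QEMU never reported an end state, so the abort must have originated in libvirt. A minimal model of that decision (hypothetical demo* names, not libvirt code):

#include <stdio.h>

enum demoJobType { DEMO_JOB_COMPLETED, DEMO_JOB_UNBOUNDED, DEMO_JOB_FAILED };

/* A job still "unbounded" after the wait loop exits means libvirt, not QEMU,
 * broke out: mark it failed locally and return -2 so the caller cancels. */
static int
demoClassify(enum demoJobType *type)
{
    if (*type == DEMO_JOB_COMPLETED)
        return 0;
    if (*type == DEMO_JOB_UNBOUNDED) {
        *type = DEMO_JOB_FAILED;
        return -2;
    }
    return -1;
}

int
main(void)
{
    enum demoJobType type = DEMO_JOB_UNBOUNDED;
    printf("rc=%d, job type now %d\n", demoClassify(&type), (int)type);
    return 0;
}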
@@ -3229,6 +3226,7 @@ qemuMigrationRun(virQEMUDriverPtr driver,
     virErrorPtr orig_err = NULL;
     unsigned int cookieFlags = 0;
     bool abort_on_error = !!(flags & VIR_MIGRATE_ABORT_ON_ERROR);
+    int rc;
 
     VIR_DEBUG("driver=%p, vm=%p, cookiein=%s, cookieinlen=%d, "
               "cookieout=%p, cookieoutlen=%p, flags=%lx, resource=%lu, "
@@ -3385,9 +3383,12 @@ qemuMigrationRun(virQEMUDriverPtr driver,
         !(iothread = qemuMigrationStartTunnel(spec->fwd.stream, fd)))
         goto cancel;
 
-    if (qemuMigrationWaitForCompletion(driver, vm,
-                                       QEMU_ASYNC_JOB_MIGRATION_OUT,
-                                       dconn, abort_on_error) < 0)
+    rc = qemuMigrationWaitForCompletion(driver, vm,
+                                        QEMU_ASYNC_JOB_MIGRATION_OUT,
+                                        dconn, abort_on_error);
+    if (rc == -2)
+        goto cancel;
+    else if (rc == -1)
         goto cleanup;
 
     /* When migration completed, QEMU will have paused the
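The cancel: label in qemuMigrationRun that the rc == -2 branch jumps to is not shown in this diff; judging from the equivalent sequence removed from qemuMigrationWaitForCompletion above, it enters the monitor, issues migrate_cancel, and exits the monitor, skipping the whole dance when the domain is no longer active. A standalone mock of that sequence (hypothetical demo* names, not libvirt APIs):

#include <stdio.h>
#include <stdbool.h>

/* Stand-ins for qemuDomainObjEnterMonitorAsync, qemuMonitorMigrateCancel
 * and qemuDomainObjExitMonitor. */
static int  demoEnterMonitor(void) { puts("enter monitor"); return 0; }
static void demoSendCancel(void)   { puts("QMP: migrate_cancel"); }
static void demoExitMonitor(void)  { puts("exit monitor"); }

static void
demoCancelPath(bool domain_active)
{
    if (!domain_active)             /* a dead domain has nothing to cancel */
        return;
    if (demoEnterMonitor() == 0) {  /* only talk to QEMU if we own the monitor */
        demoSendCancel();
        demoExitMonitor();
    }
}

int
main(void)
{
    demoCancelPath(true);
    return 0;
}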