mirror of
https://gitlab.com/libvirt/libvirt.git
synced 2024-12-23 22:25:25 +00:00
qemu: Create wrapper for qemuMonitorMigrateCancel

We will need a little bit more code around qemuMonitorMigrateCancel to make sure it works as expected. The new qemuMigrationSrcCancel helper will avoid repeating the code in several places.

Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
Reviewed-by: Peter Krempa <pkrempa@redhat.com>
This commit is contained in:
parent
0ff8c175f7
commit
4e55fe21b5
@ -12810,17 +12810,10 @@ qemuDomainGetJobStats(virDomainPtr dom,
|
||||
static int
|
||||
qemuDomainAbortJobMigration(virDomainObj *vm)
|
||||
{
|
||||
qemuDomainObjPrivate *priv = vm->privateData;
|
||||
int ret;
|
||||
|
||||
VIR_DEBUG("Cancelling migration job at client request");
|
||||
|
||||
qemuDomainObjAbortAsyncJob(vm);
|
||||
qemuDomainObjEnterMonitor(vm);
|
||||
ret = qemuMonitorMigrateCancel(priv->mon);
|
||||
qemuDomainObjExitMonitor(vm);
|
||||
|
||||
return ret;
|
||||
return qemuMigrationSrcCancel(vm, VIR_ASYNC_JOB_NONE);
|
||||
}
|
||||
|
||||
|
||||
|
@ -4611,6 +4611,32 @@ qemuMigrationSrcStart(virDomainObj *vm,
|
||||
}
|
||||
|
||||
|
||||
/**
 * qemuMigrationSrcCancel:
 * @vm: domain object with an outgoing migration to cancel
 * @asyncJob: async job the caller owns (VIR_ASYNC_JOB_NONE if none)
 *
 * Requests outgoing migration to be canceled.
 *
 * The thread (the caller itself in most cases) which is watching the migration
 * will do all the cleanup once migration is canceled. If no thread is watching
 * the migration, use qemuMigrationSrcCancelUnattended instead.
 *
 * Returns 0 on success, -1 if the monitor could not be entered for
 * @asyncJob or if the migrate_cancel monitor command failed.
 */
int
qemuMigrationSrcCancel(virDomainObj *vm,
                       virDomainAsyncJob asyncJob)
{
    qemuDomainObjPrivate *priv = vm->privateData;
    int rc;

    VIR_DEBUG("Cancelling outgoing migration of domain %s", vm->def->name);

    /* Entering the monitor may fail (e.g. domain died); propagate that. */
    if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0)
        return -1;

    rc = qemuMonitorMigrateCancel(priv->mon);
    qemuDomainObjExitMonitor(vm);

    return rc;
}
|
||||
|
||||
|
||||
static int
|
||||
qemuMigrationSrcRun(virQEMUDriver *driver,
|
||||
virDomainObj *vm,
|
||||
@ -4952,11 +4978,8 @@ qemuMigrationSrcRun(virQEMUDriver *driver,
|
||||
}
|
||||
|
||||
if (cancel &&
|
||||
priv->job.current->status != VIR_DOMAIN_JOB_STATUS_HYPERVISOR_COMPLETED &&
|
||||
qemuDomainObjEnterMonitorAsync(vm, VIR_ASYNC_JOB_MIGRATION_OUT) == 0) {
|
||||
qemuMonitorMigrateCancel(priv->mon);
|
||||
qemuDomainObjExitMonitor(vm);
|
||||
}
|
||||
priv->job.current->status != VIR_DOMAIN_JOB_STATUS_HYPERVISOR_COMPLETED)
|
||||
qemuMigrationSrcCancel(vm, VIR_ASYNC_JOB_MIGRATION_OUT);
|
||||
|
||||
/* cancel any outstanding NBD jobs */
|
||||
if (mig && mig->nbd)
|
||||
@ -6900,11 +6923,8 @@ qemuMigrationSrcToFile(virQEMUDriver *driver, virDomainObj *vm,
|
||||
if (rc == -2) {
|
||||
virErrorPreserveLast(&orig_err);
|
||||
virCommandAbort(compressor);
|
||||
if (virDomainObjIsActive(vm) &&
|
||||
qemuDomainObjEnterMonitorAsync(vm, asyncJob) == 0) {
|
||||
qemuMonitorMigrateCancel(priv->mon);
|
||||
qemuDomainObjExitMonitor(vm);
|
||||
}
|
||||
if (virDomainObjIsActive(vm))
|
||||
qemuMigrationSrcCancel(vm, asyncJob);
|
||||
}
|
||||
goto cleanup;
|
||||
}
|
||||
@ -6949,16 +6969,13 @@ qemuMigrationSrcToFile(virQEMUDriver *driver, virDomainObj *vm,
|
||||
int
|
||||
qemuMigrationSrcCancelUnattended(virDomainObj *vm)
|
||||
{
|
||||
qemuDomainObjPrivate *priv = vm->privateData;
|
||||
bool storage = false;
|
||||
size_t i;
|
||||
|
||||
VIR_DEBUG("Canceling unfinished outgoing migration of domain %s",
|
||||
vm->def->name);
|
||||
|
||||
qemuDomainObjEnterMonitor(vm);
|
||||
ignore_value(qemuMonitorMigrateCancel(priv->mon));
|
||||
qemuDomainObjExitMonitor(vm);
|
||||
qemuMigrationSrcCancel(vm, VIR_ASYNC_JOB_NONE);
|
||||
|
||||
for (i = 0; i < vm->def->ndisks; i++) {
|
||||
virDomainDiskDef *disk = vm->def->disks[i];
|
||||
|
@ -243,6 +243,10 @@ qemuMigrationSrcToFile(virQEMUDriver *driver,
|
||||
int
|
||||
qemuMigrationSrcCancelUnattended(virDomainObj *vm);
|
||||
|
||||
int
|
||||
qemuMigrationSrcCancel(virDomainObj *vm,
|
||||
virDomainAsyncJob asyncJob);
|
||||
|
||||
int
|
||||
qemuMigrationAnyFetchStats(virDomainObj *vm,
|
||||
virDomainAsyncJob asyncJob,
|
||||
|
@ -3674,7 +3674,6 @@ qemuProcessRecoverJob(virQEMUDriver *driver,
|
||||
virDomainJobObj *job,
|
||||
unsigned int *stopFlags)
|
||||
{
|
||||
qemuDomainObjPrivate *priv = vm->privateData;
|
||||
virDomainState state;
|
||||
int reason;
|
||||
|
||||
@ -3697,9 +3696,7 @@ qemuProcessRecoverJob(virQEMUDriver *driver,
|
||||
case VIR_ASYNC_JOB_SAVE:
|
||||
case VIR_ASYNC_JOB_DUMP:
|
||||
case VIR_ASYNC_JOB_SNAPSHOT:
|
||||
qemuDomainObjEnterMonitor(vm);
|
||||
ignore_value(qemuMonitorMigrateCancel(priv->mon));
|
||||
qemuDomainObjExitMonitor(vm);
|
||||
qemuMigrationSrcCancel(vm, VIR_ASYNC_JOB_NONE);
|
||||
/* resume the domain but only if it was paused as a result of
|
||||
* running a migration-to-file operation. Although we are
|
||||
* recovering an async job, this function is run at startup
|
||||
|
Loading…
Reference in New Issue
Block a user