qemu: Create wrapper for qemuMonitorMigrateCancel

We will need a little bit more code around qemuMonitorMigrateCancel to
make sure it works as expected. The new qemuMigrationSrcCancel helper
will avoid repeating the code in several places.

Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
Reviewed-by: Peter Krempa <pkrempa@redhat.com>
Jiri Denemark 2022-08-30 12:29:02 +02:00
parent 0ff8c175f7
commit 4e55fe21b5
4 changed files with 37 additions and 26 deletions
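
For illustration only (not part of this commit): a minimal sketch of the open-coded sequence each call site repeated before this change, next to the single call that replaces it. The function names exampleCancelOutgoingOld/New are hypothetical; qemuDomainObjEnterMonitorAsync, qemuMonitorMigrateCancel, qemuDomainObjExitMonitor and qemuMigrationSrcCancel are the helpers shown in the hunks below.

/* Hypothetical caller, before this commit: every call site had to enter the
 * monitor, issue migrate_cancel and leave the monitor by hand. */
static int
exampleCancelOutgoingOld(virDomainObj *vm)
{
    qemuDomainObjPrivate *priv = vm->privateData;
    int rc;

    if (qemuDomainObjEnterMonitorAsync(vm, VIR_ASYNC_JOB_MIGRATION_OUT) < 0)
        return -1;
    rc = qemuMonitorMigrateCancel(priv->mon);
    qemuDomainObjExitMonitor(vm);
    return rc;
}

/* The same caller after this commit: the enter/cancel/exit sequence lives in
 * the new wrapper, so the call site shrinks to a single line. */
static int
exampleCancelOutgoingNew(virDomainObj *vm)
{
    return qemuMigrationSrcCancel(vm, VIR_ASYNC_JOB_MIGRATION_OUT);
}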


@@ -12810,17 +12810,10 @@ qemuDomainGetJobStats(virDomainPtr dom,
 static int
 qemuDomainAbortJobMigration(virDomainObj *vm)
 {
-    qemuDomainObjPrivate *priv = vm->privateData;
-    int ret;
-
     VIR_DEBUG("Cancelling migration job at client request");
 
     qemuDomainObjAbortAsyncJob(vm);
-    qemuDomainObjEnterMonitor(vm);
-    ret = qemuMonitorMigrateCancel(priv->mon);
-    qemuDomainObjExitMonitor(vm);
-
-    return ret;
+    return qemuMigrationSrcCancel(vm, VIR_ASYNC_JOB_NONE);
 }


@@ -4611,6 +4611,32 @@ qemuMigrationSrcStart(virDomainObj *vm,
 }
 
 
+/**
+ * Requests outgoing migration to be canceled.
+ *
+ * The thread (the caller itself in most cases) which is watching the migration
+ * will do all the cleanup once migration is canceled. If no thread is watching
+ * the migration, use qemuMigrationSrcCancelUnattended instead.
+ */
+int
+qemuMigrationSrcCancel(virDomainObj *vm,
+                       virDomainAsyncJob asyncJob)
+{
+    qemuDomainObjPrivate *priv = vm->privateData;
+    int rc;
+
+    VIR_DEBUG("Cancelling outgoing migration of domain %s", vm->def->name);
+
+    if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0)
+        return -1;
+
+    rc = qemuMonitorMigrateCancel(priv->mon);
+    qemuDomainObjExitMonitor(vm);
+
+    return rc;
+}
+
+
 static int
 qemuMigrationSrcRun(virQEMUDriver *driver,
                     virDomainObj *vm,
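
A hedged sketch of how the distinction drawn in the doc comment above might play out at a call site: the new wrapper is meant for the case where a thread is still watching the migration, while recovery paths with nobody watching go through qemuMigrationSrcCancelUnattended. The helper below and its watched flag are assumptions for illustration, not libvirt code.

/* Hypothetical helper: pick the cancellation path depending on whether a
 * thread is still watching the outgoing migration ('watched' is an assumed
 * flag for illustration, not a real libvirt field). */
static void
exampleCancelOutgoingMigration(virDomainObj *vm, bool watched)
{
    if (watched) {
        /* The watching thread cleans up once QEMU reports the cancellation,
         * so only the migrate_cancel command needs to be issued here. */
        ignore_value(qemuMigrationSrcCancel(vm, VIR_ASYNC_JOB_MIGRATION_OUT));
    } else {
        /* Nobody is watching: also tear down any outstanding disk mirroring. */
        ignore_value(qemuMigrationSrcCancelUnattended(vm));
    }
}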
@@ -4952,11 +4978,8 @@ qemuMigrationSrcRun(virQEMUDriver *driver,
     }
 
     if (cancel &&
-        priv->job.current->status != VIR_DOMAIN_JOB_STATUS_HYPERVISOR_COMPLETED &&
-        qemuDomainObjEnterMonitorAsync(vm, VIR_ASYNC_JOB_MIGRATION_OUT) == 0) {
-        qemuMonitorMigrateCancel(priv->mon);
-        qemuDomainObjExitMonitor(vm);
-    }
+        priv->job.current->status != VIR_DOMAIN_JOB_STATUS_HYPERVISOR_COMPLETED)
+        qemuMigrationSrcCancel(vm, VIR_ASYNC_JOB_MIGRATION_OUT);
 
     /* cancel any outstanding NBD jobs */
     if (mig && mig->nbd)
@@ -6900,11 +6923,8 @@ qemuMigrationSrcToFile(virQEMUDriver *driver, virDomainObj *vm,
         if (rc == -2) {
             virErrorPreserveLast(&orig_err);
             virCommandAbort(compressor);
-            if (virDomainObjIsActive(vm) &&
-                qemuDomainObjEnterMonitorAsync(vm, asyncJob) == 0) {
-                qemuMonitorMigrateCancel(priv->mon);
-                qemuDomainObjExitMonitor(vm);
-            }
+            if (virDomainObjIsActive(vm))
+                qemuMigrationSrcCancel(vm, asyncJob);
         }
         goto cleanup;
     }
@@ -6949,16 +6969,13 @@ qemuMigrationSrcToFile(virQEMUDriver *driver, virDomainObj *vm,
 int
 qemuMigrationSrcCancelUnattended(virDomainObj *vm)
 {
-    qemuDomainObjPrivate *priv = vm->privateData;
     bool storage = false;
     size_t i;
 
     VIR_DEBUG("Canceling unfinished outgoing migration of domain %s",
               vm->def->name);
 
-    qemuDomainObjEnterMonitor(vm);
-    ignore_value(qemuMonitorMigrateCancel(priv->mon));
-    qemuDomainObjExitMonitor(vm);
+    qemuMigrationSrcCancel(vm, VIR_ASYNC_JOB_NONE);
 
     for (i = 0; i < vm->def->ndisks; i++) {
         virDomainDiskDef *disk = vm->def->disks[i];


@@ -243,6 +243,10 @@ qemuMigrationSrcToFile(virQEMUDriver *driver,
 int
 qemuMigrationSrcCancelUnattended(virDomainObj *vm);
 
+int
+qemuMigrationSrcCancel(virDomainObj *vm,
+                       virDomainAsyncJob asyncJob);
+
 int
 qemuMigrationAnyFetchStats(virDomainObj *vm,
                            virDomainAsyncJob asyncJob,


@@ -3674,7 +3674,6 @@ qemuProcessRecoverJob(virQEMUDriver *driver,
                       virDomainJobObj *job,
                       unsigned int *stopFlags)
 {
-    qemuDomainObjPrivate *priv = vm->privateData;
     virDomainState state;
     int reason;
 
@@ -3697,9 +3696,7 @@ qemuProcessRecoverJob(virQEMUDriver *driver,
     case VIR_ASYNC_JOB_SAVE:
     case VIR_ASYNC_JOB_DUMP:
     case VIR_ASYNC_JOB_SNAPSHOT:
-        qemuDomainObjEnterMonitor(vm);
-        ignore_value(qemuMonitorMigrateCancel(priv->mon));
-        qemuDomainObjExitMonitor(vm);
+        qemuMigrationSrcCancel(vm, VIR_ASYNC_JOB_NONE);
         /* resume the domain but only if it was paused as a result of
          * running a migration-to-file operation. Although we are
          * recovering an async job, this function is run at startup