qemu_migration: Restore original memory locking limit

For RDMA migration we update the memory locking limit, but we never set
it back once migration finishes (on the destination host) or aborts (on
the source host).

Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
Reviewed-by: Ján Tomko <jtomko@redhat.com>
commit d4d3bb8130
parent 22ee8cbf09
Author: Jiri Denemark <jdenemar@redhat.com>
Date:   2022-06-22 16:12:02 +02:00

3 changed files with 23 additions and 2 deletions
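
The change follows a plain save-and-restore pattern for a process
resource limit: before RDMA migration raises the locked-memory limit,
the current value is stashed, and once the migration job ends the
stashed value is put back. Below is a minimal standalone sketch of that
pattern using POSIX getrlimit()/setrlimit(). It only illustrates the
idea, it is not libvirt's code: libvirt adjusts the limit of the QEMU
process through its own helpers, while this sketch changes the calling
process, and the function names (raise_memlock, restore_memlock) are
invented for the example.

    #include <sys/resource.h>

    /* Saved RLIMIT_MEMLOCK value; 0 means "nothing to restore". */
    static unsigned long long pre_migration_memlock;

    /* Raise the locked-memory limit, remembering the original value. */
    static int
    raise_memlock(unsigned long long bytes)
    {
        struct rlimit lim;

        if (getrlimit(RLIMIT_MEMLOCK, &lim) < 0)
            return -1;

        if (pre_migration_memlock == 0)
            pre_migration_memlock = lim.rlim_cur; /* save the old limit */

        lim.rlim_cur = lim.rlim_max = bytes;
        return setrlimit(RLIMIT_MEMLOCK, &lim);
    }

    /* Restore the original limit once migration finishes or aborts. */
    static int
    restore_memlock(void)
    {
        struct rlimit lim;

        if (pre_migration_memlock == 0)
            return 0; /* the limit was never changed */

        lim.rlim_cur = lim.rlim_max = pre_migration_memlock;
        pre_migration_memlock = 0;
        return setrlimit(RLIMIT_MEMLOCK, &lim);
    }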

src/qemu/qemu_domain.c

@@ -2413,6 +2413,11 @@ qemuDomainObjPrivateXMLFormat(virBuffer *buf,
                           priv->originalMemlock);
     }
 
+    if (priv->preMigrationMemlock > 0) {
+        virBufferAsprintf(buf, "<preMigrationMemlock>%llu</preMigrationMemlock>\n",
+                          priv->preMigrationMemlock);
+    }
+
     return 0;
 }
@@ -3139,6 +3144,13 @@ qemuDomainObjPrivateXMLParse(xmlXPathContextPtr ctxt,
         return -1;
     }
 
+    if (virXPathULongLong("string(./preMigrationMemlock)", ctxt,
+                          &priv->preMigrationMemlock) == -2) {
+        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+                       _("failed to parse pre-migration memlock limit"));
+        return -1;
+    }
+
     return 0;
 }
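
Taken together, the format and parse hunks round-trip the saved limit
through the domain's private status XML, so an original limit recorded
when migration started survives a libvirtd restart in the middle of the
job. For a saved limit of 64 MiB (an illustrative value), the status XML
would carry an element like:

    <preMigrationMemlock>67108864</preMigrationMemlock>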

src/qemu/qemu_domain.h

@@ -140,6 +140,9 @@ struct _qemuDomainObjPrivate {
     int nbdPort; /* Port used for migration with NBD */
     unsigned short migrationPort;
     int preMigrationState;
+    unsigned long long preMigrationMemlock; /* Original RLIMIT_MEMLOCK in case
+                                               it was changed for the current
+                                               migration job. */
 
     virChrdevs *devs;

src/qemu/qemu_migration.c

@@ -3181,7 +3181,8 @@ qemuMigrationDstPrepareActive(virQEMUDriver *driver,
 
     if (STREQ_NULLABLE(protocol, "rdma") &&
         vm->def->mem.hard_limit > 0 &&
-        qemuDomainSetMaxMemLock(vm, vm->def->mem.hard_limit << 10, NULL) < 0) {
+        qemuDomainSetMaxMemLock(vm, vm->def->mem.hard_limit << 10,
+                                &priv->preMigrationMemlock) < 0) {
         goto error;
     }
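
Every call-site change in this file leans on the contract of
qemuDomainSetMaxMemLock()'s last parameter, which this commit starts
using: when raising the limit, the previous value is saved through the
pointer (when one is passed), and a later call asking for 0 bytes
restores and clears that saved value. A rough sketch of that contract
as the callers rely on it follows; the two helpers
(currentMemlockLimit, applyMemlockLimit) are hypothetical stand-ins for
libvirt's internal plumbing, and error handling is trimmed:

    /* Sketch of the contract only, not libvirt's actual implementation. */
    int
    qemuDomainSetMaxMemLock(virDomainObj *vm,
                            unsigned long long bytes,
                            unsigned long long *origPtr)
    {
        if (bytes > 0) {
            /* Raising the limit: save the current one first, unless an
             * earlier call in this job already did. */
            if (origPtr && *origPtr == 0)
                *origPtr = currentMemlockLimit(vm); /* hypothetical helper */
            return applyMemlockLimit(vm, bytes);    /* hypothetical helper */
        }

        /* bytes == 0 means "restore": put back the saved limit, if any,
         * and forget it so a second restore is a no-op. */
        if (origPtr && *origPtr > 0) {
            unsigned long long orig = *origPtr;
            *origPtr = 0;
            return applyMemlockLimit(vm, orig);
        }
        return 0;
    }

This also explains the one asymmetric call site below:
qemuMigrationSrcComplete() merely zeroes priv->preMigrationMemlock
rather than restoring, since after a successful migration the source
QEMU process the limit applied to is already gone.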
@@ -3945,6 +3946,7 @@ qemuMigrationSrcComplete(virQEMUDriver *driver,
                                                   VIR_DOMAIN_EVENT_STOPPED_MIGRATED);
         virObjectEventStateQueue(driver->domainEventState, event);
         qemuDomainEventEmitJobCompleted(driver, vm);
+        priv->preMigrationMemlock = 0;
     }
@@ -4035,6 +4037,7 @@ qemuMigrationSrcConfirmPhase(virQEMUDriver *driver,
         qemuMigrationParamsReset(driver, vm, VIR_ASYNC_JOB_MIGRATION_OUT,
                                  jobPriv->migParams, priv->job.apiFlags);
+        qemuDomainSetMaxMemLock(vm, 0, &priv->preMigrationMemlock);
     }
 
     qemuDomainSaveStatus(vm);
@@ -4615,7 +4618,8 @@ qemuMigrationSrcStart(virDomainObj *vm,
     case MIGRATION_DEST_HOST:
         if (STREQ(spec->dest.host.protocol, "rdma") &&
             vm->def->mem.hard_limit > 0 &&
-            qemuDomainSetMaxMemLock(vm, vm->def->mem.hard_limit << 10, NULL) < 0) {
+            qemuDomainSetMaxMemLock(vm, vm->def->mem.hard_limit << 10,
+                                    &priv->preMigrationMemlock) < 0) {
             return -1;
         }
         return qemuMonitorMigrateToHost(priv->mon, migrateFlags,
@@ -6155,6 +6159,7 @@ qemuMigrationSrcPerformPhase(virQEMUDriver *driver,
         qemuMigrationSrcRestoreDomainState(driver, vm);
         qemuMigrationParamsReset(driver, vm, VIR_ASYNC_JOB_MIGRATION_OUT,
                                  jobPriv->migParams, priv->job.apiFlags);
+        qemuDomainSetMaxMemLock(vm, 0, &priv->preMigrationMemlock);
 
         qemuMigrationJobFinish(vm);
     } else {
         if (ret < 0)
@@ -6411,6 +6416,7 @@ qemuMigrationDstComplete(virQEMUDriver *driver,
     virPortAllocatorRelease(priv->migrationPort);
     priv->migrationPort = 0;
+    qemuDomainSetMaxMemLock(vm, 0, &priv->preMigrationMemlock);
 }