diff --git a/include/libvirt/libvirt.h.in b/include/libvirt/libvirt.h.in
index b79e6c04a9..6371b7ba4a 100644
--- a/include/libvirt/libvirt.h.in
+++ b/include/libvirt/libvirt.h.in
@@ -1224,6 +1224,7 @@ typedef enum {
     VIR_MIGRATE_COMPRESSED        = (1 << 11), /* compress data during migration */
     VIR_MIGRATE_ABORT_ON_ERROR    = (1 << 12), /* abort migration on I/O errors happened during migration */
     VIR_MIGRATE_AUTO_CONVERGE     = (1 << 13), /* force convergence */
+    VIR_MIGRATE_RDMA_PIN_ALL      = (1 << 14), /* RDMA memory pinning */
 } virDomainMigrateFlags;
 
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index edb52ab166..44cb826b92 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -1873,6 +1873,46 @@ qemuMigrationSetAutoConverge(virQEMUDriverPtr driver,
 }
 
 
+static int
+qemuMigrationSetPinAll(virQEMUDriverPtr driver,
+                       virDomainObjPtr vm,
+                       qemuDomainAsyncJob job)
+{
+    qemuDomainObjPrivatePtr priv = vm->privateData;
+    int ret;
+
+    if (qemuDomainObjEnterMonitorAsync(driver, vm, job) < 0)
+        return -1;
+
+    ret = qemuMonitorGetMigrationCapability(
+                priv->mon,
+                QEMU_MONITOR_MIGRATION_CAPS_RDMA_PIN_ALL);
+
+    if (ret < 0) {
+        goto cleanup;
+    } else if (ret == 0) {
+        if (job == QEMU_ASYNC_JOB_MIGRATION_IN) {
+            virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
+                           _("rdma pinning migration is not supported by "
+                             "target QEMU binary"));
+        } else {
+            virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
+                           _("rdma pinning migration is not supported by "
+                             "source QEMU binary"));
+        }
+        ret = -1;
+        goto cleanup;
+    }
+
+    ret = qemuMonitorSetMigrationCapability(
+                priv->mon,
+                QEMU_MONITOR_MIGRATION_CAPS_RDMA_PIN_ALL);
+
+ cleanup:
+    qemuDomainObjExitMonitor(driver, vm);
+    return ret;
+}
+
 static int
 qemuMigrationWaitForSpice(virQEMUDriverPtr driver,
                           virDomainObjPtr vm)
@@ -2709,6 +2749,10 @@ qemuMigrationPrepareAny(virQEMUDriverPtr driver,
             goto stop;
     }
 
+    if (flags & VIR_MIGRATE_RDMA_PIN_ALL &&
+        qemuMigrationSetPinAll(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
+        goto stop;
+
     if (mig->lockState) {
         VIR_DEBUG("Received lockstate %s", mig->lockState);
         VIR_FREE(priv->lockState);
@@ -3532,6 +3576,11 @@ qemuMigrationRun(virQEMUDriverPtr driver,
                                    QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
         goto cleanup;
 
+    if (flags & VIR_MIGRATE_RDMA_PIN_ALL &&
+        qemuMigrationSetPinAll(driver, vm,
+                               QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
+        goto cleanup;
+
     if (qemuDomainObjEnterMonitorAsync(driver, vm,
                                        QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
         goto cleanup;
diff --git a/src/qemu/qemu_migration.h b/src/qemu/qemu_migration.h
index 3fa68dc74e..e7a90c3828 100644
--- a/src/qemu/qemu_migration.h
+++ b/src/qemu/qemu_migration.h
@@ -40,7 +40,8 @@
                                  VIR_MIGRATE_OFFLINE |          \
                                  VIR_MIGRATE_COMPRESSED |       \
                                  VIR_MIGRATE_ABORT_ON_ERROR |   \
-                                 VIR_MIGRATE_AUTO_CONVERGE)
+                                 VIR_MIGRATE_AUTO_CONVERGE |    \
+                                 VIR_MIGRATE_RDMA_PIN_ALL)
 
 /* All supported migration parameters and their types. */
 # define QEMU_MIGRATION_PARAMETERS              \
diff --git a/tools/virsh-domain.c b/tools/virsh-domain.c
index 105b99efd3..a6ced5f28f 100644
--- a/tools/virsh-domain.c
+++ b/tools/virsh-domain.c
@@ -9212,6 +9212,10 @@ static const vshCmdOptDef opts_migrate[] = {
      .type = VSH_OT_BOOL,
      .help = N_("force convergence during live migration")
     },
+    {.name = "rdma-pin-all",
+     .type = VSH_OT_BOOL,
+     .help = N_("support memory pinning during RDMA live migration")
+    },
     {.name = "abort-on-error",
      .type = VSH_OT_BOOL,
      .help = N_("abort on soft errors during migration")
@@ -9360,6 +9364,9 @@ doMigrate(void *opaque)
     if (vshCommandOptBool(cmd, "auto-converge"))
         flags |= VIR_MIGRATE_AUTO_CONVERGE;
 
+    if (vshCommandOptBool(cmd, "rdma-pin-all"))
+        flags |= VIR_MIGRATE_RDMA_PIN_ALL;
+
     if (vshCommandOptBool(cmd, "offline")) {
         flags |= VIR_MIGRATE_OFFLINE;
     }
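For reference, here is a minimal usage sketch of the new flag through the public API. It is not part of the patch: the connection URI "qemu:///system", the guest name "guest1", and the destination "qemu+ssh://dst.example.com/system" are placeholders, and an RDMA-capable QEMU and fabric are assumed to be available on both hosts.

/*
 * Hypothetical example: request a live peer-to-peer migration with
 * RDMA memory pinning via VIR_MIGRATE_RDMA_PIN_ALL.
 */
#include <stdio.h>
#include <stdlib.h>
#include <libvirt/libvirt.h>

int main(void)
{
    virConnectPtr conn;
    virDomainPtr dom;
    unsigned long flags = VIR_MIGRATE_LIVE |
                          VIR_MIGRATE_PEER2PEER |
                          VIR_MIGRATE_RDMA_PIN_ALL;   /* new flag */

    conn = virConnectOpen("qemu:///system");          /* placeholder URI */
    if (!conn) {
        fprintf(stderr, "failed to open connection\n");
        return EXIT_FAILURE;
    }

    dom = virDomainLookupByName(conn, "guest1");      /* placeholder name */
    if (!dom) {
        fprintf(stderr, "domain not found\n");
        virConnectClose(conn);
        return EXIT_FAILURE;
    }

    /* With the flag set, the patched qemuMigrationSetPinAll() reports an
     * error if either the source or target QEMU binary lacks the
     * rdma-pin-all migration capability. */
    if (virDomainMigrateToURI(dom, "qemu+ssh://dst.example.com/system",
                              flags, NULL, 0) < 0)
        fprintf(stderr, "migration failed\n");

    virDomainFree(dom);
    virConnectClose(conn);
    return EXIT_SUCCESS;
}

From virsh, the same request would use the new boolean switch added above, e.g. "virsh migrate --live --p2p --rdma-pin-all guest1 qemu+ssh://dst.example.com/system".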