Mirror of https://gitlab.com/libvirt/libvirt.git
qemu: Remove special case for virDomainMigrateSetMaxSpeed
Call qemu monitor command directly within a special job that is only allowed during outgoing migration.
commit d1bd3f57bc
parent 90feb02dd0
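In outline: the API thread used to merely raise QEMU_JOB_SIGNAL_MIGRATE_SPEED and let the migration loop apply the change later; with this commit it takes the new QEMU_JOB_MIGRATION_OP job and issues the monitor command itself. A condensed sketch assembled from the hunks below (not a self-contained function; locking and most error paths elided):

    /* Take the job that the async-job mask only admits while an
     * outgoing migration is running, then talk to qemu directly. */
    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MIGRATION_OP) < 0)
        goto cleanup;

    if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT) {
        ignore_value(qemuDomainObjEnterMonitor(driver, vm));
        ret = qemuMonitorSetMigrationSpeed(priv->mon, bandwidth);
        qemuDomainObjExitMonitor(driver, vm);
    }

    if (qemuDomainObjEndJob(driver, vm) == 0)
        vm = NULL;   /* the domain object went away while we held the job */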
src/qemu/qemu_domain.c:

@@ -52,6 +52,7 @@ VIR_ENUM_IMPL(qemuDomainJob, QEMU_JOB_LAST,
               "destroy",
               "suspend",
               "modify",
+              "migration operation",
               "none",   /* async job is never stored in job.active */
               "async nested",
 );
src/qemu/qemu_domain.h:

@@ -49,6 +49,7 @@ enum qemuDomainJob {
     QEMU_JOB_DESTROY,       /* Destroys the domain (cannot be masked out) */
     QEMU_JOB_SUSPEND,       /* Suspends (stops vCPUs) the domain */
     QEMU_JOB_MODIFY,        /* May change state */
+    QEMU_JOB_MIGRATION_OP,  /* Operation influencing outgoing migration */
 
     /* The following two items must always be the last items before JOB_LAST */
     QEMU_JOB_ASYNC,         /* Asynchronous job */
@@ -75,12 +76,10 @@ enum qemuDomainJobSignals {
     QEMU_JOB_SIGNAL_CANCEL  = 1 << 0, /* Request job cancellation */
     QEMU_JOB_SIGNAL_SUSPEND = 1 << 1, /* Request VM suspend to finish live migration offline */
     QEMU_JOB_SIGNAL_MIGRATE_DOWNTIME = 1 << 2, /* Request migration downtime change */
-    QEMU_JOB_SIGNAL_MIGRATE_SPEED = 1 << 3, /* Request migration speed change */
 };
 
 struct qemuDomainJobSignalsData {
     unsigned long long migrateDowntime; /* Data for QEMU_JOB_SIGNAL_MIGRATE_DOWNTIME */
-    unsigned long migrateBandwidth; /* Data for QEMU_JOB_SIGNAL_MIGRATE_SPEED */
 };
 
 struct qemuDomainJobObj {
src/qemu/qemu_driver.c:

@@ -7965,19 +7965,23 @@ qemuDomainMigrateSetMaxDowntime(virDomainPtr dom,
 
     qemuDriverLock(driver);
     vm = virDomainFindByUUID(&driver->domains, dom->uuid);
+    qemuDriverUnlock(driver);
 
     if (!vm) {
         char uuidstr[VIR_UUID_STRING_BUFLEN];
         virUUIDFormat(dom->uuid, uuidstr);
         qemuReportError(VIR_ERR_NO_DOMAIN,
                         _("no domain with matching uuid '%s'"), uuidstr);
-        goto cleanup;
+        return -1;
     }
 
+    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MIGRATION_OP) < 0)
+        goto cleanup;
+
     if (!virDomainObjIsActive(vm)) {
         qemuReportError(VIR_ERR_OPERATION_INVALID,
                         "%s", _("domain is not running"));
-        goto cleanup;
+        goto endjob;
     }
 
     priv = vm->privateData;
@@ -8034,18 +8038,21 @@ qemuDomainMigrateSetMaxSpeed(virDomainPtr dom,
     if (priv->job.asyncJob != QEMU_ASYNC_JOB_MIGRATION_OUT) {
         qemuReportError(VIR_ERR_OPERATION_INVALID,
                         "%s", _("domain is not being migrated"));
-        goto cleanup;
+        goto endjob;
     }
 
-    VIR_DEBUG("Requesting migration speed change to %luMbs", bandwidth);
-    priv->job.signalsData.migrateBandwidth = bandwidth;
-    priv->job.signals |= QEMU_JOB_SIGNAL_MIGRATE_SPEED;
-    ret = 0;
+    VIR_DEBUG("Setting migration bandwidth to %luMbs", bandwidth);
+    ignore_value(qemuDomainObjEnterMonitor(driver, vm));
+    ret = qemuMonitorSetMigrationSpeed(priv->mon, bandwidth);
+    qemuDomainObjExitMonitor(driver, vm);
+
+endjob:
+    if (qemuDomainObjEndJob(driver, vm) == 0)
+        vm = NULL;
 
 cleanup:
     if (vm)
         virDomainObjUnlock(vm);
-    qemuDriverUnlock(driver);
     return ret;
 }
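For reference, the public entry point served by this change is virDomainMigrateSetMaxSpeed(), available since libvirt 0.9.0. A minimal client-side illustration (the domain name "guest" is a placeholder); with this patch the limit is applied on the qemu monitor synchronously, so a monitor failure is reported to the caller instead of being logged as a warning later:

    #include <libvirt/libvirt.h>

    /* Cap the bandwidth of an in-progress outgoing migration to 32 MiB/s. */
    int cap_migration_speed(virConnectPtr conn)
    {
        virDomainPtr dom = virDomainLookupByName(conn, "guest");
        int ret = -1;

        if (dom) {
            /* bandwidth is in MiB/s; no flags are defined for this call */
            ret = virDomainMigrateSetMaxSpeed(dom, 32, 0);
            virDomainFree(dom);
        }
        return ret;
    }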
src/qemu/qemu_migration.c:

@@ -788,19 +788,6 @@ qemuMigrationProcessJobSignals(struct qemud_driver *driver,
         }
         if (ret < 0)
             VIR_WARN("Unable to set migration downtime");
-    } else if (priv->job.signals & QEMU_JOB_SIGNAL_MIGRATE_SPEED) {
-        unsigned long bandwidth = priv->job.signalsData.migrateBandwidth;
-
-        priv->job.signals ^= QEMU_JOB_SIGNAL_MIGRATE_SPEED;
-        priv->job.signalsData.migrateBandwidth = 0;
-        VIR_DEBUG("Setting migration bandwidth to %luMbs", bandwidth);
-        ret = qemuDomainObjEnterMonitorWithDriver(driver, vm);
-        if (ret == 0) {
-            ret = qemuMonitorSetMigrationSpeed(priv->mon, bandwidth);
-            qemuDomainObjExitMonitorWithDriver(driver, vm);
-        }
-        if (ret < 0)
-            VIR_WARN("Unable to set migration speed");
     } else {
         ret = 0;
     }
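For contrast, the hand-off this hunk deletes, condensed from the removed lines: the API thread recorded the request and raised a flag, and the migration thread consumed it between status polls, so the speed change was asynchronous and a monitor error could only be warned about, never returned to the caller.

    /* API thread: record the request and raise the signal bit ... */
    priv->job.signalsData.migrateBandwidth = bandwidth;
    priv->job.signals |= QEMU_JOB_SIGNAL_MIGRATE_SPEED;

    /* ... migration thread: clear the bit with XOR (it is known to be
     * set) and apply the change; on failure only VIR_WARN is issued. */
    priv->job.signals ^= QEMU_JOB_SIGNAL_MIGRATE_SPEED;
    ret = qemuMonitorSetMigrationSpeed(priv->mon, bandwidth);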
@@ -2883,10 +2870,12 @@ qemuMigrationJobStart(struct qemud_driver *driver,
     if (qemuDomainObjBeginAsyncJobWithDriver(driver, vm, job) < 0)
         return -1;
 
-    if (job == QEMU_ASYNC_JOB_MIGRATION_IN)
+    if (job == QEMU_ASYNC_JOB_MIGRATION_IN) {
         qemuDomainObjSetAsyncJobMask(vm, QEMU_JOB_NONE);
-    else
-        qemuDomainObjSetAsyncJobMask(vm, DEFAULT_JOB_MASK);
+    } else {
+        qemuDomainObjSetAsyncJobMask(vm, DEFAULT_JOB_MASK |
+                                     JOB_MASK(QEMU_JOB_MIGRATION_OP));
+    }
 
     priv->job.info.type = VIR_DOMAIN_JOB_UNBOUNDED;
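This mask change is what makes the new job "only allowed during outgoing migration". A sketch of the gating logic; the two macro bodies are assumptions modeled on qemu_domain.h of this era, not part of the patch:

    /* Assumed definitions; QEMU_JOB_NONE (value 0) gets no bit. */
    #define JOB_MASK(job)     (1 << ((job) - 1))
    #define DEFAULT_JOB_MASK  (JOB_MASK(QEMU_JOB_QUERY) | JOB_MASK(QEMU_JOB_DESTROY))

    /* While QEMU_ASYNC_JOB_MIGRATION_OUT runs, the mask now admits
     * MIGRATION_OP jobs but still rejects e.g. MODIFY jobs: */
    unsigned long mask = DEFAULT_JOB_MASK | JOB_MASK(QEMU_JOB_MIGRATION_OP);
    int op_allowed     = !!(mask & JOB_MASK(QEMU_JOB_MIGRATION_OP));  /* 1 */
    int modify_allowed = !!(mask & JOB_MASK(QEMU_JOB_MODIFY));        /* 0 */

Incoming migration instead sets the mask to QEMU_JOB_NONE, so only QEMU_JOB_DESTROY, which cannot be masked out, may interleave.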
src/qemu/qemu_process.c:

@@ -2451,6 +2451,7 @@ qemuProcessRecoverJob(struct qemud_driver *driver,
          */
         break;
 
+    case QEMU_JOB_MIGRATION_OP:
     case QEMU_JOB_ASYNC:
     case QEMU_JOB_ASYNC_NESTED:
         /* async job was already handled above */