qemu: Remove special case for virDomainMigrateSetMaxSpeed
Call the qemu monitor command directly within a special job (QEMU_JOB_MIGRATION_OP) that is only allowed during outgoing migration, instead of routing the request through the asynchronous job-signal mechanism.
commit d1bd3f57bc
parent 90feb02dd0
src/qemu/qemu_domain.c
@@ -52,6 +52,7 @@ VIR_ENUM_IMPL(qemuDomainJob, QEMU_JOB_LAST,
               "destroy",
               "suspend",
               "modify",
+              "migration operation",
               "none",   /* async job is never stored in job.active */
               "async nested",
 );
src/qemu/qemu_domain.h
@@ -49,6 +49,7 @@ enum qemuDomainJob {
     QEMU_JOB_DESTROY,       /* Destroys the domain (cannot be masked out) */
     QEMU_JOB_SUSPEND,       /* Suspends (stops vCPUs) the domain */
     QEMU_JOB_MODIFY,        /* May change state */
+    QEMU_JOB_MIGRATION_OP,  /* Operation influencing outgoing migration */
 
     /* The following two items must always be the last items before JOB_LAST */
     QEMU_JOB_ASYNC,         /* Asynchronous job */
@@ -75,12 +76,10 @@ enum qemuDomainJobSignals {
     QEMU_JOB_SIGNAL_CANCEL = 1 << 0, /* Request job cancellation */
     QEMU_JOB_SIGNAL_SUSPEND = 1 << 1, /* Request VM suspend to finish live migration offline */
     QEMU_JOB_SIGNAL_MIGRATE_DOWNTIME = 1 << 2, /* Request migration downtime change */
-    QEMU_JOB_SIGNAL_MIGRATE_SPEED = 1 << 3, /* Request migration speed change */
 };
 
 struct qemuDomainJobSignalsData {
     unsigned long long migrateDowntime; /* Data for QEMU_JOB_SIGNAL_MIGRATE_DOWNTIME */
-    unsigned long migrateBandwidth; /* Data for QEMU_JOB_SIGNAL_MIGRATE_SPEED */
 };
 
 struct qemuDomainJobObj {
src/qemu/qemu_driver.c
@@ -7965,19 +7965,23 @@ qemuDomainMigrateSetMaxDowntime(virDomainPtr dom,
 
     qemuDriverLock(driver);
     vm = virDomainFindByUUID(&driver->domains, dom->uuid);
+    qemuDriverUnlock(driver);
 
     if (!vm) {
         char uuidstr[VIR_UUID_STRING_BUFLEN];
         virUUIDFormat(dom->uuid, uuidstr);
         qemuReportError(VIR_ERR_NO_DOMAIN,
                         _("no domain with matching uuid '%s'"), uuidstr);
-        goto cleanup;
+        return -1;
     }
 
+    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MIGRATION_OP) < 0)
+        goto cleanup;
+
     if (!virDomainObjIsActive(vm)) {
         qemuReportError(VIR_ERR_OPERATION_INVALID,
                         "%s", _("domain is not running"));
-        goto cleanup;
+        goto endjob;
     }
 
     priv = vm->privateData;
@@ -8034,18 +8038,21 @@ qemuDomainMigrateSetMaxSpeed(virDomainPtr dom,
     if (priv->job.asyncJob != QEMU_ASYNC_JOB_MIGRATION_OUT) {
         qemuReportError(VIR_ERR_OPERATION_INVALID,
                         "%s", _("domain is not being migrated"));
-        goto cleanup;
+        goto endjob;
     }
 
-    VIR_DEBUG("Requesting migration speed change to %luMbs", bandwidth);
-    priv->job.signalsData.migrateBandwidth = bandwidth;
-    priv->job.signals |= QEMU_JOB_SIGNAL_MIGRATE_SPEED;
-    ret = 0;
+    VIR_DEBUG("Setting migration bandwidth to %luMbs", bandwidth);
+    ignore_value(qemuDomainObjEnterMonitor(driver, vm));
+    ret = qemuMonitorSetMigrationSpeed(priv->mon, bandwidth);
+    qemuDomainObjExitMonitor(driver, vm);
+
+endjob:
+    if (qemuDomainObjEndJob(driver, vm) == 0)
+        vm = NULL;
 
 cleanup:
     if (vm)
         virDomainObjUnlock(vm);
-    qemuDriverUnlock(driver);
     return ret;
 }
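The rewritten qemuDomainMigrateSetMaxSpeed() above follows libvirt's usual job discipline: begin a job, verify the domain state, enter the monitor, issue the command, exit the monitor, end the job (qemuDomainObjEndJob() returning 0 means the domain object was disposed of, hence vm = NULL before cleanup). A toy model of that begin/end serialization, with all names illustrative rather than libvirt's real types:

/* Toy model of the begin/end job discipline; build: gcc job.c -lpthread */
#include <pthread.h>
#include <stdio.h>

struct dom_job {
    pthread_mutex_t lock;
    pthread_cond_t cond;
    int active;        /* currently running job, 0 = none */
    unsigned int mask; /* bitmask of job types allowed to start */
};

/* Wait until no job is active and `job` is permitted, then claim it. */
static void begin_job(struct dom_job *d, int job)
{
    pthread_mutex_lock(&d->lock);
    while (d->active != 0 || !(d->mask & (1U << job)))
        pthread_cond_wait(&d->cond, &d->lock);
    d->active = job;
    pthread_mutex_unlock(&d->lock);
}

/* Release the job and wake any waiters. */
static void end_job(struct dom_job *d)
{
    pthread_mutex_lock(&d->lock);
    d->active = 0;
    pthread_cond_broadcast(&d->cond);
    pthread_mutex_unlock(&d->lock);
}

int main(void)
{
    struct dom_job d = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER,
        0, ~0U /* allow every job type in this demo */
    };

    begin_job(&d, 5);  /* e.g. the migration-op job */
    /* ... enter monitor, migrate_set_speed, exit monitor ... */
    end_job(&d);
    puts("job completed");
    return 0;
}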
src/qemu/qemu_migration.c
@@ -788,19 +788,6 @@ qemuMigrationProcessJobSignals(struct qemud_driver *driver,
         }
         if (ret < 0)
             VIR_WARN("Unable to set migration downtime");
-    } else if (priv->job.signals & QEMU_JOB_SIGNAL_MIGRATE_SPEED) {
-        unsigned long bandwidth = priv->job.signalsData.migrateBandwidth;
-
-        priv->job.signals ^= QEMU_JOB_SIGNAL_MIGRATE_SPEED;
-        priv->job.signalsData.migrateBandwidth = 0;
-        VIR_DEBUG("Setting migration bandwidth to %luMbs", bandwidth);
-        ret = qemuDomainObjEnterMonitorWithDriver(driver, vm);
-        if (ret == 0) {
-            ret = qemuMonitorSetMigrationSpeed(priv->mon, bandwidth);
-            qemuDomainObjExitMonitorWithDriver(driver, vm);
-        }
-        if (ret < 0)
-            VIR_WARN("Unable to set migration speed");
     } else {
         ret = 0;
     }
@@ -2883,10 +2870,12 @@ qemuMigrationJobStart(struct qemud_driver *driver,
     if (qemuDomainObjBeginAsyncJobWithDriver(driver, vm, job) < 0)
         return -1;
 
-    if (job == QEMU_ASYNC_JOB_MIGRATION_IN)
+    if (job == QEMU_ASYNC_JOB_MIGRATION_IN) {
         qemuDomainObjSetAsyncJobMask(vm, QEMU_JOB_NONE);
-    else
-        qemuDomainObjSetAsyncJobMask(vm, DEFAULT_JOB_MASK);
+    } else {
+        qemuDomainObjSetAsyncJobMask(vm, DEFAULT_JOB_MASK |
+                                     JOB_MASK(QEMU_JOB_MIGRATION_OP));
+    }
 
     priv->job.info.type = VIR_DOMAIN_JOB_UNBOUNDED;
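The hunk above is what actually authorizes the new job: while an outgoing migration's async job owns the domain, only job types whitelisted in the async job mask may start. A self-contained sketch of the mask arithmetic (the enum values and the contents of the default mask are simplified assumptions, not copied from libvirt):

#include <stdio.h>

/* Simplified job list mirroring enum qemuDomainJob above */
enum job {
    JOB_NONE = 0,
    JOB_QUERY,
    JOB_DESTROY,
    JOB_SUSPEND,
    JOB_MODIFY,
    JOB_MIGRATION_OP,
};

/* One bit per job type; libvirt's JOB_MASK macro is the same idea */
#define JOB_MASK(job) (1U << (job))

int main(void)
{
    /* assumed default: queries and destroy stay allowed during an async job */
    unsigned int default_mask = JOB_MASK(JOB_QUERY) | JOB_MASK(JOB_DESTROY);

    /* outgoing migration additionally whitelists the migration op,
     * which is what qemuMigrationJobStart now sets up */
    unsigned int migrate_out_mask = default_mask | JOB_MASK(JOB_MIGRATION_OP);

    printf("modify allowed during migration: %s\n",
           migrate_out_mask & JOB_MASK(JOB_MODIFY) ? "yes" : "no");       /* no  */
    printf("speed change allowed during migration: %s\n",
           migrate_out_mask & JOB_MASK(JOB_MIGRATION_OP) ? "yes" : "no"); /* yes */
    return 0;
}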
src/qemu/qemu_process.c
@@ -2451,6 +2451,7 @@ qemuProcessRecoverJob(struct qemud_driver *driver,
          */
         break;
 
+    case QEMU_JOB_MIGRATION_OP:
     case QEMU_JOB_ASYNC:
     case QEMU_JOB_ASYNC_NESTED:
         /* async job was already handled above */