mirror of https://gitlab.com/libvirt/libvirt.git
synced 2024-12-22 05:35:25 +00:00
qemu: allow blkstat/blkinfo calls during migration
Originally, most libvirt domain-specific calls were blocking during a migration. A new mechanism now allows specific calls (blkstat/blkinfo) to be executed in that situation. In the long term it would be desirable to have a more general way of marking further APIs as migration-safe, without needing special-case code.

* src/qemu/qemu_migration.c: add additional job signal flags for doing blkstat/blkinfo during a migration
* src/qemu/qemu_domain.c: add a condition variable that can be used to efficiently wait for the migration code to clear the signal flag
* src/qemu/qemu_driver.c: execute blkstat/blkinfo using the job signal flags during migration
This commit is contained in:
parent fffea7fed7
commit 18c2a59206
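At its core, the change is a small producer/consumer handshake: the query path (qemudDomainBlockStats / qemuDomainGetBlockInfo) publishes its request in priv->jobSignalsData, raises a bit in priv->jobSignals, and sleeps on the new signalCond until the migration loop has serviced the request and cleared the bit, while qemuMigrationWaitForCompletion() drains pending signals on every 50ms poll iteration and broadcasts the condition variable. Below is a minimal, standalone sketch of that handshake, using plain pthreads instead of libvirt's virMutex/virCond wrappers. The flag value and the 50ms poll interval mirror the patch; the function names, the fake 4096-byte result, and everything else around them are illustrative assumptions, not libvirt code.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define JOB_SIGNAL_BLKSTAT (1 << 4)     /* same bit value as QEMU_JOB_SIGNAL_BLKSTAT */

static pthread_mutex_t vm_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t signal_cond = PTHREAD_COND_INITIALIZER;  /* plays the role of priv->signalCond */
static unsigned int job_signals;        /* plays the role of priv->jobSignals */
static long long stat_result;           /* single data slot, like qemuDomainJobSignalsData */
static bool migration_done;

/* Query side: what qemudDomainBlockStats() does when a migration job is active. */
static long long query_blkstat(void)
{
    long long res;

    pthread_mutex_lock(&vm_lock);

    /* Wait until any earlier BLKSTAT request has been consumed. */
    while (job_signals & JOB_SIGNAL_BLKSTAT)
        pthread_cond_wait(&signal_cond, &vm_lock);

    /* Publish the request, then wait for the migration loop to clear the bit. */
    job_signals |= JOB_SIGNAL_BLKSTAT;
    while (job_signals & JOB_SIGNAL_BLKSTAT)
        pthread_cond_wait(&signal_cond, &vm_lock);

    res = stat_result;
    pthread_mutex_unlock(&vm_lock);
    return res;
}

/* Migration side: the drain-and-broadcast step of qemuMigrationWaitForCompletion(). */
static void *migration_loop(void *arg)
{
    (void)arg;
    for (;;) {
        pthread_mutex_lock(&vm_lock);
        if (migration_done) {
            pthread_mutex_unlock(&vm_lock);
            break;
        }
        if (job_signals & JOB_SIGNAL_BLKSTAT) {
            stat_result = 4096;                 /* the real code asks the QEMU monitor here */
            job_signals &= ~JOB_SIGNAL_BLKSTAT; /* request serviced, clear the flag */
        }
        pthread_cond_broadcast(&signal_cond);   /* wake any query waiting on the flag */
        pthread_mutex_unlock(&vm_lock);
        usleep(50 * 1000);                      /* poll every 50ms, like the driver loop */
    }
    return NULL;
}

int main(void)
{
    pthread_t mig;

    pthread_create(&mig, NULL, migration_loop, NULL);
    printf("blkstat while 'migrating': %lld bytes\n", query_blkstat());

    pthread_mutex_lock(&vm_lock);
    migration_done = true;
    pthread_mutex_unlock(&vm_lock);
    pthread_join(mig, NULL);
    return 0;
}

The two-phase wait in the query side (first for any earlier request to drain, then for its own request to complete) is what lets concurrent blkstat/blkinfo callers share the single data slot in qemuDomainJobSignalsData safely.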
.mailmap (1 change)

@@ -20,3 +20,4 @@
 <cfergeau@redhat.com> <teuf@gnome.org>
 <wency@cn.fujitsu.com> <wency cn fujitsu com>
 <cardoe@cardoe.com> <cardoe@gentoo.org>
+<fsimonce@redhat.com> <federico.simoncelli@gmail.com>
AUTHORS (2 changes)

@@ -83,7 +83,7 @@ Patches have also been contributed by:
   Abel Míguez Rodríguez<amiguezr@pdi.ucm.es>
   Doug Goldstein <cardoe@cardoe.com>
   Javier Fontan <jfontan@gmail.com>
-  Federico Simoncelli <federico.simoncelli@gmail.com>
+  Federico Simoncelli <fsimonce@redhat.com>
   Amy Griffis <amy.griffis@hp.com>
   Henrik Persson E <henrik.e.persson@ericsson.com>
   Satoru SATOH <satoru.satoh@gmail.com>
src/qemu/qemu_domain.c

@@ -89,7 +89,19 @@ static void *qemuDomainObjPrivateAlloc(void)
     if (VIR_ALLOC(priv) < 0)
         return NULL;
 
+    if (virCondInit(&priv->jobCond) < 0)
+        goto initfail;
+
+    if (virCondInit(&priv->signalCond) < 0) {
+        ignore_value(virCondDestroy(&priv->jobCond));
+        goto initfail;
+    }
+
     return priv;
+
+initfail:
+    VIR_FREE(priv);
+    return NULL;
 }
 
 static void qemuDomainObjPrivateFree(void *data)

@@ -101,6 +113,8 @@ static void qemuDomainObjPrivateFree(void *data)
     qemuDomainPCIAddressSetFree(priv->pciaddrs);
     virDomainChrSourceDefFree(priv->monConfig);
     VIR_FREE(priv->vcpupids);
+    ignore_value(virCondDestroy(&priv->jobCond));
+    ignore_value(virCondDestroy(&priv->signalCond));
 
     /* This should never be non-NULL if we get here, but just in case... */
     if (priv->mon) {
src/qemu/qemu_domain.h

@@ -47,11 +47,19 @@ enum qemuDomainJobSignals {
     QEMU_JOB_SIGNAL_SUSPEND = 1 << 1, /* Request VM suspend to finish live migration offline */
     QEMU_JOB_SIGNAL_MIGRATE_DOWNTIME = 1 << 2, /* Request migration downtime change */
     QEMU_JOB_SIGNAL_MIGRATE_SPEED = 1 << 3, /* Request migration speed change */
+    QEMU_JOB_SIGNAL_BLKSTAT = 1 << 4, /* Request blkstat during migration */
+    QEMU_JOB_SIGNAL_BLKINFO = 1 << 5, /* Request blkinfo during migration */
 };
 
 struct qemuDomainJobSignalsData {
     unsigned long long migrateDowntime; /* Data for QEMU_JOB_SIGNAL_MIGRATE_DOWNTIME */
     unsigned long migrateBandwidth; /* Data for QEMU_JOB_SIGNAL_MIGRATE_SPEED */
+    char *statDevName; /* Device name used by blkstat calls */
+    virDomainBlockStatsPtr blockStat; /* Block statistics for QEMU_JOB_SIGNAL_BLKSTAT */
+    int *statRetCode; /* Return code for the blkstat calls */
+    char *infoDevName; /* Device name used by blkinfo calls */
+    virDomainBlockInfoPtr blockInfo; /* Block information for QEMU_JOB_SIGNAL_BLKINFO */
+    int *infoRetCode; /* Return code for the blkinfo calls */
 };
 
 typedef struct _qemuDomainPCIAddressSet qemuDomainPCIAddressSet;

@@ -61,6 +69,7 @@ typedef struct _qemuDomainObjPrivate qemuDomainObjPrivate;
 typedef qemuDomainObjPrivate *qemuDomainObjPrivatePtr;
 struct _qemuDomainObjPrivate {
     virCond jobCond; /* Use in conjunction with main virDomainObjPtr lock */
+    virCond signalCond; /* Use to coordinate the safe queries during migration */
     enum qemuDomainJob jobActive; /* Currently running job */
     unsigned int jobSignals; /* Signals for running job */
     struct qemuDomainJobSignalsData jobSignalsData; /* Signal specific data */
src/qemu/qemu_driver.c

@@ -5278,15 +5278,6 @@ qemudDomainBlockStats (virDomainPtr dom,
         goto cleanup;
     }
 
-    if (qemuDomainObjBeginJob(vm) < 0)
-        goto cleanup;
-
-    if (!virDomainObjIsActive(vm)) {
-        qemuReportError(VIR_ERR_OPERATION_INVALID,
-                        "%s", _("domain is not running"));
-        goto endjob;
-    }
-
     for (i = 0 ; i < vm->def->ndisks ; i++) {
         if (STREQ(path, vm->def->disks[i]->dst)) {
             disk = vm->def->disks[i];
@@ -5297,16 +5288,42 @@ qemudDomainBlockStats (virDomainPtr dom,
     if (!disk) {
         qemuReportError(VIR_ERR_INVALID_ARG,
                         _("invalid path: %s"), path);
-        goto endjob;
+        goto cleanup;
     }
 
     if (!disk->info.alias) {
         qemuReportError(VIR_ERR_INTERNAL_ERROR,
                         _("missing disk device alias name for %s"), disk->dst);
-        goto endjob;
+        goto cleanup;
     }
 
+    priv = vm->privateData;
+    if ((priv->jobActive == QEMU_JOB_MIGRATION_OUT)
+        || (priv->jobActive == QEMU_JOB_SAVE)) {
+        virDomainObjRef(vm);
+        while (priv->jobSignals & QEMU_JOB_SIGNAL_BLKSTAT)
+            ignore_value(virCondWait(&priv->signalCond, &vm->lock));
+
+        priv->jobSignalsData.statDevName = disk->info.alias;
+        priv->jobSignalsData.blockStat = stats;
+        priv->jobSignalsData.statRetCode = &ret;
+        priv->jobSignals |= QEMU_JOB_SIGNAL_BLKSTAT;
+
+        while (priv->jobSignals & QEMU_JOB_SIGNAL_BLKSTAT)
+            ignore_value(virCondWait(&priv->signalCond, &vm->lock));
+
+        if (virDomainObjUnref(vm) == 0)
+            vm = NULL;
+    } else {
+        if (qemuDomainObjBeginJob(vm) < 0)
+            goto cleanup;
+
+        if (!virDomainObjIsActive(vm)) {
+            qemuReportError(VIR_ERR_OPERATION_INVALID,
+                            "%s", _("domain is not running"));
+            goto endjob;
+        }
+
         qemuDomainObjEnterMonitor(vm);
         ret = qemuMonitorGetBlockStatsInfo(priv->mon,
                                            disk->info.alias,
@@ -5320,6 +5337,7 @@ qemudDomainBlockStats (virDomainPtr dom,
 endjob:
     if (qemuDomainObjEndJob(vm) == 0)
         vm = NULL;
+    }
 
 cleanup:
     if (vm)
@@ -5725,20 +5743,43 @@ static int qemuDomainGetBlockInfo(virDomainPtr dom,
         format != VIR_STORAGE_FILE_RAW &&
         S_ISBLK(sb.st_mode)) {
         qemuDomainObjPrivatePtr priv = vm->privateData;
+
+        if ((priv->jobActive == QEMU_JOB_MIGRATION_OUT)
+            || (priv->jobActive == QEMU_JOB_SAVE)) {
+            virDomainObjRef(vm);
+            while (priv->jobSignals & QEMU_JOB_SIGNAL_BLKINFO)
+                ignore_value(virCondWait(&priv->signalCond, &vm->lock));
+
+            priv->jobSignalsData.infoDevName = disk->info.alias;
+            priv->jobSignalsData.blockInfo = info;
+            priv->jobSignalsData.infoRetCode = &ret;
+            priv->jobSignals |= QEMU_JOB_SIGNAL_BLKINFO;
+
+            while (priv->jobSignals & QEMU_JOB_SIGNAL_BLKINFO)
+                ignore_value(virCondWait(&priv->signalCond, &vm->lock));
+
+            if (virDomainObjUnref(vm) == 0)
+                vm = NULL;
+        } else {
             if (qemuDomainObjBeginJob(vm) < 0)
                 goto cleanup;
-            if (!virDomainObjIsActive(vm))
-                ret = 0;
-            else {
+
+            if (!virDomainObjIsActive(vm)) {
+                qemuReportError(VIR_ERR_OPERATION_INVALID,
+                                "%s", _("domain is not running"));
+                goto endjob;
+            }
+
             qemuDomainObjEnterMonitor(vm);
             ret = qemuMonitorGetBlockExtent(priv->mon,
                                             disk->info.alias,
                                             &info->allocation);
             qemuDomainObjExitMonitor(vm);
-        }
 
+endjob:
             if (qemuDomainObjEndJob(vm) == 0)
                 vm = NULL;
+        }
     } else {
         ret = 0;
     }
@@ -6637,8 +6678,8 @@ qemuDomainMigrateSetMaxDowntime(virDomainPtr dom,
     }
 
     VIR_DEBUG("Requesting migration downtime change to %llums", downtime);
-    priv->jobSignals |= QEMU_JOB_SIGNAL_MIGRATE_DOWNTIME;
     priv->jobSignalsData.migrateDowntime = downtime;
+    priv->jobSignals |= QEMU_JOB_SIGNAL_MIGRATE_DOWNTIME;
     ret = 0;
 
 cleanup:
@@ -6686,8 +6727,8 @@ qemuDomainMigrateSetMaxSpeed(virDomainPtr dom,
     }
 
     VIR_DEBUG("Requesting migration speed change to %luMbs", bandwidth);
-    priv->jobSignals |= QEMU_JOB_SIGNAL_MIGRATE_SPEED;
     priv->jobSignalsData.migrateBandwidth = bandwidth;
+    priv->jobSignals |= QEMU_JOB_SIGNAL_MIGRATE_SPEED;
     ret = 0;
 
 cleanup:
src/qemu/qemu_migration.c

@@ -648,7 +648,8 @@ qemuMigrationSetOffline(struct qemud_driver *driver,
 static int
 qemuMigrationProcessJobSignals(struct qemud_driver *driver,
                                virDomainObjPtr vm,
-                               const char *job)
+                               const char *job,
+                               bool cleanup)
 {
     qemuDomainObjPrivatePtr priv = vm->privateData;
     int ret = -1;
@@ -656,6 +657,8 @@ qemuMigrationProcessJobSignals(struct qemud_driver *driver,
     if (!virDomainObjIsActive(vm)) {
         qemuReportError(VIR_ERR_INTERNAL_ERROR, _("%s: %s"),
                         job, _("guest unexpectedly quit"));
+        if (cleanup)
+            priv->jobSignals = 0;
         return -1;
     }
@@ -695,6 +698,34 @@ qemuMigrationProcessJobSignals(struct qemud_driver *driver,
         qemuDomainObjExitMonitorWithDriver(driver, vm);
         if (ret < 0)
             VIR_WARN("Unable to set migration speed");
+    } else if (priv->jobSignals & QEMU_JOB_SIGNAL_BLKSTAT) {
+        qemuDomainObjEnterMonitorWithDriver(driver, vm);
+        ret = qemuMonitorGetBlockStatsInfo(priv->mon,
+                                           priv->jobSignalsData.statDevName,
+                                           &priv->jobSignalsData.blockStat->rd_req,
+                                           &priv->jobSignalsData.blockStat->rd_bytes,
+                                           &priv->jobSignalsData.blockStat->wr_req,
+                                           &priv->jobSignalsData.blockStat->wr_bytes,
+                                           &priv->jobSignalsData.blockStat->errs);
+        qemuDomainObjExitMonitorWithDriver(driver, vm);
+
+        *priv->jobSignalsData.statRetCode = ret;
+        priv->jobSignals ^= QEMU_JOB_SIGNAL_BLKSTAT;
+
+        if (ret < 0)
+            VIR_WARN("Unable to get block statistics");
+    } else if (priv->jobSignals & QEMU_JOB_SIGNAL_BLKINFO) {
+        qemuDomainObjEnterMonitorWithDriver(driver, vm);
+        ret = qemuMonitorGetBlockExtent(priv->mon,
+                                        priv->jobSignalsData.infoDevName,
+                                        &priv->jobSignalsData.blockInfo->allocation);
+        qemuDomainObjExitMonitorWithDriver(driver, vm);
+
+        *priv->jobSignalsData.infoRetCode = ret;
+        priv->jobSignals ^= QEMU_JOB_SIGNAL_BLKINFO;
+
+        if (ret < 0)
+            VIR_WARN("Unable to get block information");
     } else {
         ret = 0;
     }
@@ -788,12 +819,6 @@ int
 qemuMigrationWaitForCompletion(struct qemud_driver *driver, virDomainObjPtr vm)
 {
     qemuDomainObjPrivatePtr priv = vm->privateData;
+    const char *job;
 
-    priv->jobInfo.type = VIR_DOMAIN_JOB_UNBOUNDED;
-
-    while (priv->jobInfo.type == VIR_DOMAIN_JOB_UNBOUNDED) {
-        /* Poll every 50ms for progress & to allow cancellation */
-        struct timespec ts = { .tv_sec = 0, .tv_nsec = 50 * 1000 * 1000ull };
-        const char *job;
-
     switch (priv->jobActive) {
@@ -810,8 +835,17 @@ qemuMigrationWaitForCompletion(struct qemud_driver *driver, virDomainObjPtr vm)
         job = _("job");
     }
 
-    if (qemuMigrationProcessJobSignals(driver, vm, job) < 0)
-        goto cleanup;
+    priv->jobInfo.type = VIR_DOMAIN_JOB_UNBOUNDED;
+
+    while (priv->jobInfo.type == VIR_DOMAIN_JOB_UNBOUNDED) {
+        /* Poll every 50ms for progress & to allow cancellation */
+        struct timespec ts = { .tv_sec = 0, .tv_nsec = 50 * 1000 * 1000ull };
+
+        while (priv->jobSignals) {
+            if (qemuMigrationProcessJobSignals(driver, vm, job, false) < 0)
+                goto cleanup;
+        }
+
+        virCondSignal(&priv->signalCond);
 
         if (qemuMigrationUpdateJobStatus(driver, vm, job) < 0)
             goto cleanup;
@@ -827,6 +861,11 @@ qemuMigrationWaitForCompletion(struct qemud_driver *driver, virDomainObjPtr vm)
     }
 
 cleanup:
+    while (priv->jobSignals) {
+        qemuMigrationProcessJobSignals(driver, vm, job, true);
+    }
+    virCondBroadcast(&priv->signalCond);
+
     if (priv->jobInfo.type == VIR_DOMAIN_JOB_COMPLETED)
         return 0;
     else
|
Loading…
Reference in New Issue
Block a user