Mirror of https://gitlab.com/libvirt/libvirt.git (synced 2025-01-21 20:15:17 +00:00)
virdomainjob: make drivers use job object in the domain object
This patch uses the job object in the domain object directly and removes the job object from the private data of all drivers that used it, along with the code that initialized and freed that structure.

Signed-off-by: Kristina Hanicova <khanicov@redhat.com>
Signed-off-by: Ján Tomko <jtomko@redhat.com>
Reviewed-by: Ján Tomko <jtomko@redhat.com>
This commit is contained in:
parent 84e9fd068c
commit 0150f7a8c1
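
In short: the virDomainJobObj that every driver previously embedded in its privateData now hangs off virDomainObj itself, so call sites change from priv->job.<field> to obj->job-><field> (or vm->job-><field>). A minimal sketch of the shape of the change, with illustrative field subsets only (the real definitions live in libvirt's virdomainjob.h and domain_conf.h):

    /* Before: each driver embedded its own job object in its private data. */
    struct _qemuDomainObjPrivate {
        virDomainJobObj job;          /* one embedded copy per driver */
        /* ... other driver-private fields ... */
    };
    /* qemuDomainObjPrivate *priv = vm->privateData;
     * if (priv->job.active) ... */

    /* After: the domain object itself carries a single job object. */
    struct _virDomainObj {
        /* ... */
        virDomainJobObj *job;         /* shared by all drivers */
    };
    /* if (vm->job->active) ... */

Driver-specific job behaviour survives through the callbacks now bundled into virDomainJobObjConfig (see the new virQEMUDriverDomainJobConfig below), which the QEMU driver hands to virDomainXMLOptionNew() instead of calling virDomainObjInitJob() in each private-data allocator.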
--- a/src/ch/ch_domain.c
+++ b/src/ch/ch_domain.c
@@ -44,7 +44,6 @@ VIR_LOG_INIT("ch.ch_domain");
 int
 virCHDomainObjBeginJob(virDomainObj *obj, virDomainJob job)
 {
-    virCHDomainObjPrivate *priv = obj->privateData;
     unsigned long long now;
     unsigned long long then;
 
@@ -52,16 +51,16 @@ virCHDomainObjBeginJob(virDomainObj *obj, virDomainJob job)
         return -1;
     then = now + CH_JOB_WAIT_TIME;
 
-    while (priv->job.active) {
+    while (obj->job->active) {
         VIR_DEBUG("Wait normal job condition for starting job: %s",
                   virDomainJobTypeToString(job));
-        if (virCondWaitUntil(&priv->job.cond, &obj->parent.lock, then) < 0) {
+        if (virCondWaitUntil(&obj->job->cond, &obj->parent.lock, then) < 0) {
            VIR_WARN("Cannot start job (%s) for domain %s;"
                     " current job is (%s) owned by (%llu)",
                     virDomainJobTypeToString(job),
                     obj->def->name,
-                    virDomainJobTypeToString(priv->job.active),
-                    priv->job.owner);
+                    virDomainJobTypeToString(obj->job->active),
+                    obj->job->owner);
 
            if (errno == ETIMEDOUT)
                virReportError(VIR_ERR_OPERATION_TIMEOUT,
@@ -73,11 +72,11 @@ virCHDomainObjBeginJob(virDomainObj *obj, virDomainJob job)
         }
     }
 
-    virDomainObjResetJob(&priv->job);
+    virDomainObjResetJob(obj->job);
 
     VIR_DEBUG("Starting job: %s", virDomainJobTypeToString(job));
-    priv->job.active = job;
-    priv->job.owner = virThreadSelfID();
+    obj->job->active = job;
+    obj->job->owner = virThreadSelfID();
 
     return 0;
 }
@@ -91,14 +90,13 @@ virCHDomainObjBeginJob(virDomainObj *obj, virDomainJob job)
 void
 virCHDomainObjEndJob(virDomainObj *obj)
 {
-    virCHDomainObjPrivate *priv = obj->privateData;
-    virDomainJob job = priv->job.active;
+    virDomainJob job = obj->job->active;
 
     VIR_DEBUG("Stopping job: %s",
               virDomainJobTypeToString(job));
 
-    virDomainObjResetJob(&priv->job);
-    virCondSignal(&priv->job.cond);
+    virDomainObjResetJob(obj->job);
+    virCondSignal(&obj->job->cond);
 }
 
 void
@@ -117,13 +115,7 @@ virCHDomainObjPrivateAlloc(void *opaque)
 
     priv = g_new0(virCHDomainObjPrivate, 1);
 
-    if (virDomainObjInitJob(&priv->job, NULL, NULL) < 0) {
-        g_free(priv);
-        return NULL;
-    }
-
     if (!(priv->chrdevs = virChrdevAlloc())) {
-        virDomainObjClearJob(&priv->job);
         g_free(priv);
         return NULL;
     }
@@ -138,7 +130,6 @@ virCHDomainObjPrivateFree(void *data)
     virCHDomainObjPrivate *priv = data;
 
     virChrdevFree(priv->chrdevs);
-    virDomainObjClearJob(&priv->job);
     g_free(priv->machineName);
     g_free(priv);
 }
--- a/src/ch/ch_domain.h
+++ b/src/ch/ch_domain.h
@@ -32,8 +32,6 @@
 
 typedef struct _virCHDomainObjPrivate virCHDomainObjPrivate;
 struct _virCHDomainObjPrivate {
-    virDomainJobObj job;
-
     virChrdevs *chrdevs;
     virCHDriver *driver;
     virCHMonitor *monitor;
--- a/src/conf/domain_conf.c
+++ b/src/conf/domain_conf.c
@@ -61,6 +61,7 @@
 #include "virdomaincheckpointobjlist.h"
 #include "virutil.h"
 #include "virsecureerase.h"
+#include "virdomainjob.h"
 
 #define VIR_FROM_THIS VIR_FROM_DOMAIN
 
--- a/src/libxl/libxl_domain.c
+++ b/src/libxl/libxl_domain.c
@@ -60,7 +60,6 @@ libxlDomainObjBeginJob(libxlDriverPrivate *driver G_GNUC_UNUSED,
                        virDomainObj *obj,
                        virDomainJob job)
 {
-    libxlDomainObjPrivate *priv = obj->privateData;
     unsigned long long now;
     unsigned long long then;
 
@@ -68,19 +67,19 @@ libxlDomainObjBeginJob(libxlDriverPrivate *driver G_GNUC_UNUSED,
         return -1;
     then = now + LIBXL_JOB_WAIT_TIME;
 
-    while (priv->job.active) {
+    while (obj->job->active) {
         VIR_DEBUG("Wait normal job condition for starting job: %s",
                   virDomainJobTypeToString(job));
-        if (virCondWaitUntil(&priv->job.cond, &obj->parent.lock, then) < 0)
+        if (virCondWaitUntil(&obj->job->cond, &obj->parent.lock, then) < 0)
             goto error;
     }
 
-    virDomainObjResetJob(&priv->job);
+    virDomainObjResetJob(obj->job);
 
     VIR_DEBUG("Starting job: %s", virDomainJobTypeToString(job));
-    priv->job.active = job;
-    priv->job.owner = virThreadSelfID();
-    priv->job.started = now;
+    obj->job->active = job;
+    obj->job->owner = virThreadSelfID();
+    obj->job->started = now;
 
     return 0;
 
@@ -89,8 +88,8 @@ libxlDomainObjBeginJob(libxlDriverPrivate *driver G_GNUC_UNUSED,
              " current job is (%s) owned by (%llu)",
              virDomainJobTypeToString(job),
             obj->def->name,
-             virDomainJobTypeToString(priv->job.active),
-             priv->job.owner);
+             virDomainJobTypeToString(obj->job->active),
+             obj->job->owner);
 
     if (errno == ETIMEDOUT)
         virReportError(VIR_ERR_OPERATION_TIMEOUT,
@@ -116,14 +115,13 @@ void
 libxlDomainObjEndJob(libxlDriverPrivate *driver G_GNUC_UNUSED,
                      virDomainObj *obj)
 {
-    libxlDomainObjPrivate *priv = obj->privateData;
-    virDomainJob job = priv->job.active;
+    virDomainJob job = obj->job->active;
 
     VIR_DEBUG("Stopping job: %s",
               virDomainJobTypeToString(job));
 
-    virDomainObjResetJob(&priv->job);
-    virCondSignal(&priv->job.cond);
+    virDomainObjResetJob(obj->job);
+    virCondSignal(&obj->job->cond);
 }
 
 int
@@ -158,12 +156,6 @@ libxlDomainObjPrivateAlloc(void *opaque G_GNUC_UNUSED)
         return NULL;
     }
 
-    if (virDomainObjInitJob(&priv->job, NULL, NULL) < 0) {
-        virChrdevFree(priv->devs);
-        g_free(priv);
-        return NULL;
-    }
-
     return priv;
 }
 
@@ -173,7 +165,6 @@ libxlDomainObjPrivateFree(void *data)
     libxlDomainObjPrivate *priv = data;
 
     g_free(priv->lockState);
-    virDomainObjClearJob(&priv->job);
     virChrdevFree(priv->devs);
     g_free(priv);
 }
--- a/src/libxl/libxl_domain.h
+++ b/src/libxl/libxl_domain.h
@@ -37,8 +37,6 @@ struct _libxlDomainObjPrivate {
     char *lockState;
     bool lockProcessRunning;
 
-    virDomainJobObj job;
-
     bool hookRun; /* true if there was a hook run over this domain */
 };
 
--- a/src/libxl/libxl_driver.c
+++ b/src/libxl/libxl_driver.c
@@ -5204,7 +5204,6 @@ static int
 libxlDomainGetJobInfo(virDomainPtr dom,
                       virDomainJobInfoPtr info)
 {
-    libxlDomainObjPrivate *priv;
     virDomainObj *vm;
     int ret = -1;
     unsigned long long timeElapsed = 0;
@@ -5215,8 +5214,7 @@ libxlDomainGetJobInfo(virDomainPtr dom,
     if (virDomainGetJobInfoEnsureACL(dom->conn, vm->def) < 0)
         goto cleanup;
 
-    priv = vm->privateData;
-    if (!priv->job.active) {
+    if (!vm->job->active) {
         memset(info, 0, sizeof(*info));
         info->type = VIR_DOMAIN_JOB_NONE;
         ret = 0;
@@ -5226,7 +5224,7 @@ libxlDomainGetJobInfo(virDomainPtr dom,
     /* In libxl we don't have an estimated completion time
      * thus we always set to unbounded and update time
      * for the active job. */
-    if (libxlDomainJobGetTimeElapsed(&priv->job, &timeElapsed) < 0)
+    if (libxlDomainJobGetTimeElapsed(vm->job, &timeElapsed) < 0)
         goto cleanup;
 
     /* setting only these two attributes is enough because libxl never sets
@@ -5248,7 +5246,6 @@ libxlDomainGetJobStats(virDomainPtr dom,
                        int *nparams,
                        unsigned int flags)
 {
-    libxlDomainObjPrivate *priv;
     virDomainObj *vm;
     int ret = -1;
     int maxparams = 0;
@@ -5263,8 +5260,7 @@ libxlDomainGetJobStats(virDomainPtr dom,
     if (virDomainGetJobStatsEnsureACL(dom->conn, vm->def) < 0)
         goto cleanup;
 
-    priv = vm->privateData;
-    if (!priv->job.active) {
+    if (!vm->job->active) {
         *type = VIR_DOMAIN_JOB_NONE;
         *params = NULL;
         *nparams = 0;
@@ -5275,7 +5271,7 @@ libxlDomainGetJobStats(virDomainPtr dom,
     /* In libxl we don't have an estimated completion time
      * thus we always set to unbounded and update time
      * for the active job. */
-    if (libxlDomainJobGetTimeElapsed(&priv->job, &timeElapsed) < 0)
+    if (libxlDomainJobGetTimeElapsed(vm->job, &timeElapsed) < 0)
         goto cleanup;
 
     if (virTypedParamsAddULLong(params, nparams, &maxparams,
--- a/src/lxc/lxc_domain.c
+++ b/src/lxc/lxc_domain.c
@@ -52,7 +52,6 @@ virLXCDomainObjBeginJob(virLXCDriver *driver G_GNUC_UNUSED,
                         virDomainObj *obj,
                         virDomainJob job)
 {
-    virLXCDomainObjPrivate *priv = obj->privateData;
     unsigned long long now;
     unsigned long long then;
 
@@ -60,18 +59,18 @@ virLXCDomainObjBeginJob(virLXCDriver *driver G_GNUC_UNUSED,
         return -1;
     then = now + LXC_JOB_WAIT_TIME;
 
-    while (priv->job.active) {
+    while (obj->job->active) {
         VIR_DEBUG("Wait normal job condition for starting job: %s",
                   virDomainJobTypeToString(job));
-        if (virCondWaitUntil(&priv->job.cond, &obj->parent.lock, then) < 0)
+        if (virCondWaitUntil(&obj->job->cond, &obj->parent.lock, then) < 0)
             goto error;
     }
 
-    virDomainObjResetJob(&priv->job);
+    virDomainObjResetJob(obj->job);
 
     VIR_DEBUG("Starting job: %s", virDomainJobTypeToString(job));
-    priv->job.active = job;
-    priv->job.owner = virThreadSelfID();
+    obj->job->active = job;
+    obj->job->owner = virThreadSelfID();
 
     return 0;
 
@@ -80,8 +79,8 @@ virLXCDomainObjBeginJob(virLXCDriver *driver G_GNUC_UNUSED,
              " current job is (%s) owned by (%llu)",
              virDomainJobTypeToString(job),
             obj->def->name,
-             virDomainJobTypeToString(priv->job.active),
-             priv->job.owner);
+             virDomainJobTypeToString(obj->job->active),
+             obj->job->owner);
 
     if (errno == ETIMEDOUT)
         virReportError(VIR_ERR_OPERATION_TIMEOUT,
@@ -103,14 +102,13 @@ void
 virLXCDomainObjEndJob(virLXCDriver *driver G_GNUC_UNUSED,
                       virDomainObj *obj)
 {
-    virLXCDomainObjPrivate *priv = obj->privateData;
-    virDomainJob job = priv->job.active;
+    virDomainJob job = obj->job->active;
 
     VIR_DEBUG("Stopping job: %s",
               virDomainJobTypeToString(job));
 
-    virDomainObjResetJob(&priv->job);
-    virCondSignal(&priv->job.cond);
+    virDomainObjResetJob(obj->job);
+    virCondSignal(&obj->job->cond);
 }
 
 
@@ -119,11 +117,6 @@ virLXCDomainObjPrivateAlloc(void *opaque)
 {
     virLXCDomainObjPrivate *priv = g_new0(virLXCDomainObjPrivate, 1);
 
-    if (virDomainObjInitJob(&priv->job, NULL, NULL) < 0) {
-        g_free(priv);
-        return NULL;
-    }
-
     priv->driver = opaque;
 
     return priv;
@@ -136,7 +129,6 @@ virLXCDomainObjPrivateFree(void *data)
     virLXCDomainObjPrivate *priv = data;
 
     virCgroupFree(priv->cgroup);
-    virDomainObjClearJob(&priv->job);
     g_free(priv);
 }
 
--- a/src/lxc/lxc_domain.h
+++ b/src/lxc/lxc_domain.h
@@ -66,8 +66,6 @@ struct _virLXCDomainObjPrivate {
 
     virCgroup *cgroup;
     char *machineName;
-
-    virDomainJobObj job;
 };
 
 extern virXMLNamespace virLXCDriverDomainXMLNamespace;
--- a/src/qemu/qemu_backup.c
+++ b/src/qemu/qemu_backup.c
@@ -594,30 +594,30 @@ qemuBackupJobTerminate(virDomainObj *vm,
         }
     }
 
-    if (priv->job.current) {
+    if (vm->job->current) {
         qemuDomainJobDataPrivate *privData = NULL;
 
-        qemuDomainJobDataUpdateTime(priv->job.current);
+        qemuDomainJobDataUpdateTime(vm->job->current);
 
-        g_clear_pointer(&priv->job.completed, virDomainJobDataFree);
-        priv->job.completed = virDomainJobDataCopy(priv->job.current);
+        g_clear_pointer(&vm->job->completed, virDomainJobDataFree);
+        vm->job->completed = virDomainJobDataCopy(vm->job->current);
 
-        privData = priv->job.completed->privateData;
+        privData = vm->job->completed->privateData;
 
         privData->stats.backup.total = priv->backup->push_total;
         privData->stats.backup.transferred = priv->backup->push_transferred;
         privData->stats.backup.tmp_used = priv->backup->pull_tmp_used;
         privData->stats.backup.tmp_total = priv->backup->pull_tmp_total;
 
-        priv->job.completed->status = jobstatus;
-        priv->job.completed->errmsg = g_strdup(priv->backup->errmsg);
+        vm->job->completed->status = jobstatus;
+        vm->job->completed->errmsg = g_strdup(priv->backup->errmsg);
 
         qemuDomainEventEmitJobCompleted(priv->driver, vm);
     }
 
     g_clear_pointer(&priv->backup, virDomainBackupDefFree);
 
-    if (priv->job.asyncJob == VIR_ASYNC_JOB_BACKUP)
+    if (vm->job->asyncJob == VIR_ASYNC_JOB_BACKUP)
         qemuDomainObjEndAsyncJob(vm);
 }
 
@@ -793,7 +793,7 @@ qemuBackupBegin(virDomainObj *vm,
     qemuDomainObjSetAsyncJobMask(vm, (VIR_JOB_DEFAULT_MASK |
                                       JOB_MASK(VIR_JOB_SUSPEND) |
                                       JOB_MASK(VIR_JOB_MODIFY)));
-    qemuDomainJobSetStatsType(priv->job.current,
+    qemuDomainJobSetStatsType(vm->job->current,
                               QEMU_DOMAIN_JOB_STATS_TYPE_BACKUP);
 
     if (!virDomainObjIsActive(vm)) {
--- a/src/qemu/qemu_conf.c
+++ b/src/qemu/qemu_conf.c
@@ -1272,14 +1272,18 @@ virDomainXMLOption *
 virQEMUDriverCreateXMLConf(virQEMUDriver *driver,
                            const char *defsecmodel)
 {
+    g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
+
     virQEMUDriverDomainDefParserConfig.priv = driver;
     virQEMUDriverDomainDefParserConfig.defSecModel = defsecmodel;
+    virQEMUDriverDomainJobConfig.maxQueuedJobs = cfg->maxQueuedJobs;
+
     return virDomainXMLOptionNew(&virQEMUDriverDomainDefParserConfig,
                                  &virQEMUDriverPrivateDataCallbacks,
                                  &virQEMUDriverDomainXMLNamespace,
                                  &virQEMUDriverDomainABIStability,
                                  &virQEMUDriverDomainSaveCookie,
-                                 NULL);
+                                 &virQEMUDriverDomainJobConfig);
 }
 
 
--- a/src/qemu/qemu_domain.c
+++ b/src/qemu/qemu_domain.c
@@ -278,7 +278,7 @@ qemuDomainObjPrivateXMLParseJobNBD(virDomainObj *vm,
         return -1;
 
     if (n > 0) {
-        if (priv->job.asyncJob != VIR_ASYNC_JOB_MIGRATION_OUT) {
+        if (vm->job->asyncJob != VIR_ASYNC_JOB_MIGRATION_OUT) {
             VIR_WARN("Found disks marked for migration but we were not "
                      "migrating");
             n = 0;
@@ -359,14 +359,46 @@ qemuDomainParseJobPrivate(xmlXPathContextPtr ctxt,
     return 0;
 }
 
+static void *
+qemuJobDataAllocPrivateData(void)
+{
+    return g_new0(qemuDomainJobDataPrivate, 1);
+}
+
-static virDomainObjPrivateJobCallbacks qemuPrivateJobCallbacks = {
-    .allocJobPrivate = qemuJobAllocPrivate,
-    .freeJobPrivate = qemuJobFreePrivate,
-    .resetJobPrivate = qemuJobResetPrivate,
-    .formatJobPrivate = qemuDomainFormatJobPrivate,
-    .parseJobPrivate = qemuDomainParseJobPrivate,
-    .saveStatusPrivate = qemuDomainSaveStatus,
+
+static void *
+qemuJobDataCopyPrivateData(void *data)
+{
+    qemuDomainJobDataPrivate *ret = g_new0(qemuDomainJobDataPrivate, 1);
+
+    memcpy(ret, data, sizeof(qemuDomainJobDataPrivate));
+
+    return ret;
+}
+
+
+static void
+qemuJobDataFreePrivateData(void *data)
+{
+    g_free(data);
+}
+
+
+virDomainJobObjConfig virQEMUDriverDomainJobConfig = {
+    .cb = {
+        .allocJobPrivate = qemuJobAllocPrivate,
+        .freeJobPrivate = qemuJobFreePrivate,
+        .resetJobPrivate = qemuJobResetPrivate,
+        .formatJobPrivate = qemuDomainFormatJobPrivate,
+        .parseJobPrivate = qemuDomainParseJobPrivate,
+        .saveStatusPrivate = qemuDomainSaveStatus,
+    },
+    .jobDataPrivateCb = {
+        .allocPrivateData = qemuJobDataAllocPrivateData,
+        .copyPrivateData = qemuJobDataCopyPrivateData,
+        .freePrivateData = qemuJobDataFreePrivateData,
+    },
+    .maxQueuedJobs = 0,
 };
 
 /**
@@ -1719,7 +1751,6 @@ qemuDomainObjPrivateFree(void *data)
     qemuDomainObjPrivateDataClear(priv);
 
     virObjectUnref(priv->monConfig);
-    virDomainObjClearJob(&priv->job);
     g_free(priv->lockState);
     g_free(priv->origname);
 
@@ -1757,22 +1788,12 @@ static void *
 qemuDomainObjPrivateAlloc(void *opaque)
 {
     g_autoptr(qemuDomainObjPrivate) priv = g_new0(qemuDomainObjPrivate, 1);
-    g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(opaque);
 
-    if (virDomainObjInitJob(&priv->job, &qemuPrivateJobCallbacks,
-                            &qemuJobDataPrivateDataCallbacks) < 0) {
-        virReportSystemError(errno, "%s",
-                             _("Unable to init qemu driver mutexes"));
-        return NULL;
-    }
-
     if (!(priv->devs = virChrdevAlloc()))
         return NULL;
 
     priv->blockjobs = virHashNew(virObjectUnref);
 
-    priv->job.maxQueuedJobs = cfg->maxQueuedJobs;
-
     /* agent commands block by default, user can choose different behavior */
     priv->agentTimeout = VIR_DOMAIN_AGENT_RESPONSE_TIMEOUT_BLOCK;
     priv->migMaxBandwidth = QEMU_DOMAIN_MIG_BANDWIDTH_MAX;
@@ -5955,14 +5976,14 @@ qemuDomainObjEnterMonitorInternal(virDomainObj *obj,
             qemuDomainObjEndJob(obj);
             return -1;
         }
-    } else if (priv->job.asyncOwner == virThreadSelfID()) {
+    } else if (obj->job->asyncOwner == virThreadSelfID()) {
         VIR_WARN("This thread seems to be the async job owner; entering"
                  " monitor without asking for a nested job is dangerous");
-    } else if (priv->job.owner != virThreadSelfID()) {
+    } else if (obj->job->owner != virThreadSelfID()) {
         VIR_WARN("Entering a monitor without owning a job. "
                  "Job %s owner %s (%llu)",
-                 virDomainJobTypeToString(priv->job.active),
-                 priv->job.ownerAPI, priv->job.owner);
+                 virDomainJobTypeToString(obj->job->active),
+                 obj->job->ownerAPI, obj->job->owner);
     }
 
     VIR_DEBUG("Entering monitor (mon=%p vm=%p name=%s)",
@@ -6001,7 +6022,7 @@ qemuDomainObjExitMonitor(virDomainObj *obj)
     if (!hasRefs)
         priv->mon = NULL;
 
-    if (priv->job.active == VIR_JOB_ASYNC_NESTED)
+    if (obj->job->active == VIR_JOB_ASYNC_NESTED)
         qemuDomainObjEndJob(obj);
 }
 
--- a/src/qemu/qemu_domain.h
+++ b/src/qemu/qemu_domain.h
@@ -99,8 +99,6 @@ typedef struct _qemuDomainObjPrivate qemuDomainObjPrivate;
 struct _qemuDomainObjPrivate {
     virQEMUDriver *driver;
 
-    virDomainJobObj job;
-
     virBitmap *namespaces;
 
     virEventThread *eventThread;
@@ -775,6 +773,7 @@ extern virXMLNamespace virQEMUDriverDomainXMLNamespace;
 extern virDomainDefParserConfig virQEMUDriverDomainDefParserConfig;
 extern virDomainABIStability virQEMUDriverDomainABIStability;
 extern virSaveCookieCallbacks virQEMUDriverDomainSaveCookie;
+extern virDomainJobObjConfig virQEMUDriverDomainJobConfig;
 
 int qemuDomainUpdateDeviceList(virDomainObj *vm, int asyncJob);
 
--- a/src/qemu/qemu_domainjob.c
+++ b/src/qemu/qemu_domainjob.c
@@ -31,38 +31,6 @@
 
 VIR_LOG_INIT("qemu.qemu_domainjob");
 
-static void *
-qemuJobDataAllocPrivateData(void)
-{
-    return g_new0(qemuDomainJobDataPrivate, 1);
-}
-
-
-static void *
-qemuJobDataCopyPrivateData(void *data)
-{
-    qemuDomainJobDataPrivate *ret = g_new0(qemuDomainJobDataPrivate, 1);
-
-    memcpy(ret, data, sizeof(qemuDomainJobDataPrivate));
-
-    return ret;
-}
-
-
-static void
-qemuJobDataFreePrivateData(void *data)
-{
-    g_free(data);
-}
-
-
-virDomainJobDataPrivateDataCallbacks qemuJobDataPrivateDataCallbacks = {
-    .allocPrivateData = qemuJobDataAllocPrivateData,
-    .copyPrivateData = qemuJobDataCopyPrivateData,
-    .freePrivateData = qemuJobDataFreePrivateData,
-};
-
-
 void
 qemuDomainJobSetStatsType(virDomainJobData *jobData,
                           qemuDomainJobStatsType type)
@@ -130,16 +98,15 @@ void
 qemuDomainEventEmitJobCompleted(virQEMUDriver *driver,
                                 virDomainObj *vm)
 {
-    qemuDomainObjPrivate *priv = vm->privateData;
     virObjectEvent *event;
     virTypedParameterPtr params = NULL;
     int nparams = 0;
     int type;
 
-    if (!priv->job.completed)
+    if (!vm->job->completed)
         return;
 
-    if (qemuDomainJobDataToParams(priv->job.completed, &type,
+    if (qemuDomainJobDataToParams(vm->job->completed, &type,
                                   &params, &nparams) < 0) {
         VIR_WARN("Could not get stats for completed job; domain %s",
                  vm->def->name);
@@ -160,8 +127,7 @@ qemuDomainObjRestoreAsyncJob(virDomainObj *vm,
                              virDomainJobStatus status,
                              unsigned long long allowedJobs)
 {
-    qemuDomainObjPrivate *priv = vm->privateData;
-    virDomainJobObj *job = &priv->job;
+    virDomainJobObj *job = vm->job;
 
     VIR_DEBUG("Restoring %s async job for domain %s",
               virDomainAsyncJobTypeToString(asyncJob), vm->def->name);
@@ -177,8 +143,8 @@ qemuDomainObjRestoreAsyncJob(virDomainObj *vm,
 
     qemuDomainObjSetAsyncJobMask(vm, allowedJobs);
 
-    job->current = virDomainJobDataInit(&qemuJobDataPrivateDataCallbacks);
-    qemuDomainJobSetStatsType(priv->job.current, statsType);
+    job->current = virDomainJobDataInit(&virQEMUDriverDomainJobConfig.jobDataPrivateCb);
+    qemuDomainJobSetStatsType(vm->job->current, statsType);
     job->current->operation = operation;
     job->current->status = status;
     job->current->started = started;
@@ -603,25 +569,24 @@ void
 qemuDomainObjSetJobPhase(virDomainObj *obj,
                          int phase)
 {
-    qemuDomainObjPrivate *priv = obj->privateData;
     unsigned long long me = virThreadSelfID();
 
-    if (!priv->job.asyncJob)
+    if (!obj->job->asyncJob)
         return;
 
     VIR_DEBUG("Setting '%s' phase to '%s'",
-              virDomainAsyncJobTypeToString(priv->job.asyncJob),
-              qemuDomainAsyncJobPhaseToString(priv->job.asyncJob, phase));
+              virDomainAsyncJobTypeToString(obj->job->asyncJob),
+              qemuDomainAsyncJobPhaseToString(obj->job->asyncJob, phase));
 
-    if (priv->job.asyncOwner != 0 &&
-        priv->job.asyncOwner != me) {
+    if (obj->job->asyncOwner != 0 &&
+        obj->job->asyncOwner != me) {
         VIR_WARN("'%s' async job is owned by thread %llu, API '%s'",
-                 virDomainAsyncJobTypeToString(priv->job.asyncJob),
-                 priv->job.asyncOwner,
-                 NULLSTR(priv->job.asyncOwnerAPI));
+                 virDomainAsyncJobTypeToString(obj->job->asyncJob),
+                 obj->job->asyncOwner,
+                 NULLSTR(obj->job->asyncOwnerAPI));
     }
 
-    priv->job.phase = phase;
+    obj->job->phase = phase;
     qemuDomainSaveStatus(obj);
 }
 
@@ -634,26 +599,25 @@ void
 qemuDomainObjStartJobPhase(virDomainObj *obj,
                            int phase)
 {
-    qemuDomainObjPrivate *priv = obj->privateData;
     unsigned long long me = virThreadSelfID();
 
-    if (!priv->job.asyncJob)
+    if (!obj->job->asyncJob)
         return;
 
     VIR_DEBUG("Starting phase '%s' of '%s' job",
-              qemuDomainAsyncJobPhaseToString(priv->job.asyncJob, phase),
-              virDomainAsyncJobTypeToString(priv->job.asyncJob));
+              qemuDomainAsyncJobPhaseToString(obj->job->asyncJob, phase),
+              virDomainAsyncJobTypeToString(obj->job->asyncJob));
 
-    if (priv->job.asyncOwner == 0) {
-        priv->job.asyncOwnerAPI = g_strdup(virThreadJobGet());
-    } else if (me != priv->job.asyncOwner) {
+    if (obj->job->asyncOwner == 0) {
+        obj->job->asyncOwnerAPI = g_strdup(virThreadJobGet());
+    } else if (me != obj->job->asyncOwner) {
         VIR_WARN("'%s' async job is owned by thread %llu, API '%s'",
-                 virDomainAsyncJobTypeToString(priv->job.asyncJob),
-                 priv->job.asyncOwner,
-                 NULLSTR(priv->job.asyncOwnerAPI));
+                 virDomainAsyncJobTypeToString(obj->job->asyncJob),
+                 obj->job->asyncOwner,
+                 NULLSTR(obj->job->asyncOwnerAPI));
     }
 
-    priv->job.asyncOwner = me;
+    obj->job->asyncOwner = me;
     qemuDomainObjSetJobPhase(obj, phase);
 }
 
@@ -662,39 +626,33 @@ void
 qemuDomainObjSetAsyncJobMask(virDomainObj *obj,
                              unsigned long long allowedJobs)
 {
-    qemuDomainObjPrivate *priv = obj->privateData;
-
-    if (!priv->job.asyncJob)
+    if (!obj->job->asyncJob)
         return;
 
-    priv->job.mask = allowedJobs | JOB_MASK(VIR_JOB_DESTROY);
+    obj->job->mask = allowedJobs | JOB_MASK(VIR_JOB_DESTROY);
 }
 
 void
 qemuDomainObjDiscardAsyncJob(virDomainObj *obj)
 {
-    qemuDomainObjPrivate *priv = obj->privateData;
-
-    if (priv->job.active == VIR_JOB_ASYNC_NESTED)
-        virDomainObjResetJob(&priv->job);
-    virDomainObjResetAsyncJob(&priv->job);
+    if (obj->job->active == VIR_JOB_ASYNC_NESTED)
+        virDomainObjResetJob(obj->job);
+    virDomainObjResetAsyncJob(obj->job);
     qemuDomainSaveStatus(obj);
 }
 
 void
 qemuDomainObjReleaseAsyncJob(virDomainObj *obj)
 {
-    qemuDomainObjPrivate *priv = obj->privateData;
-
     VIR_DEBUG("Releasing ownership of '%s' async job",
-              virDomainAsyncJobTypeToString(priv->job.asyncJob));
+              virDomainAsyncJobTypeToString(obj->job->asyncJob));
 
-    if (priv->job.asyncOwner != virThreadSelfID()) {
+    if (obj->job->asyncOwner != virThreadSelfID()) {
         VIR_WARN("'%s' async job is owned by thread %llu",
-                 virDomainAsyncJobTypeToString(priv->job.asyncJob),
-                 priv->job.asyncOwner);
+                 virDomainAsyncJobTypeToString(obj->job->asyncJob),
+                 obj->job->asyncOwner);
     }
-    priv->job.asyncOwner = 0;
+    obj->job->asyncOwner = 0;
 }
 
 /*
@@ -708,9 +666,7 @@ qemuDomainObjReleaseAsyncJob(virDomainObj *obj)
 int qemuDomainObjBeginJob(virDomainObj *obj,
                           virDomainJob job)
 {
-    qemuDomainObjPrivate *priv = obj->privateData;
-
-    if (virDomainObjBeginJobInternal(obj, &priv->job, job,
+    if (virDomainObjBeginJobInternal(obj, obj->job, job,
                                      VIR_AGENT_JOB_NONE,
                                      VIR_ASYNC_JOB_NONE, false) < 0)
         return -1;
@@ -728,9 +684,7 @@ int
 qemuDomainObjBeginAgentJob(virDomainObj *obj,
                            virDomainAgentJob agentJob)
 {
-    qemuDomainObjPrivate *priv = obj->privateData;
-
-    return virDomainObjBeginJobInternal(obj, &priv->job, VIR_JOB_NONE,
+    return virDomainObjBeginJobInternal(obj, obj->job, VIR_JOB_NONE,
                                         agentJob,
                                         VIR_ASYNC_JOB_NONE, false);
 }
@@ -740,16 +694,13 @@ int qemuDomainObjBeginAsyncJob(virDomainObj *obj,
                                virDomainJobOperation operation,
                                unsigned long apiFlags)
 {
-    qemuDomainObjPrivate *priv = obj->privateData;
-
-    if (virDomainObjBeginJobInternal(obj, &priv->job, VIR_JOB_ASYNC,
+    if (virDomainObjBeginJobInternal(obj, obj->job, VIR_JOB_ASYNC,
                                      VIR_AGENT_JOB_NONE,
                                      asyncJob, false) < 0)
         return -1;
 
-    priv = obj->privateData;
-    priv->job.current->operation = operation;
-    priv->job.apiFlags = apiFlags;
+    obj->job->current->operation = operation;
+    obj->job->apiFlags = apiFlags;
     return 0;
 }
 
@@ -757,21 +708,19 @@ int
 qemuDomainObjBeginNestedJob(virDomainObj *obj,
                             virDomainAsyncJob asyncJob)
 {
-    qemuDomainObjPrivate *priv = obj->privateData;
-
-    if (asyncJob != priv->job.asyncJob) {
+    if (asyncJob != obj->job->asyncJob) {
         virReportError(VIR_ERR_INTERNAL_ERROR,
                        _("unexpected async job %d type expected %d"),
-                       asyncJob, priv->job.asyncJob);
+                       asyncJob, obj->job->asyncJob);
         return -1;
     }
 
-    if (priv->job.asyncOwner != virThreadSelfID()) {
+    if (obj->job->asyncOwner != virThreadSelfID()) {
         VIR_WARN("This thread doesn't seem to be the async job owner: %llu",
-                 priv->job.asyncOwner);
+                 obj->job->asyncOwner);
     }
 
-    return virDomainObjBeginJobInternal(obj, &priv->job,
+    return virDomainObjBeginJobInternal(obj, obj->job,
                                         VIR_JOB_ASYNC_NESTED,
                                         VIR_AGENT_JOB_NONE,
                                         VIR_ASYNC_JOB_NONE,
@@ -794,9 +743,7 @@ int
 qemuDomainObjBeginJobNowait(virDomainObj *obj,
                             virDomainJob job)
 {
-    qemuDomainObjPrivate *priv = obj->privateData;
-
-    return virDomainObjBeginJobInternal(obj, &priv->job, job,
+    return virDomainObjBeginJobInternal(obj, obj->job, job,
                                         VIR_AGENT_JOB_NONE,
                                         VIR_ASYNC_JOB_NONE, true);
 }
@@ -810,69 +757,63 @@ qemuDomainObjBeginJobNowait(virDomainObj *obj,
 void
 qemuDomainObjEndJob(virDomainObj *obj)
 {
-    qemuDomainObjPrivate *priv = obj->privateData;
-    virDomainJob job = priv->job.active;
+    virDomainJob job = obj->job->active;
 
-    priv->job.jobsQueued--;
+    obj->job->jobsQueued--;
 
     VIR_DEBUG("Stopping job: %s (async=%s vm=%p name=%s)",
              virDomainJobTypeToString(job),
-              virDomainAsyncJobTypeToString(priv->job.asyncJob),
+              virDomainAsyncJobTypeToString(obj->job->asyncJob),
              obj, obj->def->name);
 
-    virDomainObjResetJob(&priv->job);
+    virDomainObjResetJob(obj->job);
     if (virDomainTrackJob(job))
         qemuDomainSaveStatus(obj);
     /* We indeed need to wake up ALL threads waiting because
      * grabbing a job requires checking more variables. */
-    virCondBroadcast(&priv->job.cond);
+    virCondBroadcast(&obj->job->cond);
 }
 
 void
 qemuDomainObjEndAgentJob(virDomainObj *obj)
 {
-    qemuDomainObjPrivate *priv = obj->privateData;
-    virDomainAgentJob agentJob = priv->job.agentActive;
+    virDomainAgentJob agentJob = obj->job->agentActive;
 
-    priv->job.jobsQueued--;
+    obj->job->jobsQueued--;
 
     VIR_DEBUG("Stopping agent job: %s (async=%s vm=%p name=%s)",
              virDomainAgentJobTypeToString(agentJob),
-              virDomainAsyncJobTypeToString(priv->job.asyncJob),
+              virDomainAsyncJobTypeToString(obj->job->asyncJob),
              obj, obj->def->name);
 
-    virDomainObjResetAgentJob(&priv->job);
+    virDomainObjResetAgentJob(obj->job);
     /* We indeed need to wake up ALL threads waiting because
      * grabbing a job requires checking more variables. */
-    virCondBroadcast(&priv->job.cond);
+    virCondBroadcast(&obj->job->cond);
 }
 
 void
 qemuDomainObjEndAsyncJob(virDomainObj *obj)
 {
-    qemuDomainObjPrivate *priv = obj->privateData;
-
-    priv->job.jobsQueued--;
+    obj->job->jobsQueued--;
 
     VIR_DEBUG("Stopping async job: %s (vm=%p name=%s)",
-              virDomainAsyncJobTypeToString(priv->job.asyncJob),
+              virDomainAsyncJobTypeToString(obj->job->asyncJob),
              obj, obj->def->name);
 
-    virDomainObjResetAsyncJob(&priv->job);
+    virDomainObjResetAsyncJob(obj->job);
     qemuDomainSaveStatus(obj);
-    virCondBroadcast(&priv->job.asyncCond);
+    virCondBroadcast(&obj->job->asyncCond);
 }
 
 void
 qemuDomainObjAbortAsyncJob(virDomainObj *obj)
 {
-    qemuDomainObjPrivate *priv = obj->privateData;
-
     VIR_DEBUG("Requesting abort of async job: %s (vm=%p name=%s)",
-              virDomainAsyncJobTypeToString(priv->job.asyncJob),
+              virDomainAsyncJobTypeToString(obj->job->asyncJob),
              obj, obj->def->name);
 
-    priv->job.abortJob = true;
+    obj->job->abortJob = true;
     virDomainObjBroadcast(obj);
 }
 
@@ -880,35 +821,34 @@ int
 qemuDomainObjPrivateXMLFormatJob(virBuffer *buf,
                                  virDomainObj *vm)
 {
-    qemuDomainObjPrivate *priv = vm->privateData;
     g_auto(virBuffer) attrBuf = VIR_BUFFER_INITIALIZER;
     g_auto(virBuffer) childBuf = VIR_BUFFER_INIT_CHILD(buf);
-    virDomainJob job = priv->job.active;
+    virDomainJob job = vm->job->active;
 
     if (!virDomainTrackJob(job))
         job = VIR_JOB_NONE;
 
     if (job == VIR_JOB_NONE &&
-        priv->job.asyncJob == VIR_ASYNC_JOB_NONE)
+        vm->job->asyncJob == VIR_ASYNC_JOB_NONE)
         return 0;
 
     virBufferAsprintf(&attrBuf, " type='%s' async='%s'",
                       virDomainJobTypeToString(job),
-                      virDomainAsyncJobTypeToString(priv->job.asyncJob));
+                      virDomainAsyncJobTypeToString(vm->job->asyncJob));
 
-    if (priv->job.phase) {
+    if (vm->job->phase) {
         virBufferAsprintf(&attrBuf, " phase='%s'",
-                          qemuDomainAsyncJobPhaseToString(priv->job.asyncJob,
-                                                          priv->job.phase));
+                          qemuDomainAsyncJobPhaseToString(vm->job->asyncJob,
+                                                          vm->job->phase));
     }
 
-    if (priv->job.asyncJob != VIR_ASYNC_JOB_NONE) {
-        virBufferAsprintf(&attrBuf, " flags='0x%lx'", priv->job.apiFlags);
-        virBufferAsprintf(&attrBuf, " asyncStarted='%llu'", priv->job.asyncStarted);
+    if (vm->job->asyncJob != VIR_ASYNC_JOB_NONE) {
+        virBufferAsprintf(&attrBuf, " flags='0x%lx'", vm->job->apiFlags);
+        virBufferAsprintf(&attrBuf, " asyncStarted='%llu'", vm->job->asyncStarted);
     }
 
-    if (priv->job.cb &&
-        priv->job.cb->formatJobPrivate(&childBuf, &priv->job, vm) < 0)
+    if (vm->job->cb &&
+        vm->job->cb->formatJobPrivate(&childBuf, vm->job, vm) < 0)
         return -1;
 
     virXMLFormatElement(buf, "job", &attrBuf, &childBuf);
@@ -921,8 +861,7 @@ int
 qemuDomainObjPrivateXMLParseJob(virDomainObj *vm,
                                 xmlXPathContextPtr ctxt)
 {
-    qemuDomainObjPrivate *priv = vm->privateData;
-    virDomainJobObj *job = &priv->job;
+    virDomainJobObj *job = vm->job;
     VIR_XPATH_NODE_AUTORESTORE(ctxt)
     g_autofree char *tmp = NULL;
 
@@ -938,7 +877,7 @@ qemuDomainObjPrivateXMLParseJob(virDomainObj *vm,
             return -1;
         }
         VIR_FREE(tmp);
-        priv->job.active = type;
+        vm->job->active = type;
     }
 
     if ((tmp = virXPathString("string(@async)", ctxt))) {
@@ -950,11 +889,11 @@ qemuDomainObjPrivateXMLParseJob(virDomainObj *vm,
             return -1;
         }
         VIR_FREE(tmp);
-        priv->job.asyncJob = async;
+        vm->job->asyncJob = async;
 
         if ((tmp = virXPathString("string(@phase)", ctxt))) {
-            priv->job.phase = qemuDomainAsyncJobPhaseFromString(async, tmp);
-            if (priv->job.phase < 0) {
+            vm->job->phase = qemuDomainAsyncJobPhaseFromString(async, tmp);
+            if (vm->job->phase < 0) {
                 virReportError(VIR_ERR_INTERNAL_ERROR,
                                _("Unknown job phase %s"), tmp);
                 return -1;
@@ -963,20 +902,20 @@ qemuDomainObjPrivateXMLParseJob(virDomainObj *vm,
         }
 
         if (virXPathULongLong("string(@asyncStarted)", ctxt,
-                              &priv->job.asyncStarted) == -2) {
+                              &vm->job->asyncStarted) == -2) {
             virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                            _("Invalid async job start"));
             return -1;
        }
     }
 
-    if (virXPathULongHex("string(@flags)", ctxt, &priv->job.apiFlags) == -2) {
+    if (virXPathULongHex("string(@flags)", ctxt, &vm->job->apiFlags) == -2) {
         virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("Invalid job flags"));
         return -1;
     }
 
-    if (priv->job.cb &&
-        priv->job.cb->parseJobPrivate(ctxt, job, vm) < 0)
+    if (vm->job->cb &&
+        vm->job->cb->parseJobPrivate(ctxt, job, vm) < 0)
         return -1;
 
     return 0;
--- a/src/qemu/qemu_domainjob.h
+++ b/src/qemu/qemu_domainjob.h
@@ -58,8 +58,6 @@ struct _qemuDomainJobDataPrivate {
     qemuDomainMirrorStats mirrorStats;
 };
 
-extern virDomainJobDataPrivateDataCallbacks qemuJobDataPrivateDataCallbacks;
-
 void qemuDomainJobSetStatsType(virDomainJobData *jobData,
                                qemuDomainJobStatsType type);
 
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -1671,7 +1671,6 @@ static int qemuDomainSuspend(virDomainPtr dom)
     virQEMUDriver *driver = dom->conn->privateData;
     virDomainObj *vm;
     int ret = -1;
-    qemuDomainObjPrivate *priv;
     virDomainPausedReason reason;
     int state;
 
@@ -1681,17 +1680,15 @@ static int qemuDomainSuspend(virDomainPtr dom)
     if (virDomainSuspendEnsureACL(dom->conn, vm->def) < 0)
         goto cleanup;
 
-    priv = vm->privateData;
-
     if (qemuDomainObjBeginJob(vm, VIR_JOB_SUSPEND) < 0)
         goto cleanup;
 
     if (virDomainObjCheckActive(vm) < 0)
         goto endjob;
 
-    if (priv->job.asyncJob == VIR_ASYNC_JOB_MIGRATION_OUT)
+    if (vm->job->asyncJob == VIR_ASYNC_JOB_MIGRATION_OUT)
         reason = VIR_DOMAIN_PAUSED_MIGRATION;
-    else if (priv->job.asyncJob == VIR_ASYNC_JOB_SNAPSHOT)
+    else if (vm->job->asyncJob == VIR_ASYNC_JOB_SNAPSHOT)
         reason = VIR_DOMAIN_PAUSED_SNAPSHOT;
     else
         reason = VIR_DOMAIN_PAUSED_USER;
@@ -2102,7 +2099,7 @@ qemuDomainDestroyFlags(virDomainPtr dom,
 
     qemuDomainSetFakeReboot(vm, false);
 
-    if (priv->job.asyncJob == VIR_ASYNC_JOB_MIGRATION_IN)
+    if (vm->job->asyncJob == VIR_ASYNC_JOB_MIGRATION_IN)
         stopFlags |= VIR_QEMU_PROCESS_STOP_MIGRATED;
 
     qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_DESTROYED,
@@ -2589,12 +2586,12 @@ qemuDomainGetControlInfo(virDomainPtr dom,
     if (priv->monError) {
         info->state = VIR_DOMAIN_CONTROL_ERROR;
         info->details = VIR_DOMAIN_CONTROL_ERROR_REASON_MONITOR;
-    } else if (priv->job.active) {
+    } else if (vm->job->active) {
         if (virTimeMillisNow(&info->stateTime) < 0)
             goto cleanup;
-        if (priv->job.current) {
+        if (vm->job->current) {
             info->state = VIR_DOMAIN_CONTROL_JOB;
-            info->stateTime -= priv->job.current->started;
+            info->stateTime -= vm->job->current->started;
         } else {
             if (priv->monStart > 0) {
                 info->state = VIR_DOMAIN_CONTROL_OCCUPIED;
@@ -2653,7 +2650,7 @@ qemuDomainSaveInternal(virQEMUDriver *driver,
         goto endjob;
     }
 
-    qemuDomainJobSetStatsType(priv->job.current,
+    qemuDomainJobSetStatsType(vm->job->current,
                               QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP);
 
     /* Pause */
@@ -3016,28 +3013,27 @@ qemuDomainManagedSaveRemove(virDomainPtr dom, unsigned int flags)
 static int
 qemuDumpWaitForCompletion(virDomainObj *vm)
 {
-    qemuDomainObjPrivate *priv = vm->privateData;
-    qemuDomainJobPrivate *jobPriv = priv->job.privateData;
-    qemuDomainJobDataPrivate *privJobCurrent = priv->job.current->privateData;
+    qemuDomainJobPrivate *jobPriv = vm->job->privateData;
+    qemuDomainJobDataPrivate *privJobCurrent = vm->job->current->privateData;
 
     VIR_DEBUG("Waiting for dump completion");
-    while (!jobPriv->dumpCompleted && !priv->job.abortJob) {
+    while (!jobPriv->dumpCompleted && !vm->job->abortJob) {
         if (qemuDomainObjWait(vm) < 0)
             return -1;
     }
 
     if (privJobCurrent->stats.dump.status == QEMU_MONITOR_DUMP_STATUS_FAILED) {
-        if (priv->job.error)
+        if (vm->job->error)
             virReportError(VIR_ERR_OPERATION_FAILED,
                            _("memory-only dump failed: %s"),
-                           priv->job.error);
+                           vm->job->error);
         else
             virReportError(VIR_ERR_OPERATION_FAILED, "%s",
                            _("memory-only dump failed for unknown reason"));
 
         return -1;
     }
-    qemuDomainJobDataUpdateTime(priv->job.current);
+    qemuDomainJobDataUpdateTime(vm->job->current);
 
     return 0;
 }
@@ -3060,10 +3056,10 @@ qemuDumpToFd(virQEMUDriver *driver,
         return -1;
 
     if (detach) {
-        qemuDomainJobSetStatsType(priv->job.current,
+        qemuDomainJobSetStatsType(vm->job->current,
                                   QEMU_DOMAIN_JOB_STATS_TYPE_MEMDUMP);
     } else {
-        g_clear_pointer(&priv->job.current, virDomainJobDataFree);
+        g_clear_pointer(&vm->job->current, virDomainJobDataFree);
     }
 
     if (qemuDomainObjEnterMonitorAsync(vm, asyncJob) < 0)
@@ -3222,7 +3218,7 @@ qemuDomainCoreDumpWithFormat(virDomainPtr dom,
         goto endjob;
 
     priv = vm->privateData;
-    qemuDomainJobSetStatsType(priv->job.current,
+    qemuDomainJobSetStatsType(vm->job->current,
                               QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP);
 
     /* Migrate will always stop the VM, so the resume condition is
@@ -4058,7 +4054,7 @@ processMonitorEOFEvent(virQEMUDriver *driver,
         auditReason = "failed";
     }
 
-    if (priv->job.asyncJob == VIR_ASYNC_JOB_MIGRATION_IN) {
+    if (vm->job->asyncJob == VIR_ASYNC_JOB_MIGRATION_IN) {
         stopFlags |= VIR_QEMU_PROCESS_STOP_MIGRATED;
         qemuMigrationDstErrorSave(driver, vm->def->name,
                                   qemuMonitorLastError(priv->mon));
@@ -6507,7 +6503,6 @@ qemuDomainObjStart(virConnectPtr conn,
     bool force_boot = (flags & VIR_DOMAIN_START_FORCE_BOOT) != 0;
     bool reset_nvram = (flags & VIR_DOMAIN_START_RESET_NVRAM) != 0;
     unsigned int start_flags = VIR_QEMU_PROCESS_START_COLD;
-    qemuDomainObjPrivate *priv = vm->privateData;
 
     start_flags |= start_paused ? VIR_QEMU_PROCESS_START_PAUSED : 0;
     start_flags |= autodestroy ? VIR_QEMU_PROCESS_START_AUTODESTROY : 0;
@@ -6529,8 +6524,8 @@ qemuDomainObjStart(virConnectPtr conn,
             }
             vm->hasManagedSave = false;
         } else {
-            virDomainJobOperation op = priv->job.current->operation;
-            priv->job.current->operation = VIR_DOMAIN_JOB_OPERATION_RESTORE;
+            virDomainJobOperation op = vm->job->current->operation;
+            vm->job->current->operation = VIR_DOMAIN_JOB_OPERATION_RESTORE;
 
             ret = qemuDomainObjRestore(conn, driver, vm, managed_save,
                                        start_paused, bypass_cache,
@@ -6549,7 +6544,7 @@ qemuDomainObjStart(virConnectPtr conn,
                 return ret;
             } else {
                 VIR_WARN("Ignoring incomplete managed state %s", managed_save);
-                priv->job.current->operation = op;
+                vm->job->current->operation = op;
                 vm->hasManagedSave = false;
             }
         }
@@ -12665,20 +12660,19 @@ qemuDomainGetJobStatsInternal(virDomainObj *vm,
                               bool completed,
                               virDomainJobData **jobData)
 {
-    qemuDomainObjPrivate *priv = vm->privateData;
     qemuDomainJobDataPrivate *privStats = NULL;
     int ret = -1;
 
     *jobData = NULL;
 
     if (completed) {
-        if (priv->job.completed && !priv->job.current)
-            *jobData = virDomainJobDataCopy(priv->job.completed);
+        if (vm->job->completed && !vm->job->current)
+            *jobData = virDomainJobDataCopy(vm->job->completed);
 
         return 0;
     }
 
-    if (priv->job.asyncJob == VIR_ASYNC_JOB_MIGRATION_IN) {
+    if (vm->job->asyncJob == VIR_ASYNC_JOB_MIGRATION_IN) {
         virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
                        _("migration statistics are available only on "
                          "the source host"));
@@ -12691,11 +12685,11 @@ qemuDomainGetJobStatsInternal(virDomainObj *vm,
     if (virDomainObjCheckActive(vm) < 0)
         goto cleanup;
 
-    if (!priv->job.current) {
+    if (!vm->job->current) {
         ret = 0;
         goto cleanup;
     }
-    *jobData = virDomainJobDataCopy(priv->job.current);
+    *jobData = virDomainJobDataCopy(vm->job->current);
 
     privStats = (*jobData)->privateData;
 
@@ -12769,7 +12763,6 @@ qemuDomainGetJobStats(virDomainPtr dom,
                       unsigned int flags)
 {
     virDomainObj *vm;
-    qemuDomainObjPrivate *priv;
     g_autoptr(virDomainJobData) jobData = NULL;
     bool completed = !!(flags & VIR_DOMAIN_JOB_STATS_COMPLETED);
     int ret = -1;
@@ -12783,7 +12776,6 @@ qemuDomainGetJobStats(virDomainPtr dom,
     if (virDomainGetJobStatsEnsureACL(dom->conn, vm->def) < 0)
         goto cleanup;
 
-    priv = vm->privateData;
     if (qemuDomainGetJobStatsInternal(vm, completed, &jobData) < 0)
         goto cleanup;
 
@@ -12799,7 +12791,7 @@ qemuDomainGetJobStats(virDomainPtr dom,
     ret = qemuDomainJobDataToParams(jobData, type, params, nparams);
 
     if (completed && ret == 0 && !(flags & VIR_DOMAIN_JOB_STATS_KEEP_COMPLETED))
-        g_clear_pointer(&priv->job.completed, virDomainJobDataFree);
+        g_clear_pointer(&vm->job->completed, virDomainJobDataFree);
 
 cleanup:
     virDomainObjEndAPI(&vm);
@@ -12868,14 +12860,14 @@ qemuDomainAbortJobFlags(virDomainPtr dom,
     priv = vm->privateData;
 
     if (flags & VIR_DOMAIN_ABORT_JOB_POSTCOPY &&
-        (priv->job.asyncJob != VIR_ASYNC_JOB_MIGRATION_OUT ||
+        (vm->job->asyncJob != VIR_ASYNC_JOB_MIGRATION_OUT ||
         !virDomainObjIsPostcopy(vm, VIR_DOMAIN_JOB_OPERATION_MIGRATION_OUT))) {
         virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                        _("current job is not outgoing migration in post-copy mode"));
         goto endjob;
     }
 
-    switch (priv->job.asyncJob) {
+    switch (vm->job->asyncJob) {
     case VIR_ASYNC_JOB_NONE:
         virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                        _("no job is active on the domain"));
@@ -12905,7 +12897,7 @@ qemuDomainAbortJobFlags(virDomainPtr dom,
         break;
 
     case VIR_ASYNC_JOB_DUMP:
-        if (priv->job.apiFlags & VIR_DUMP_MEMORY_ONLY) {
+        if (vm->job->apiFlags & VIR_DUMP_MEMORY_ONLY) {
             virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                            _("cannot abort memory-only dump"));
             goto endjob;
@@ -12925,7 +12917,7 @@ qemuDomainAbortJobFlags(virDomainPtr dom,
 
     case VIR_ASYNC_JOB_LAST:
     default:
-        virReportEnumRangeError(virDomainAsyncJob, priv->job.asyncJob);
+        virReportEnumRangeError(virDomainAsyncJob, vm->job->asyncJob);
         break;
     }
 
@@ -13334,14 +13326,14 @@ qemuDomainMigrateStartPostCopy(virDomainPtr dom,
 
     priv = vm->privateData;
 
-    if (priv->job.asyncJob != VIR_ASYNC_JOB_MIGRATION_OUT) {
+    if (vm->job->asyncJob != VIR_ASYNC_JOB_MIGRATION_OUT) {
         virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                        _("post-copy can only be started while "
                          "outgoing migration is in progress"));
         goto endjob;
     }
 
-    if (!(priv->job.apiFlags & VIR_MIGRATE_POSTCOPY)) {
+    if (!(vm->job->apiFlags & VIR_MIGRATE_POSTCOPY)) {
         virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                        _("switching to post-copy requires migration to be "
                          "started with VIR_MIGRATE_POSTCOPY flag"));
@ -86,10 +86,8 @@ VIR_ENUM_IMPL(qemuMigrationJobPhase,
|
||||
static bool ATTRIBUTE_NONNULL(1)
|
||||
qemuMigrationJobIsAllowed(virDomainObj *vm)
|
||||
{
|
||||
qemuDomainObjPrivate *priv = vm->privateData;
|
||||
|
||||
if (priv->job.asyncJob == VIR_ASYNC_JOB_MIGRATION_IN ||
|
||||
priv->job.asyncJob == VIR_ASYNC_JOB_MIGRATION_OUT) {
|
||||
if (vm->job->asyncJob == VIR_ASYNC_JOB_MIGRATION_IN ||
|
||||
vm->job->asyncJob == VIR_ASYNC_JOB_MIGRATION_OUT) {
|
||||
virReportError(VIR_ERR_OPERATION_INVALID,
|
||||
_("another migration job is already running for domain '%s'"),
|
||||
vm->def->name);
|
||||
@ -105,7 +103,6 @@ qemuMigrationJobStart(virDomainObj *vm,
|
||||
virDomainAsyncJob job,
|
||||
unsigned long apiFlags)
|
||||
{
|
||||
qemuDomainObjPrivate *priv = vm->privateData;
|
||||
virDomainJobOperation op;
|
||||
unsigned long long mask;
|
||||
|
||||
@ -126,7 +123,7 @@ qemuMigrationJobStart(virDomainObj *vm,
|
||||
if (qemuDomainObjBeginAsyncJob(vm, job, op, apiFlags) < 0)
|
||||
return -1;
|
||||
|
||||
qemuDomainJobSetStatsType(priv->job.current,
|
||||
qemuDomainJobSetStatsType(vm->job->current,
|
||||
QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION);
|
||||
|
||||
qemuDomainObjSetAsyncJobMask(vm, mask);
|
||||
@ -138,13 +135,11 @@ static int
|
||||
qemuMigrationCheckPhase(virDomainObj *vm,
|
||||
qemuMigrationJobPhase phase)
|
||||
{
|
||||
qemuDomainObjPrivate *priv = vm->privateData;
|
||||
|
||||
if (phase < QEMU_MIGRATION_PHASE_POSTCOPY_FAILED &&
|
||||
phase < priv->job.phase) {
|
||||
phase < vm->job->phase) {
|
||||
virReportError(VIR_ERR_INTERNAL_ERROR,
|
||||
_("migration protocol going backwards %s => %s"),
|
||||
qemuMigrationJobPhaseTypeToString(priv->job.phase),
|
||||
qemuMigrationJobPhaseTypeToString(vm->job->phase),
|
||||
qemuMigrationJobPhaseTypeToString(phase));
|
||||
return -1;
|
||||
}
|
||||
@ -190,9 +185,7 @@ static bool ATTRIBUTE_NONNULL(1)
|
||||
qemuMigrationJobIsActive(virDomainObj *vm,
|
||||
virDomainAsyncJob job)
|
||||
{
|
||||
qemuDomainObjPrivate *priv = vm->privateData;
|
||||
|
||||
if (priv->job.asyncJob != job) {
|
||||
if (vm->job->asyncJob != job) {
|
||||
const char *msg;
|
||||
|
||||
if (job == VIR_ASYNC_JOB_MIGRATION_IN)
|
||||
@ -956,7 +949,7 @@ qemuMigrationSrcCancelRemoveTempBitmaps(virDomainObj *vm,
|
||||
virDomainAsyncJob asyncJob)
|
||||
{
|
||||
qemuDomainObjPrivate *priv = vm->privateData;
|
||||
qemuDomainJobPrivate *jobPriv = priv->job.privateData;
|
||||
qemuDomainJobPrivate *jobPriv = vm->job->privateData;
|
||||
GSList *next;
|
||||
|
||||
for (next = jobPriv->migTempBitmaps; next; next = next->next) {
|
||||
@ -1236,10 +1229,10 @@ qemuMigrationSrcNBDStorageCopy(virQEMUDriver *driver,
|
||||
if (rv < 0)
|
||||
return -1;
|
||||
|
||||
if (priv->job.abortJob) {
|
||||
priv->job.current->status = VIR_DOMAIN_JOB_STATUS_CANCELED;
|
||||
if (vm->job->abortJob) {
|
||||
vm->job->current->status = VIR_DOMAIN_JOB_STATUS_CANCELED;
|
||||
virReportError(VIR_ERR_OPERATION_ABORTED, _("%s: %s"),
|
||||
virDomainAsyncJobTypeToString(priv->job.asyncJob),
|
||||
virDomainAsyncJobTypeToString(vm->job->asyncJob),
|
||||
_("canceled by client"));
|
||||
return -1;
|
||||
}
|
||||
@ -1255,7 +1248,7 @@ qemuMigrationSrcNBDStorageCopy(virQEMUDriver *driver,
|
||||
}
|
||||
|
||||
qemuMigrationSrcFetchMirrorStats(vm, VIR_ASYNC_JOB_MIGRATION_OUT,
|
||||
priv->job.current);
|
||||
vm->job->current);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -1718,14 +1711,13 @@ qemuMigrationDstPostcopyFailed(virDomainObj *vm)
|
||||
static void
|
||||
qemuMigrationSrcWaitForSpice(virDomainObj *vm)
|
||||
{
|
||||
qemuDomainObjPrivate *priv = vm->privateData;
|
||||
qemuDomainJobPrivate *jobPriv = priv->job.privateData;
|
||||
qemuDomainJobPrivate *jobPriv = vm->job->privateData;
|
||||
|
||||
if (!jobPriv->spiceMigration)
|
||||
return;
|
||||
|
||||
VIR_DEBUG("Waiting for SPICE to finish migration");
|
||||
while (!jobPriv->spiceMigrated && !priv->job.abortJob) {
|
||||
while (!jobPriv->spiceMigrated && !vm->job->abortJob) {
|
||||
if (qemuDomainObjWait(vm) < 0)
|
||||
return;
|
||||
}
|
||||
@ -1810,9 +1802,7 @@ qemuMigrationAnyFetchStats(virDomainObj *vm,
|
||||
static const char *
|
||||
qemuMigrationJobName(virDomainObj *vm)
|
||||
{
|
||||
qemuDomainObjPrivate *priv = vm->privateData;
|
||||
|
||||
switch (priv->job.asyncJob) {
|
||||
switch (vm->job->asyncJob) {
|
||||
case VIR_ASYNC_JOB_MIGRATION_OUT:
|
||||
return _("migration out");
|
||||
case VIR_ASYNC_JOB_SAVE:
|
||||
@ -1839,8 +1829,7 @@ static int
|
||||
qemuMigrationJobCheckStatus(virDomainObj *vm,
|
||||
virDomainAsyncJob asyncJob)
|
||||
{
|
||||
qemuDomainObjPrivate *priv = vm->privateData;
|
||||
virDomainJobData *jobData = priv->job.current;
|
||||
virDomainJobData *jobData = vm->job->current;
|
||||
qemuDomainJobDataPrivate *privJob = jobData->privateData;
|
||||
g_autofree char *error = NULL;
|
||||
|
||||
@ -1916,8 +1905,7 @@ qemuMigrationAnyCompleted(virDomainObj *vm,
|
||||
virConnectPtr dconn,
|
||||
unsigned int flags)
|
||||
{
|
||||
qemuDomainObjPrivate *priv = vm->privateData;
|
||||
virDomainJobData *jobData = priv->job.current;
|
||||
virDomainJobData *jobData = vm->job->current;
|
||||
int pauseReason;
|
||||
|
||||
if (qemuMigrationJobCheckStatus(vm, asyncJob) < 0)
|
||||
@ -2009,7 +1997,7 @@ qemuMigrationSrcWaitForCompletion(virDomainObj *vm,
|
||||
unsigned int flags)
|
||||
{
|
||||
qemuDomainObjPrivate *priv = vm->privateData;
|
||||
virDomainJobData *jobData = priv->job.current;
|
||||
virDomainJobData *jobData = vm->job->current;
|
||||
int rv;
|
||||
|
||||
jobData->status = VIR_DOMAIN_JOB_STATUS_MIGRATING;
|
||||
@ -2029,9 +2017,9 @@ qemuMigrationSrcWaitForCompletion(virDomainObj *vm,
|
||||
|
||||
qemuDomainJobDataUpdateTime(jobData);
|
||||
qemuDomainJobDataUpdateDowntime(jobData);
|
||||
g_clear_pointer(&priv->job.completed, virDomainJobDataFree);
|
||||
priv->job.completed = virDomainJobDataCopy(jobData);
|
||||
priv->job.completed->status = VIR_DOMAIN_JOB_STATUS_COMPLETED;
|
||||
g_clear_pointer(&vm->job->completed, virDomainJobDataFree);
|
||||
vm->job->completed = virDomainJobDataCopy(jobData);
|
||||
vm->job->completed->status = VIR_DOMAIN_JOB_STATUS_COMPLETED;
|
||||
|
||||
if (asyncJob != VIR_ASYNC_JOB_MIGRATION_OUT &&
|
||||
jobData->status == VIR_DOMAIN_JOB_STATUS_HYPERVISOR_COMPLETED)
|
||||
@ -2143,7 +2131,7 @@ qemuMigrationSrcGraphicsRelocate(virDomainObj *vm,
|
||||
return 0;
|
||||
|
||||
if (qemuDomainObjEnterMonitorAsync(vm, VIR_ASYNC_JOB_MIGRATION_OUT) == 0) {
|
||||
qemuDomainJobPrivate *jobPriv = priv->job.privateData;
|
||||
qemuDomainJobPrivate *jobPriv = vm->job->privateData;
|
||||
|
||||
rc = qemuMonitorGraphicsRelocate(priv->mon, type, listenAddress,
|
||||
port, tlsPort, tlsSubject);
|
||||
@ -2276,16 +2264,15 @@ static void
|
||||
qemuMigrationAnyConnectionClosed(virDomainObj *vm,
|
||||
virConnectPtr conn)
|
||||
{
|
||||
qemuDomainObjPrivate *priv = vm->privateData;
|
||||
qemuDomainJobPrivate *jobPriv = priv->job.privateData;
|
||||
qemuDomainJobPrivate *jobPriv = vm->job->privateData;
|
||||
bool postcopy = false;
|
||||
int phase;
|
||||
|
||||
VIR_DEBUG("vm=%s, conn=%p, asyncJob=%s, phase=%s",
|
||||
vm->def->name, conn,
|
||||
virDomainAsyncJobTypeToString(priv->job.asyncJob),
|
||||
qemuDomainAsyncJobPhaseToString(priv->job.asyncJob,
|
||||
priv->job.phase));
|
||||
virDomainAsyncJobTypeToString(vm->job->asyncJob),
|
||||
qemuDomainAsyncJobPhaseToString(vm->job->asyncJob,
|
||||
vm->job->phase));
|
||||
|
||||
if (!qemuMigrationJobIsActive(vm, VIR_ASYNC_JOB_MIGRATION_IN) &&
|
||||
!qemuMigrationJobIsActive(vm, VIR_ASYNC_JOB_MIGRATION_OUT))
|
||||
@ -2294,7 +2281,7 @@ qemuMigrationAnyConnectionClosed(virDomainObj *vm,
|
||||
VIR_WARN("The connection which controls migration of domain %s was closed",
|
||||
vm->def->name);
|
||||
|
||||
switch ((qemuMigrationJobPhase) priv->job.phase) {
|
||||
switch ((qemuMigrationJobPhase) vm->job->phase) {
|
||||
case QEMU_MIGRATION_PHASE_BEGIN3:
|
||||
VIR_DEBUG("Aborting outgoing migration after Begin phase");
|
||||
break;
|
||||
@ -2346,14 +2333,14 @@ qemuMigrationAnyConnectionClosed(virDomainObj *vm,
|
||||
ignore_value(qemuMigrationJobStartPhase(vm, phase));
|
||||
|
||||
if (postcopy) {
|
||||
if (priv->job.asyncJob == VIR_ASYNC_JOB_MIGRATION_OUT)
|
||||
if (vm->job->asyncJob == VIR_ASYNC_JOB_MIGRATION_OUT)
|
||||
qemuMigrationSrcPostcopyFailed(vm);
|
||||
else
|
||||
qemuMigrationDstPostcopyFailed(vm);
|
||||
qemuMigrationJobContinue(vm, qemuProcessCleanupMigrationJob);
|
||||
} else {
|
||||
qemuMigrationParamsReset(vm, priv->job.asyncJob,
|
||||
jobPriv->migParams, priv->job.apiFlags);
|
||||
qemuMigrationParamsReset(vm, vm->job->asyncJob,
|
||||
jobPriv->migParams, vm->job->apiFlags);
|
||||
qemuMigrationJobFinish(vm);
|
||||
}
|
||||
}
|
||||
@ -2377,12 +2364,11 @@ qemuMigrationSrcBeginPhaseBlockDirtyBitmaps(qemuMigrationCookie *mig,

{
GSList *disks = NULL;
qemuDomainObjPrivate *priv = vm->privateData;
size_t i;

g_autoptr(GHashTable) blockNamedNodeData = NULL;

if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, priv->job.asyncJob)))
if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, vm->job->asyncJob)))
return -1;

for (i = 0; i < vm->def->ndisks; i++) {
@ -2452,7 +2438,7 @@ qemuMigrationAnyRefreshStatus(virDomainObj *vm,
g_autoptr(virDomainJobData) jobData = NULL;
qemuDomainJobDataPrivate *priv;

jobData = virDomainJobDataInit(&qemuJobDataPrivateDataCallbacks);
jobData = virDomainJobDataInit(&virQEMUDriverDomainJobConfig.jobDataPrivateCb);
priv = jobData->privateData;

if (qemuMigrationAnyFetchStats(vm, asyncJob, jobData, NULL) < 0)
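Note the second change riding along in qemuMigrationAnyRefreshStatus(): job data is no longer initialized from a file-local callback table but from the driver-wide job config. Condensed before/after, with both initializers exactly as they appear in this diff:

    /* before: file-local callbacks table */
    jobData = virDomainJobDataInit(&qemuJobDataPrivateDataCallbacks);

    /* after: callbacks taken from the shared per-driver job config */
    jobData = virDomainJobDataInit(&virQEMUDriverDomainJobConfig.jobDataPrivateCb);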
@ -2549,11 +2535,11 @@ qemuMigrationSrcBeginPhase(virQEMUDriver *driver,
* Otherwise we will start the async job later in the perform phase losing
* change protection.
*/
if (priv->job.asyncJob == VIR_ASYNC_JOB_MIGRATION_OUT &&
if (vm->job->asyncJob == VIR_ASYNC_JOB_MIGRATION_OUT &&
qemuMigrationJobStartPhase(vm, QEMU_MIGRATION_PHASE_BEGIN3) < 0)
return NULL;

if (!qemuMigrationSrcIsAllowed(driver, vm, true, priv->job.asyncJob, flags))
if (!qemuMigrationSrcIsAllowed(driver, vm, true, vm->job->asyncJob, flags))
return NULL;

if (!(flags & (VIR_MIGRATE_UNSAFE | VIR_MIGRATE_OFFLINE)) &&
@ -2656,8 +2642,6 @@ qemuMigrationAnyCanResume(virDomainObj *vm,
unsigned long flags,
qemuMigrationJobPhase expectedPhase)
{
qemuDomainObjPrivate *priv = vm->privateData;

VIR_DEBUG("vm=%p, job=%s, flags=0x%lx, expectedPhase=%s",
vm, virDomainAsyncJobTypeToString(job), flags,
qemuDomainAsyncJobPhaseToString(VIR_ASYNC_JOB_MIGRATION_OUT,
@ -2684,22 +2668,22 @@ qemuMigrationAnyCanResume(virDomainObj *vm,
if (!qemuMigrationJobIsActive(vm, job))
return false;

if (priv->job.asyncOwner != 0 &&
priv->job.asyncOwner != virThreadSelfID()) {
if (vm->job->asyncOwner != 0 &&
vm->job->asyncOwner != virThreadSelfID()) {
virReportError(VIR_ERR_OPERATION_INVALID,
_("migration of domain %s is being actively monitored by another thread"),
vm->def->name);
return false;
}

if (!virDomainObjIsPostcopy(vm, priv->job.current->operation)) {
if (!virDomainObjIsPostcopy(vm, vm->job->current->operation)) {
virReportError(VIR_ERR_OPERATION_INVALID,
_("migration of domain %s is not in post-copy phase"),
vm->def->name);
return false;
}

if (priv->job.phase < QEMU_MIGRATION_PHASE_POSTCOPY_FAILED &&
if (vm->job->phase < QEMU_MIGRATION_PHASE_POSTCOPY_FAILED &&
!virDomainObjIsFailedPostcopy(vm)) {
virReportError(VIR_ERR_OPERATION_INVALID,
_("post-copy migration of domain %s has not failed"),
@ -2707,7 +2691,7 @@ qemuMigrationAnyCanResume(virDomainObj *vm,
return false;
}

if (priv->job.phase > expectedPhase) {
if (vm->job->phase > expectedPhase) {
virReportError(VIR_ERR_OPERATION_INVALID,
_("resuming failed post-copy migration of domain %s already in progress"),
vm->def->name);
@ -2881,8 +2865,8 @@ qemuMigrationDstPrepareCleanup(virQEMUDriver *driver,
VIR_DEBUG("driver=%p, vm=%s, job=%s, asyncJob=%s",
driver,
vm->def->name,
virDomainJobTypeToString(priv->job.active),
virDomainAsyncJobTypeToString(priv->job.asyncJob));
virDomainJobTypeToString(vm->job->active),
virDomainAsyncJobTypeToString(vm->job->asyncJob));

virPortAllocatorRelease(priv->migrationPort);
priv->migrationPort = 0;
@ -3061,7 +3045,7 @@ qemuMigrationDstPrepareActive(virQEMUDriver *driver,
unsigned long flags)
{
qemuDomainObjPrivate *priv = vm->privateData;
qemuDomainJobPrivate *jobPriv = priv->job.privateData;
qemuDomainJobPrivate *jobPriv = vm->job->privateData;
qemuProcessIncomingDef *incoming = NULL;
g_autofree char *tlsAlias = NULL;
virObjectEvent *event = NULL;
@ -3219,7 +3203,7 @@ qemuMigrationDstPrepareActive(virQEMUDriver *driver,
error:
virErrorPreserveLast(&origErr);
qemuMigrationParamsReset(vm, VIR_ASYNC_JOB_MIGRATION_IN,
jobPriv->migParams, priv->job.apiFlags);
jobPriv->migParams, vm->job->apiFlags);

if (stopProcess) {
unsigned int stopFlags = VIR_QEMU_PROCESS_STOP_MIGRATED;
@ -3333,7 +3317,8 @@ qemuMigrationDstPrepareFresh(virQEMUDriver *driver,
QEMU_MIGRATION_COOKIE_CPU_HOTPLUG |
QEMU_MIGRATION_COOKIE_CPU |
QEMU_MIGRATION_COOKIE_CAPS |
QEMU_MIGRATION_COOKIE_BLOCK_DIRTY_BITMAPS)))
QEMU_MIGRATION_COOKIE_BLOCK_DIRTY_BITMAPS,
NULL)))
goto cleanup;

if (!(vm = virDomainObjListAdd(driver->domains, def,
@ -3477,7 +3462,7 @@ qemuMigrationDstPrepareResume(virQEMUDriver *driver,

if (!(mig = qemuMigrationCookieParse(driver, def, origname, NULL,
cookiein, cookieinlen,
QEMU_MIGRATION_COOKIE_CAPS)))
QEMU_MIGRATION_COOKIE_CAPS, vm)))
goto cleanup;

priv->origname = g_strdup(origname);
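These prepare-path hunks are the first callers to adopt the widened qemuMigrationCookieParse() signature (the declaration change appears further down, in the qemu_migration_cookie.h hunk): the parser now receives the virDomainObj * as a trailing argument so it can consult vm->job itself, and callers with no usable domain object pass NULL. An illustrative call under the new signature, reusing the identifiers from the hunk above:

    g_autoptr(qemuMigrationCookie) mig = NULL;

    /* Trailing vm argument: pass the domain object when job stats may
     * be consulted, or NULL when they cannot be (both kinds of callers
     * are visible in this diff). */
    if (!(mig = qemuMigrationCookieParse(driver, def, origname, NULL,
                                         cookiein, cookieinlen,
                                         QEMU_MIGRATION_COOKIE_CAPS, vm)))
        goto cleanup;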
@ -3858,13 +3843,13 @@ qemuMigrationSrcComplete(virQEMUDriver *driver,
virDomainAsyncJob asyncJob)
{
qemuDomainObjPrivate *priv = vm->privateData;
virDomainJobData *jobData = priv->job.completed;
virDomainJobData *jobData = vm->job->completed;
virObjectEvent *event;
int reason;

if (!jobData) {
priv->job.completed = virDomainJobDataCopy(priv->job.current);
jobData = priv->job.completed;
vm->job->completed = virDomainJobDataCopy(vm->job->current);
jobData = vm->job->completed;
jobData->status = VIR_DOMAIN_JOB_STATUS_COMPLETED;
}

@ -3909,7 +3894,7 @@ qemuMigrationSrcConfirmPhase(virQEMUDriver *driver,
{
g_autoptr(qemuMigrationCookie) mig = NULL;
qemuDomainObjPrivate *priv = vm->privateData;
qemuDomainJobPrivate *jobPriv = priv->job.privateData;
qemuDomainJobPrivate *jobPriv = vm->job->privateData;
virDomainJobData *jobData = NULL;
qemuMigrationJobPhase phase;

@ -3927,7 +3912,7 @@ qemuMigrationSrcConfirmPhase(virQEMUDriver *driver,
* job will stay active even though migration API finishes with an
* error.
*/
phase = priv->job.phase;
phase = vm->job->phase;
} else if (retcode == 0) {
phase = QEMU_MIGRATION_PHASE_CONFIRM3;
} else {
@ -3939,13 +3924,13 @@ qemuMigrationSrcConfirmPhase(virQEMUDriver *driver,

if (!(mig = qemuMigrationCookieParse(driver, vm->def, priv->origname, priv,
cookiein, cookieinlen,
QEMU_MIGRATION_COOKIE_STATS)))
QEMU_MIGRATION_COOKIE_STATS, vm)))
return -1;

if (retcode == 0)
jobData = priv->job.completed;
jobData = vm->job->completed;
else
g_clear_pointer(&priv->job.completed, virDomainJobDataFree);
g_clear_pointer(&vm->job->completed, virDomainJobDataFree);

/* Update times with the values sent by the destination daemon */
if (mig->jobData && jobData) {
@ -3985,7 +3970,7 @@ qemuMigrationSrcConfirmPhase(virQEMUDriver *driver,
qemuMigrationSrcRestoreDomainState(driver, vm);

qemuMigrationParamsReset(vm, VIR_ASYNC_JOB_MIGRATION_OUT,
jobPriv->migParams, priv->job.apiFlags);
jobPriv->migParams, vm->job->apiFlags);
qemuDomainSetMaxMemLock(vm, 0, &priv->preMigrationMemlock);
}

@ -4005,7 +3990,6 @@ qemuMigrationSrcConfirm(virQEMUDriver *driver,
{
qemuMigrationJobPhase phase;
g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
qemuDomainObjPrivate *priv = vm->privateData;
int ret = -1;

VIR_DEBUG("vm=%p, flags=0x%x, cancelled=%d", vm, flags, cancelled);
@ -4024,7 +4008,7 @@ qemuMigrationSrcConfirm(virQEMUDriver *driver,
* error.
*/
if (virDomainObjIsFailedPostcopy(vm))
phase = priv->job.phase;
phase = vm->job->phase;
else if (cancelled)
phase = QEMU_MIGRATION_PHASE_CONFIRM3_CANCELLED;
else
@ -4416,7 +4400,7 @@ qemuMigrationSrcRunPrepareBlockDirtyBitmapsMerge(virDomainObj *vm,
{
g_autoslist(qemuDomainJobPrivateMigrateTempBitmap) tmpbitmaps = NULL;
qemuDomainObjPrivate *priv = vm->privateData;
qemuDomainJobPrivate *jobPriv = priv->job.privateData;
qemuDomainJobPrivate *jobPriv = vm->job->privateData;
g_autoptr(virJSONValue) actions = virJSONValueNewArray();
g_autoptr(GHashTable) blockNamedNodeData = NULL;
GSList *nextdisk;
@ -4614,8 +4598,7 @@ qemuMigrationSrcStart(virDomainObj *vm,
static bool
qemuMigrationSrcIsCanceled(virDomainObj *vm)
{
qemuDomainObjPrivate *priv = vm->privateData;
virDomainJobData *jobData = priv->job.current;
virDomainJobData *jobData = vm->job->current;

qemuMigrationUpdateJobType(jobData);
switch (jobData->status) {
@ -4770,7 +4753,8 @@ qemuMigrationSrcRun(virQEMUDriver *driver,
cookieFlags |
QEMU_MIGRATION_COOKIE_GRAPHICS |
QEMU_MIGRATION_COOKIE_CAPS |
QEMU_MIGRATION_COOKIE_BLOCK_DIRTY_BITMAPS);
QEMU_MIGRATION_COOKIE_BLOCK_DIRTY_BITMAPS,
NULL);
if (!mig)
goto error;

@ -4881,13 +4865,13 @@ qemuMigrationSrcRun(virQEMUDriver *driver,
if (qemuDomainObjEnterMonitorAsync(vm, VIR_ASYNC_JOB_MIGRATION_OUT) < 0)
goto error;

if (priv->job.abortJob) {
if (vm->job->abortJob) {
/* explicitly do this *after* we entered the monitor,
* as this is a critical section so we are guaranteed
* priv->job.abortJob will not change */
priv->job.current->status = VIR_DOMAIN_JOB_STATUS_CANCELED;
* vm->job->abortJob will not change */
vm->job->current->status = VIR_DOMAIN_JOB_STATUS_CANCELED;
virReportError(VIR_ERR_OPERATION_ABORTED, _("%s: %s"),
virDomainAsyncJobTypeToString(priv->job.asyncJob),
virDomainAsyncJobTypeToString(vm->job->asyncJob),
_("canceled by client"));
goto exit_monitor;
}
@ -4951,7 +4935,7 @@ qemuMigrationSrcRun(virQEMUDriver *driver,
* resume it now once we finished all block jobs and wait for the real
* end of the migration.
*/
if (priv->job.current->status == VIR_DOMAIN_JOB_STATUS_PAUSED) {
if (vm->job->current->status == VIR_DOMAIN_JOB_STATUS_PAUSED) {
if (qemuMigrationSrcContinue(vm,
QEMU_MONITOR_MIGRATION_STATUS_PRE_SWITCHOVER,
VIR_ASYNC_JOB_MIGRATION_OUT) < 0)
@ -4980,11 +4964,11 @@ qemuMigrationSrcRun(virQEMUDriver *driver,
goto error;
}

if (priv->job.completed) {
priv->job.completed->stopped = priv->job.current->stopped;
qemuDomainJobDataUpdateTime(priv->job.completed);
qemuDomainJobDataUpdateDowntime(priv->job.completed);
ignore_value(virTimeMillisNow(&priv->job.completed->sent));
if (vm->job->completed) {
vm->job->completed->stopped = vm->job->current->stopped;
qemuDomainJobDataUpdateTime(vm->job->completed);
qemuDomainJobDataUpdateDowntime(vm->job->completed);
ignore_value(virTimeMillisNow(&vm->job->completed->sent));
}

cookieFlags |= QEMU_MIGRATION_COOKIE_NETWORK |
@ -5019,7 +5003,7 @@ qemuMigrationSrcRun(virQEMUDriver *driver,
}

if (cancel &&
priv->job.current->status != VIR_DOMAIN_JOB_STATUS_HYPERVISOR_COMPLETED)
vm->job->current->status != VIR_DOMAIN_JOB_STATUS_HYPERVISOR_COMPLETED)
qemuMigrationSrcCancel(vm, VIR_ASYNC_JOB_MIGRATION_OUT, true);

/* cancel any outstanding NBD jobs */
@ -5030,8 +5014,8 @@ qemuMigrationSrcRun(virQEMUDriver *driver,

qemuMigrationSrcCancelRemoveTempBitmaps(vm, VIR_ASYNC_JOB_MIGRATION_OUT);

if (priv->job.current->status != VIR_DOMAIN_JOB_STATUS_CANCELED)
priv->job.current->status = VIR_DOMAIN_JOB_STATUS_FAILED;
if (vm->job->current->status != VIR_DOMAIN_JOB_STATUS_CANCELED)
vm->job->current->status = VIR_DOMAIN_JOB_STATUS_FAILED;
}

if (iothread)
@ -5066,7 +5050,7 @@ qemuMigrationSrcResume(virDomainObj *vm,

mig = qemuMigrationCookieParse(driver, vm->def, priv->origname, priv,
cookiein, cookieinlen,
QEMU_MIGRATION_COOKIE_CAPS);
QEMU_MIGRATION_COOKIE_CAPS, vm);
if (!mig)
return -1;

@ -5977,7 +5961,7 @@ qemuMigrationSrcPerformJob(virQEMUDriver *driver,
virErrorPtr orig_err = NULL;
g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
qemuDomainObjPrivate *priv = vm->privateData;
qemuDomainJobPrivate *jobPriv = priv->job.privateData;
qemuDomainJobPrivate *jobPriv = vm->job->privateData;

if (flags & VIR_MIGRATE_POSTCOPY_RESUME) {
if (!qemuMigrationAnyCanResume(vm, VIR_ASYNC_JOB_MIGRATION_OUT, flags,
@ -6055,7 +6039,7 @@ qemuMigrationSrcPerformJob(virQEMUDriver *driver,
*/
if (!v3proto && ret < 0)
qemuMigrationParamsReset(vm, VIR_ASYNC_JOB_MIGRATION_OUT,
jobPriv->migParams, priv->job.apiFlags);
jobPriv->migParams, vm->job->apiFlags);

qemuMigrationSrcRestoreDomainState(driver, vm);

@ -6144,7 +6128,7 @@ qemuMigrationSrcPerformPhase(virQEMUDriver *driver,
const char *nbdURI)
{
qemuDomainObjPrivate *priv = vm->privateData;
qemuDomainJobPrivate *jobPriv = priv->job.privateData;
qemuDomainJobPrivate *jobPriv = vm->job->privateData;
int ret = -1;

if (flags & VIR_MIGRATE_POSTCOPY_RESUME) {
@ -6184,7 +6168,7 @@ qemuMigrationSrcPerformPhase(virQEMUDriver *driver,
if (ret < 0 && !virDomainObjIsFailedPostcopy(vm)) {
qemuMigrationSrcRestoreDomainState(driver, vm);
qemuMigrationParamsReset(vm, VIR_ASYNC_JOB_MIGRATION_OUT,
jobPriv->migParams, priv->job.apiFlags);
jobPriv->migParams, vm->job->apiFlags);
qemuDomainSetMaxMemLock(vm, 0, &priv->preMigrationMemlock);
qemuMigrationJobFinish(vm);
} else {
@ -6463,7 +6447,7 @@ qemuMigrationDstFinishOffline(virQEMUDriver *driver,
g_autoptr(qemuMigrationCookie) mig = NULL;

if (!(mig = qemuMigrationCookieParse(driver, vm->def, priv->origname, priv,
cookiein, cookieinlen, cookie_flags)))
cookiein, cookieinlen, cookie_flags, NULL)))
return NULL;

if (qemuMigrationDstPersist(driver, vm, mig, false) < 0)
@ -6496,7 +6480,6 @@ qemuMigrationDstFinishFresh(virQEMUDriver *driver,
bool *doKill,
bool *inPostCopy)
{
qemuDomainObjPrivate *priv = vm->privateData;
g_autoptr(virDomainJobData) jobData = NULL;

if (qemuMigrationDstVPAssociatePortProfiles(vm->def) < 0)
@ -6556,7 +6539,7 @@ qemuMigrationDstFinishFresh(virQEMUDriver *driver,
return -1;
}

if (priv->job.current->status == VIR_DOMAIN_JOB_STATUS_POSTCOPY)
if (vm->job->current->status == VIR_DOMAIN_JOB_STATUS_POSTCOPY)
*inPostCopy = true;

if (!(flags & VIR_MIGRATE_PAUSED)) {
@ -6606,9 +6589,9 @@ qemuMigrationDstFinishFresh(virQEMUDriver *driver,
}

if (jobData) {
priv->job.completed = g_steal_pointer(&jobData);
priv->job.completed->status = VIR_DOMAIN_JOB_STATUS_COMPLETED;
qemuDomainJobSetStatsType(priv->job.completed,
vm->job->completed = g_steal_pointer(&jobData);
vm->job->completed->status = VIR_DOMAIN_JOB_STATUS_COMPLETED;
qemuDomainJobSetStatsType(vm->job->completed,
QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION);
}

@ -6650,17 +6633,17 @@ qemuMigrationDstFinishActive(virQEMUDriver *driver,
virDomainPtr dom = NULL;
g_autoptr(qemuMigrationCookie) mig = NULL;
qemuDomainObjPrivate *priv = vm->privateData;
qemuDomainJobPrivate *jobPriv = priv->job.privateData;
qemuDomainJobPrivate *jobPriv = vm->job->privateData;
virObjectEvent *event;
bool inPostCopy = false;
bool doKill = priv->job.phase != QEMU_MIGRATION_PHASE_FINISH_RESUME;
bool doKill = vm->job->phase != QEMU_MIGRATION_PHASE_FINISH_RESUME;
int rc;

VIR_DEBUG("vm=%p, flags=0x%lx, retcode=%d",
vm, flags, retcode);

if (!(mig = qemuMigrationCookieParse(driver, vm->def, priv->origname, priv,
cookiein, cookieinlen, cookie_flags)))
cookiein, cookieinlen, cookie_flags, NULL)))
goto error;

if (retcode != 0) {
@ -6697,7 +6680,7 @@ qemuMigrationDstFinishActive(virQEMUDriver *driver,
VIR_WARN("Unable to encode migration cookie");

qemuMigrationDstComplete(driver, vm, inPostCopy,
VIR_ASYNC_JOB_MIGRATION_IN, &priv->job);
VIR_ASYNC_JOB_MIGRATION_IN, vm->job);

return dom;

@ -6727,7 +6710,7 @@ qemuMigrationDstFinishActive(virQEMUDriver *driver,
*finishJob = false;
} else {
qemuMigrationParamsReset(vm, VIR_ASYNC_JOB_MIGRATION_IN,
jobPriv->migParams, priv->job.apiFlags);
jobPriv->migParams, vm->job->apiFlags);
}

if (!virDomainObjIsActive(vm))
@ -6789,7 +6772,7 @@ qemuMigrationDstFinish(virQEMUDriver *driver,
} else {
qemuDomainCleanupRemove(vm, qemuMigrationDstPrepareCleanup);
}
g_clear_pointer(&priv->job.completed, virDomainJobDataFree);
g_clear_pointer(&vm->job->completed, virDomainJobDataFree);

cookie_flags = QEMU_MIGRATION_COOKIE_NETWORK |
QEMU_MIGRATION_COOKIE_STATS |
@ -6842,7 +6825,6 @@ qemuMigrationProcessUnattended(virQEMUDriver *driver,
virDomainAsyncJob job,
qemuMonitorMigrationStatus status)
{
qemuDomainObjPrivate *priv = vm->privateData;
qemuMigrationJobPhase phase;

if (!qemuMigrationJobIsActive(vm, job) ||
@ -6862,7 +6844,7 @@ qemuMigrationProcessUnattended(virQEMUDriver *driver,
return;

if (job == VIR_ASYNC_JOB_MIGRATION_IN)
qemuMigrationDstComplete(driver, vm, true, job, &priv->job);
qemuMigrationDstComplete(driver, vm, true, job, vm->job);
else
qemuMigrationSrcComplete(driver, vm, job);

@ -495,7 +495,7 @@ qemuMigrationCookieAddNBD(qemuMigrationCookie *mig,
mig->nbd->disks = g_new0(struct qemuMigrationCookieNBDDisk, vm->def->ndisks);
mig->nbd->ndisks = 0;

if (qemuDomainObjEnterMonitorAsync(vm, priv->job.asyncJob) < 0)
if (qemuDomainObjEnterMonitorAsync(vm, vm->job->asyncJob) < 0)
return -1;

rc = qemuMonitorBlockStatsUpdateCapacityBlockdev(priv->mon, stats);
@ -525,13 +525,11 @@ static int
qemuMigrationCookieAddStatistics(qemuMigrationCookie *mig,
virDomainObj *vm)
{
qemuDomainObjPrivate *priv = vm->privateData;

if (!priv->job.completed)
if (!vm->job->completed)
return 0;

g_clear_pointer(&mig->jobData, virDomainJobDataFree);
mig->jobData = virDomainJobDataCopy(priv->job.completed);
mig->jobData = virDomainJobDataCopy(vm->job->completed);

mig->flags |= QEMU_MIGRATION_COOKIE_STATS;

@ -1042,7 +1040,7 @@ qemuMigrationCookieStatisticsXMLParse(xmlXPathContextPtr ctxt)
if (!(ctxt->node = virXPathNode("./statistics", ctxt)))
return NULL;

jobData = virDomainJobDataInit(&qemuJobDataPrivateDataCallbacks);
jobData = virDomainJobDataInit(&virQEMUDriverDomainJobConfig.jobDataPrivateCb);
priv = jobData->privateData;
stats = &priv->stats.mig;
jobData->status = VIR_DOMAIN_JOB_STATUS_COMPLETED;
@ -1497,7 +1495,8 @@ qemuMigrationCookieParse(virQEMUDriver *driver,
qemuDomainObjPrivate *priv,
const char *cookiein,
int cookieinlen,
unsigned int flags)
unsigned int flags,
virDomainObj *vm)
{
g_autoptr(qemuMigrationCookie) mig = NULL;

@ -1547,8 +1546,8 @@ qemuMigrationCookieParse(virQEMUDriver *driver,
}
}

if (flags & QEMU_MIGRATION_COOKIE_STATS && mig->jobData && priv->job.current)
mig->jobData->operation = priv->job.current->operation;
if (vm && flags & QEMU_MIGRATION_COOKIE_STATS && mig->jobData && vm->job->current)
mig->jobData->operation = vm->job->current->operation;

return g_steal_pointer(&mig);
}
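Because the parser can now be handed NULL instead of a domain object, the stats fix-up at the end of qemuMigrationCookieParse() grows a vm guard; the operation is copied only when a domain object with a current job is actually available:

    /* New guard: vm may be NULL (see the NULL-passing callers above). */
    if (vm && flags & QEMU_MIGRATION_COOKIE_STATS && mig->jobData && vm->job->current)
        mig->jobData->operation = vm->job->current->operation;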
@ -194,7 +194,8 @@ qemuMigrationCookieParse(virQEMUDriver *driver,
qemuDomainObjPrivate *priv,
const char *cookiein,
int cookieinlen,
unsigned int flags);
unsigned int flags,
virDomainObj *vm);

void
qemuMigrationCookieFree(qemuMigrationCookie *mig);

@ -1005,7 +1005,7 @@ qemuMigrationParamsEnableTLS(virQEMUDriver *driver,
qemuMigrationParams *migParams)
{
qemuDomainObjPrivate *priv = vm->privateData;
qemuDomainJobPrivate *jobPriv = priv->job.privateData;
qemuDomainJobPrivate *jobPriv = vm->job->privateData;
g_autoptr(virJSONValue) tlsProps = NULL;
g_autoptr(virJSONValue) secProps = NULL;
g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
@ -1080,8 +1080,7 @@ int
qemuMigrationParamsDisableTLS(virDomainObj *vm,
qemuMigrationParams *migParams)
{
qemuDomainObjPrivate *priv = vm->privateData;
qemuDomainJobPrivate *jobPriv = priv->job.privateData;
qemuDomainJobPrivate *jobPriv = vm->job->privateData;

if (!jobPriv->migParams->params[QEMU_MIGRATION_PARAM_TLS_CREDS].set)
return 0;
@ -1213,8 +1212,7 @@ qemuMigrationParamsCheck(virDomainObj *vm,
qemuMigrationParams *migParams,
virBitmap *remoteCaps)
{
qemuDomainObjPrivate *priv = vm->privateData;
qemuDomainJobPrivate *jobPriv = priv->job.privateData;
qemuDomainJobPrivate *jobPriv = vm->job->privateData;
qemuMigrationCapability cap;
qemuMigrationParty party;
size_t i;

@ -648,8 +648,8 @@ qemuProcessHandleStop(qemuMonitor *mon G_GNUC_UNUSED,
* reveal it in domain state nor sent events */
if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING &&
!priv->pausedShutdown) {
if (priv->job.asyncJob == VIR_ASYNC_JOB_MIGRATION_OUT) {
if (priv->job.current->status == VIR_DOMAIN_JOB_STATUS_POSTCOPY)
if (vm->job->asyncJob == VIR_ASYNC_JOB_MIGRATION_OUT) {
if (vm->job->current->status == VIR_DOMAIN_JOB_STATUS_POSTCOPY)
reason = VIR_DOMAIN_PAUSED_POSTCOPY;
else
reason = VIR_DOMAIN_PAUSED_MIGRATION;
@ -661,8 +661,8 @@ qemuProcessHandleStop(qemuMonitor *mon G_GNUC_UNUSED,
vm->def->name, virDomainPausedReasonTypeToString(reason),
detail);

if (priv->job.current)
ignore_value(virTimeMillisNow(&priv->job.current->stopped));
if (vm->job->current)
ignore_value(virTimeMillisNow(&vm->job->current->stopped));

if (priv->signalStop)
virDomainObjBroadcast(vm);
@ -1390,7 +1390,6 @@ static void
qemuProcessHandleSpiceMigrated(qemuMonitor *mon G_GNUC_UNUSED,
virDomainObj *vm)
{
qemuDomainObjPrivate *priv;
qemuDomainJobPrivate *jobPriv;

virObjectLock(vm);
@ -1398,9 +1397,8 @@ qemuProcessHandleSpiceMigrated(qemuMonitor *mon G_GNUC_UNUSED,
VIR_DEBUG("Spice migration completed for domain %p %s",
vm, vm->def->name);

priv = vm->privateData;
jobPriv = priv->job.privateData;
if (priv->job.asyncJob != VIR_ASYNC_JOB_MIGRATION_OUT) {
jobPriv = vm->job->privateData;
if (vm->job->asyncJob != VIR_ASYNC_JOB_MIGRATION_OUT) {
VIR_DEBUG("got SPICE_MIGRATE_COMPLETED event without a migration job");
goto cleanup;
}
@ -1434,12 +1432,12 @@ qemuProcessHandleMigrationStatus(qemuMonitor *mon G_GNUC_UNUSED,
priv = vm->privateData;
driver = priv->driver;

if (priv->job.asyncJob == VIR_ASYNC_JOB_NONE) {
if (vm->job->asyncJob == VIR_ASYNC_JOB_NONE) {
VIR_DEBUG("got MIGRATION event without a migration job");
goto cleanup;
}

privJob = priv->job.current->privateData;
privJob = vm->job->current->privateData;

privJob->stats.mig.status = status;
virDomainObjBroadcast(vm);
@ -1448,7 +1446,7 @@ qemuProcessHandleMigrationStatus(qemuMonitor *mon G_GNUC_UNUSED,

switch ((qemuMonitorMigrationStatus) status) {
case QEMU_MONITOR_MIGRATION_STATUS_POSTCOPY:
if (priv->job.asyncJob == VIR_ASYNC_JOB_MIGRATION_OUT &&
if (vm->job->asyncJob == VIR_ASYNC_JOB_MIGRATION_OUT &&
state == VIR_DOMAIN_PAUSED &&
reason == VIR_DOMAIN_PAUSED_MIGRATION) {
VIR_DEBUG("Correcting paused state reason for domain %s to %s",
@ -1464,7 +1462,7 @@ qemuProcessHandleMigrationStatus(qemuMonitor *mon G_GNUC_UNUSED,
break;

case QEMU_MONITOR_MIGRATION_STATUS_POSTCOPY_PAUSED:
if (priv->job.asyncJob == VIR_ASYNC_JOB_MIGRATION_OUT &&
if (vm->job->asyncJob == VIR_ASYNC_JOB_MIGRATION_OUT &&
state == VIR_DOMAIN_PAUSED) {
/* At this point no thread is watching the migration progress on
* the source as it is just waiting for the Finish phase to end.
@ -1505,11 +1503,11 @@ qemuProcessHandleMigrationStatus(qemuMonitor *mon G_GNUC_UNUSED,
* watching it in any thread. Let's make sure the migration is properly
* finished in case we get a "completed" event.
*/
if (virDomainObjIsPostcopy(vm, priv->job.current->operation) &&
priv->job.phase == QEMU_MIGRATION_PHASE_POSTCOPY_FAILED &&
priv->job.asyncOwner == 0) {
if (virDomainObjIsPostcopy(vm, vm->job->current->operation) &&
vm->job->phase == QEMU_MIGRATION_PHASE_POSTCOPY_FAILED &&
vm->job->asyncOwner == 0) {
qemuProcessEventSubmit(vm, QEMU_PROCESS_EVENT_UNATTENDED_MIGRATION,
priv->job.asyncJob, status, NULL);
vm->job->asyncJob, status, NULL);
}
break;

@ -1546,7 +1544,7 @@ qemuProcessHandleMigrationPass(qemuMonitor *mon G_GNUC_UNUSED,
vm, vm->def->name, pass);

priv = vm->privateData;
if (priv->job.asyncJob == VIR_ASYNC_JOB_NONE) {
if (vm->job->asyncJob == VIR_ASYNC_JOB_NONE) {
VIR_DEBUG("got MIGRATION_PASS event without a migration job");
goto cleanup;
}
@ -1566,7 +1564,6 @@ qemuProcessHandleDumpCompleted(qemuMonitor *mon G_GNUC_UNUSED,
qemuMonitorDumpStats *stats,
const char *error)
{
qemuDomainObjPrivate *priv;
qemuDomainJobPrivate *jobPriv;
qemuDomainJobDataPrivate *privJobCurrent = NULL;

@ -1575,20 +1572,19 @@ qemuProcessHandleDumpCompleted(qemuMonitor *mon G_GNUC_UNUSED,
VIR_DEBUG("Dump completed for domain %p %s with stats=%p error='%s'",
vm, vm->def->name, stats, NULLSTR(error));

priv = vm->privateData;
jobPriv = priv->job.privateData;
privJobCurrent = priv->job.current->privateData;
if (priv->job.asyncJob == VIR_ASYNC_JOB_NONE) {
jobPriv = vm->job->privateData;
privJobCurrent = vm->job->current->privateData;
if (vm->job->asyncJob == VIR_ASYNC_JOB_NONE) {
VIR_DEBUG("got DUMP_COMPLETED event without a dump_completed job");
goto cleanup;
}
jobPriv->dumpCompleted = true;
privJobCurrent->stats.dump = *stats;
priv->job.error = g_strdup(error);
vm->job->error = g_strdup(error);

/* Force error if extracting the DUMP_COMPLETED status failed */
if (!error && status < 0) {
priv->job.error = g_strdup(virGetLastErrorMessage());
vm->job->error = g_strdup(virGetLastErrorMessage());
privJobCurrent->stats.dump.status = QEMU_MONITOR_DUMP_STATUS_FAILED;
}

@ -3209,8 +3205,8 @@ int qemuProcessStopCPUs(virQEMUDriver *driver,
/* de-activate netdevs after stopping CPUs */
ignore_value(qemuInterfaceStopDevices(vm->def));

if (priv->job.current)
ignore_value(virTimeMillisNow(&priv->job.current->stopped));
if (vm->job->current)
ignore_value(virTimeMillisNow(&vm->job->current->stopped));

/* The STOP event handler will change the domain state with the reason
* saved in priv->pausedReason and it will also emit corresponding domain
@ -3375,12 +3371,12 @@ qemuProcessCleanupMigrationJob(virQEMUDriver *driver,

VIR_DEBUG("driver=%p, vm=%s, asyncJob=%s, state=%s, reason=%s",
driver, vm->def->name,
virDomainAsyncJobTypeToString(priv->job.asyncJob),
virDomainAsyncJobTypeToString(vm->job->asyncJob),
virDomainStateTypeToString(state),
virDomainStateReasonToString(state, reason));

if (priv->job.asyncJob != VIR_ASYNC_JOB_MIGRATION_IN &&
priv->job.asyncJob != VIR_ASYNC_JOB_MIGRATION_OUT)
if (vm->job->asyncJob != VIR_ASYNC_JOB_MIGRATION_IN &&
vm->job->asyncJob != VIR_ASYNC_JOB_MIGRATION_OUT)
return;

virPortAllocatorRelease(priv->migrationPort);
@ -3393,7 +3389,6 @@ static void
qemuProcessRestoreMigrationJob(virDomainObj *vm,
virDomainJobObj *job)
{
qemuDomainObjPrivate *priv = vm->privateData;
qemuDomainJobPrivate *jobPriv = job->privateData;
virDomainJobOperation op;
unsigned long long allowedJobs;
@ -3413,9 +3408,9 @@ qemuProcessRestoreMigrationJob(virDomainObj *vm,
VIR_DOMAIN_JOB_STATUS_PAUSED,
allowedJobs);

job->privateData = g_steal_pointer(&priv->job.privateData);
priv->job.privateData = jobPriv;
priv->job.apiFlags = job->apiFlags;
job->privateData = g_steal_pointer(&vm->job->privateData);
vm->job->privateData = jobPriv;
vm->job->apiFlags = job->apiFlags;

qemuDomainCleanupAdd(vm, qemuProcessCleanupMigrationJob);
}
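qemuProcessRestoreMigrationJob() keeps its private-data swap, only now against vm->job: the live private data is stolen into the preserved job while the preserved private data and API flags are installed on the domain's job object. Condensed from the hunk above (g_steal_pointer() nulls its argument, so the private data never has two owners at once):

    /* jobPriv holds the preserved job's private data at this point. */
    job->privateData = g_steal_pointer(&vm->job->privateData);
    vm->job->privateData = jobPriv;
    vm->job->apiFlags = job->apiFlags;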
@ -8087,9 +8082,9 @@ void qemuProcessStop(virQEMUDriver *driver,
if (asyncJob != VIR_ASYNC_JOB_NONE) {
if (qemuDomainObjBeginNestedJob(vm, asyncJob) < 0)
goto cleanup;
} else if (priv->job.asyncJob != VIR_ASYNC_JOB_NONE &&
priv->job.asyncOwner == virThreadSelfID() &&
priv->job.active != VIR_JOB_ASYNC_NESTED) {
} else if (vm->job->asyncJob != VIR_ASYNC_JOB_NONE &&
vm->job->asyncOwner == virThreadSelfID() &&
vm->job->active != VIR_JOB_ASYNC_NESTED) {
VIR_WARN("qemuProcessStop called without a nested job (async=%s)",
virDomainAsyncJobTypeToString(asyncJob));
}
@ -8412,10 +8407,10 @@ qemuProcessAutoDestroy(virDomainObj *dom,

VIR_DEBUG("vm=%s, conn=%p", dom->def->name, conn);

if (priv->job.asyncJob == VIR_ASYNC_JOB_MIGRATION_IN)
if (dom->job->asyncJob == VIR_ASYNC_JOB_MIGRATION_IN)
stopFlags |= VIR_QEMU_PROCESS_STOP_MIGRATED;

if (priv->job.asyncJob) {
if (dom->job->asyncJob) {
VIR_DEBUG("vm=%s has long-term job active, cancelling",
dom->def->name);
qemuDomainObjDiscardAsyncJob(dom);
@ -8679,7 +8674,7 @@ qemuProcessReconnect(void *opaque)
cfg = virQEMUDriverGetConfig(driver);
priv = obj->privateData;

virDomainObjPreserveJob(&priv->job, &oldjob);
virDomainObjPreserveJob(obj->job, &oldjob);
if (oldjob.asyncJob == VIR_ASYNC_JOB_MIGRATION_IN)
stopFlags |= VIR_QEMU_PROCESS_STOP_MIGRATED;
if (oldjob.asyncJob == VIR_ASYNC_JOB_BACKUP && priv->backup)

@ -1334,7 +1334,7 @@ qemuSnapshotCreateActiveExternal(virQEMUDriver *driver,
if (!qemuMigrationSrcIsAllowed(driver, vm, false, VIR_ASYNC_JOB_SNAPSHOT, 0))
goto cleanup;

qemuDomainJobSetStatsType(priv->job.current,
qemuDomainJobSetStatsType(vm->job->current,
QEMU_DOMAIN_JOB_STATS_TYPE_SAVEDUMP);

/* allow the migration job to be cancelled or the domain to be paused */

@ -146,7 +146,8 @@ testQemuMigrationCookieParse(const void *opaque)
priv,
data->xmlstr,
data->xmlstrlen,
data->cookieParseFlags))) {
data->cookieParseFlags,
data->vm))) {
VIR_TEST_DEBUG("\nfailed to parse qemu migration cookie:\n%s\n", data->xmlstr);
return -1;
}