qemu: Remove special case for virDomainAbortJob

This doesn't abort the migration job in any phase, yet.
This commit is contained in:
Jiri Denemark 2011-07-19 02:27:39 +02:00 committed by Eric Blake
parent ad6cc26c8d
commit f9a837da73
5 changed files with 39 additions and 78 deletions

View File

@ -52,6 +52,7 @@ VIR_ENUM_IMPL(qemuDomainJob, QEMU_JOB_LAST,
"destroy",
"suspend",
"modify",
"abort",
"migration operation",
"none", /* async job is never stored in job.active */
"async nested",
@ -158,12 +159,6 @@ qemuDomainObjInitJob(qemuDomainObjPrivatePtr priv)
return -1;
}
if (virCondInit(&priv->job.signalCond) < 0) {
ignore_value(virCondDestroy(&priv->job.cond));
ignore_value(virCondDestroy(&priv->job.asyncCond));
return -1;
}
return 0;
}
@ -185,7 +180,6 @@ qemuDomainObjResetAsyncJob(qemuDomainObjPrivatePtr priv)
job->mask = DEFAULT_JOB_MASK;
job->start = 0;
memset(&job->info, 0, sizeof(job->info));
job->signals = 0;
}
void
@ -208,7 +202,6 @@ qemuDomainObjFreeJob(qemuDomainObjPrivatePtr priv)
{
ignore_value(virCondDestroy(&priv->job.cond));
ignore_value(virCondDestroy(&priv->job.asyncCond));
ignore_value(virCondDestroy(&priv->job.signalCond));
}

View File

@ -38,7 +38,9 @@
# define JOB_MASK(job) (1 << (job - 1))
# define DEFAULT_JOB_MASK \
(JOB_MASK(QEMU_JOB_QUERY) | JOB_MASK(QEMU_JOB_DESTROY)) (JOB_MASK(QEMU_JOB_QUERY) | \
JOB_MASK(QEMU_JOB_DESTROY) | \
JOB_MASK(QEMU_JOB_ABORT))
/* Only 1 job is allowed at any time
* A job includes *all* monitor commands, even those just querying
@ -49,6 +51,7 @@ enum qemuDomainJob {
QEMU_JOB_DESTROY, /* Destroys the domain (cannot be masked out) */
QEMU_JOB_SUSPEND, /* Suspends (stops vCPUs) the domain */
QEMU_JOB_MODIFY, /* May change state */
QEMU_JOB_ABORT, /* Abort current async job */
QEMU_JOB_MIGRATION_OP, /* Operation influencing outgoing migration */
/* The following two items must always be the last items before JOB_LAST */
@ -72,10 +75,6 @@ enum qemuDomainAsyncJob {
QEMU_ASYNC_JOB_LAST
};
enum qemuDomainJobSignals {
QEMU_JOB_SIGNAL_CANCEL = 1 << 0, /* Request job cancellation */
};
struct qemuDomainJobObj {
virCond cond; /* Use to coordinate jobs */
enum qemuDomainJob active; /* Currently running job */
@ -86,9 +85,6 @@ struct qemuDomainJobObj {
unsigned long long mask; /* Jobs allowed during async job */
unsigned long long start; /* When the async job started */
virDomainJobInfo info; /* Async job progress data */
virCond signalCond; /* Use to coordinate the safe queries during migration */
unsigned int signals; /* Signals for running job */
};
typedef struct _qemuDomainPCIAddressSet qemuDomainPCIAddressSet;

View File

@ -7925,24 +7925,36 @@ static int qemuDomainAbortJob(virDomainPtr dom) {
goto cleanup;
}
priv = vm->privateData; if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_ABORT) < 0)
goto cleanup;
if (virDomainObjIsActive(vm)) {
if (priv->job.asyncJob) {
VIR_DEBUG("Requesting cancellation of job on vm %s", vm->def->name);
priv->job.signals |= QEMU_JOB_SIGNAL_CANCEL;
} else {
qemuReportError(VIR_ERR_OPERATION_INVALID,
"%s", _("no job is active on the domain"));
goto cleanup;
}
} else {
qemuReportError(VIR_ERR_OPERATION_INVALID,
"%s", _("domain is not running"));
goto cleanup; goto endjob;
}
ret = 0; priv = vm->privateData;
if (!priv->job.asyncJob) {
qemuReportError(VIR_ERR_OPERATION_INVALID,
"%s", _("no job is active on the domain"));
goto endjob;
} else if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_IN) {
qemuReportError(VIR_ERR_OPERATION_INVALID, "%s",
_("cannot abort incoming migration;"
" use virDomainDestroy instead"));
goto endjob;
}
VIR_DEBUG("Cancelling job at client request");
ignore_value(qemuDomainObjEnterMonitor(driver, vm));
ret = qemuMonitorMigrateCancel(priv->mon);
qemuDomainObjExitMonitor(driver, vm);
endjob:
if (qemuDomainObjEndJob(driver, vm) == 0)
vm = NULL;
cleanup:
if (vm)

View File

@ -742,42 +742,6 @@ qemuMigrationSetOffline(struct qemud_driver *driver,
}
static int
qemuMigrationProcessJobSignals(struct qemud_driver *driver,
virDomainObjPtr vm,
const char *job,
bool cleanup)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
int ret = -1;
if (!virDomainObjIsActive(vm)) {
qemuReportError(VIR_ERR_INTERNAL_ERROR, _("%s: %s"),
job, _("guest unexpectedly quit"));
if (cleanup)
priv->job.signals = 0;
return -1;
}
if (priv->job.signals & QEMU_JOB_SIGNAL_CANCEL) {
priv->job.signals ^= QEMU_JOB_SIGNAL_CANCEL;
VIR_DEBUG("Cancelling job at client request");
ret = qemuDomainObjEnterMonitorWithDriver(driver, vm);
if (ret == 0) {
ret = qemuMonitorMigrateCancel(priv->mon);
qemuDomainObjExitMonitorWithDriver(driver, vm);
}
if (ret < 0) {
VIR_WARN("Unable to cancel job");
}
} else {
ret = 0;
}
return ret;
}
static int
qemuMigrationUpdateJobStatus(struct qemud_driver *driver,
virDomainObjPtr vm,
@ -878,17 +842,10 @@ qemuMigrationWaitForCompletion(struct qemud_driver *driver, virDomainObjPtr vm)
while (priv->job.info.type == VIR_DOMAIN_JOB_UNBOUNDED) {
/* Poll every 50ms for progress & to allow cancellation */
struct timespec ts = { .tv_sec = 0, .tv_nsec = 50 * 1000 * 1000ull };
while (priv->job.signals) {
if (qemuMigrationProcessJobSignals(driver, vm, job, false) < 0)
goto cleanup;
}
virCondSignal(&priv->job.signalCond);
if (qemuMigrationUpdateJobStatus(driver, vm, job) < 0)
goto cleanup;
virDomainObjUnlock(vm);
qemuDriverUnlock(driver);
@ -899,11 +856,6 @@ qemuMigrationWaitForCompletion(struct qemud_driver *driver, virDomainObjPtr vm)
}
cleanup:
while (priv->job.signals) {
qemuMigrationProcessJobSignals(driver, vm, job, true);
}
virCondBroadcast(&priv->job.signalCond);
if (priv->job.info.type == VIR_DOMAIN_JOB_COMPLETED)
return 0;
else

View File

@ -2290,6 +2290,8 @@ qemuProcessRecoverMigration(struct qemud_driver *driver,
virDomainState state,
int reason)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
if (job == QEMU_ASYNC_JOB_MIGRATION_IN) {
switch (phase) {
case QEMU_MIGRATION_PHASE_NONE:
@ -2344,7 +2346,9 @@ qemuProcessRecoverMigration(struct qemud_driver *driver,
* domain */
VIR_DEBUG("Canceling unfinished outgoing migration of domain %s",
vm->def->name);
/* TODO cancel possibly running migrate operation */ ignore_value(qemuDomainObjEnterMonitor(driver, vm));
ignore_value(qemuMonitorMigrateCancel(priv->mon));
qemuDomainObjExitMonitor(driver, vm);
/* resume the domain but only if it was paused as a result of
* migration */
if (state == VIR_DOMAIN_PAUSED &&
@ -2392,6 +2396,7 @@ qemuProcessRecoverJob(struct qemud_driver *driver,
virConnectPtr conn,
const struct qemuDomainJobObj *job)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
virDomainState state;
int reason;
@ -2407,7 +2412,9 @@ qemuProcessRecoverJob(struct qemud_driver *driver,
case QEMU_ASYNC_JOB_SAVE:
case QEMU_ASYNC_JOB_DUMP:
/* TODO cancel possibly running migrate operation */ ignore_value(qemuDomainObjEnterMonitor(driver, vm));
ignore_value(qemuMonitorMigrateCancel(priv->mon));
qemuDomainObjExitMonitor(driver, vm);
/* resume the domain but only if it was paused as a result of
* running save/dump operation */
if (state == VIR_DOMAIN_PAUSED &&
@ -2452,6 +2459,7 @@ qemuProcessRecoverJob(struct qemud_driver *driver,
break;
case QEMU_JOB_MIGRATION_OP:
case QEMU_JOB_ABORT:
case QEMU_JOB_ASYNC:
case QEMU_JOB_ASYNC_NESTED:
/* async job was already handled above */