From d58e91a812e254d1384370fe6f3c7b0b24cc952a Mon Sep 17 00:00:00 2001 From: Jiri Denemark Date: Tue, 19 Jul 2011 02:27:32 +0200 Subject: [PATCH] qemu: Migration job on source daemon Make MIGRATION_OUT use the new helper methods. This also introduces new protection to migration v3 process: the migration job is held from Begin to Confirm to avoid changes to a domain during migration (esp. between Begin and Perform phases). This change is automatically applied to p2p and tunneled migrations. For normal migration, this requires support from a client. In other words, if an old (pre 0.9.4) client starts normal migration of a domain, the domain will not be protected against changes between Begin and Perform steps. --- include/libvirt/libvirt.h.in | 3 + src/libvirt.c | 71 +++++++-- src/libvirt_internal.h | 6 + src/qemu/qemu_driver.c | 61 +++++++- src/qemu/qemu_migration.c | 288 +++++++++++++++++++++++++---------- src/qemu/qemu_migration.h | 3 +- 6 files changed, 340 insertions(+), 92 deletions(-) diff --git a/include/libvirt/libvirt.h.in b/include/libvirt/libvirt.h.in index 5771ba7fe6..b1bda31169 100644 --- a/include/libvirt/libvirt.h.in +++ b/include/libvirt/libvirt.h.in @@ -676,6 +676,9 @@ typedef enum { VIR_MIGRATE_NON_SHARED_DISK = (1 << 6), /* migration with non-shared storage with full disk copy */ VIR_MIGRATE_NON_SHARED_INC = (1 << 7), /* migration with non-shared storage with incremental copy */ /* (same base image shared between source and destination) */ + VIR_MIGRATE_CHANGE_PROTECTION = (1 << 8), /* protect for changing domain configuration through the + * whole migration process; this will be used automatically + * when supported */ } virDomainMigrateFlags; diff --git a/src/libvirt.c b/src/libvirt.c index 996ab948f6..988320447b 100644 --- a/src/libvirt.c +++ b/src/libvirt.c @@ -4139,7 +4139,9 @@ virDomainMigrateVersion3(virDomainPtr domain, int ret; virDomainInfo info; virErrorPtr orig_err = NULL; - int cancelled; + int cancelled = 1; + unsigned long protection = 0; + VIR_DOMAIN_DEBUG(domain, "dconn=%p xmlin=%s, flags=%lx, " "dname=%s, uri=%s, bandwidth=%lu", dconn, NULLSTR(xmlin), flags, @@ -4155,10 +4157,14 @@ virDomainMigrateVersion3(virDomainPtr domain, return NULL; } + if (VIR_DRV_SUPPORTS_FEATURE(domain->conn->driver, domain->conn, + VIR_DRV_FEATURE_MIGRATE_CHANGE_PROTECTION)) + protection = VIR_MIGRATE_CHANGE_PROTECTION; + VIR_DEBUG("Begin3 %p", domain->conn); dom_xml = domain->conn->driver->domainMigrateBegin3 (domain, xmlin, &cookieout, &cookieoutlen, - flags, dname, bandwidth); + flags | protection, dname, bandwidth); if (!dom_xml) goto done; @@ -4176,14 +4182,22 @@ virDomainMigrateVersion3(virDomainPtr domain, (dconn, cookiein, cookieinlen, &cookieout, &cookieoutlen, uri, &uri_out, flags, dname, bandwidth, dom_xml); VIR_FREE (dom_xml); - if (ret == -1) - goto done; + if (ret == -1) { + if (protection) { + /* Begin already started a migration job so we need to cancel it by + * calling Confirm while making sure it doesn't overwrite the error + */ + orig_err = virSaveLastError(); + goto confirm; + } else { + goto done; + } + } if (uri == NULL && uri_out == NULL) { virLibConnError(VIR_ERR_INTERNAL_ERROR, _("domainMigratePrepare3 did not set uri")); virDispatchError(domain->conn); - cancelled = 1; goto finish; } if (uri_out) @@ -4204,7 +4218,7 @@ virDomainMigrateVersion3(virDomainPtr domain, ret = domain->conn->driver->domainMigratePerform3 (domain, NULL, cookiein, cookieinlen, &cookieout, &cookieoutlen, NULL, - uri, flags, dname, bandwidth); + uri, flags | protection, dname, 
bandwidth); /* Perform failed. Make sure Finish doesn't overwrite the error */ if (ret < 0) @@ -4249,6 +4263,7 @@ finish: if (!orig_err) orig_err = virSaveLastError(); +confirm: /* * If cancelled, then src VM will be restarted, else * it will be killed @@ -4261,7 +4276,7 @@ finish: cookieoutlen = 0; ret = domain->conn->driver->domainMigrateConfirm3 (domain, cookiein, cookieinlen, - flags, cancelled); + flags | protection, cancelled); /* If Confirm3 returns -1, there's nothing more we can * do, but fortunately worst case is that there is a * domain left in 'paused' state on source. @@ -4280,7 +4295,7 @@ finish: /* - * In normal migration, the libvirt client co-ordinates communcation + * In normal migration, the libvirt client co-ordinates communication * between the 2 libvirtd instances on source & dest hosts. * * In this peer-2-peer migration alternative, the libvirt client @@ -4365,7 +4380,7 @@ virDomainMigratePeer2Peer (virDomainPtr domain, /* - * In normal migration, the libvirt client co-ordinates communcation + * In normal migration, the libvirt client co-ordinates communication * between the 2 libvirtd instances on source & dest hosts. * * Some hypervisors support an alternative, direct migration where @@ -4452,6 +4467,9 @@ virDomainMigrateDirect (virDomainPtr domain, * VIR_MIGRATE_UNDEFINE_SOURCE If the migration is successful, undefine the * domain on the source host. * VIR_MIGRATE_PAUSED Leave the domain suspended on the remote side. + * VIR_MIGRATE_CHANGE_PROTECTION Protect against domain configuration + * changes during the migration process (set + * automatically when supported). * * VIR_MIGRATE_TUNNELLED requires that VIR_MIGRATE_PEER2PEER be set. * Applications using the VIR_MIGRATE_PEER2PEER flag will probably @@ -4559,6 +4577,19 @@ virDomainMigrate (virDomainPtr domain, goto error; } } else { + /* Change protection requires support only on source side, and + * is only needed in v3 migration, which automatically re-adds + * the flag for just the source side. We mask it out for + * non-peer2peer to allow migration from newer source to an + * older destination that rejects the flag. */ + if (flags & VIR_MIGRATE_CHANGE_PROTECTION && + !VIR_DRV_SUPPORTS_FEATURE(domain->conn->driver, domain->conn, + VIR_DRV_FEATURE_MIGRATE_CHANGE_PROTECTION)) { + virLibConnError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s", + _("cannot enforce change protection")); + goto error; + } + flags &= ~VIR_MIGRATE_CHANGE_PROTECTION; if (flags & VIR_MIGRATE_TUNNELLED) { virLibConnError(VIR_ERR_OPERATION_INVALID, _("cannot perform tunnelled migration without using peer2peer flag")); @@ -4627,6 +4658,9 @@ error: * VIR_MIGRATE_UNDEFINE_SOURCE If the migration is successful, undefine the * domain on the source host. * VIR_MIGRATE_PAUSED Leave the domain suspended on the remote side. + * VIR_MIGRATE_CHANGE_PROTECTION Protect against domain configuration + * changes during the migration process (set + * automatically when supported). * * VIR_MIGRATE_TUNNELLED requires that VIR_MIGRATE_PEER2PEER be set. * Applications using the VIR_MIGRATE_PEER2PEER flag will probably @@ -4741,6 +4775,19 @@ virDomainMigrate2(virDomainPtr domain, goto error; } } else { + /* Change protection requires support only on source side, and + * is only needed in v3 migration, which automatically re-adds + * the flag for just the source side. We mask it out for + * non-peer2peer to allow migration from newer source to an + * older destination that rejects the flag. 
*/ + if (flags & VIR_MIGRATE_CHANGE_PROTECTION && + !VIR_DRV_SUPPORTS_FEATURE(domain->conn->driver, domain->conn, + VIR_DRV_FEATURE_MIGRATE_CHANGE_PROTECTION)) { + virLibConnError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s", + _("cannot enforce change protection")); + goto error; + } + flags &= ~VIR_MIGRATE_CHANGE_PROTECTION; if (flags & VIR_MIGRATE_TUNNELLED) { virLibConnError(VIR_ERR_OPERATION_INVALID, _("cannot perform tunnelled migration without using peer2peer flag")); @@ -4816,6 +4863,9 @@ error: * on the destination host. * VIR_MIGRATE_UNDEFINE_SOURCE If the migration is successful, undefine the * domain on the source host. + * VIR_MIGRATE_CHANGE_PROTECTION Protect against domain configuration + * changes during the migration process (set + * automatically when supported). * * The operation of this API hinges on the VIR_MIGRATE_PEER2PEER flag. * If the VIR_MIGRATE_PEER2PEER flag is NOT set, the duri parameter @@ -4937,6 +4987,9 @@ error: * on the destination host. * VIR_MIGRATE_UNDEFINE_SOURCE If the migration is successful, undefine the * domain on the source host. + * VIR_MIGRATE_CHANGE_PROTECTION Protect against domain configuration + * changes during the migration process (set + * automatically when supported). * * The operation of this API hinges on the VIR_MIGRATE_PEER2PEER flag. * diff --git a/src/libvirt_internal.h b/src/libvirt_internal.h index 83c25fceb8..6e44341289 100644 --- a/src/libvirt_internal.h +++ b/src/libvirt_internal.h @@ -73,6 +73,12 @@ enum { * domainMigrateConfirm3. */ VIR_DRV_FEATURE_MIGRATION_V3 = 6, + + /* + * Driver supports protecting the whole V3-style migration against changes + * to domain configuration, i.e., starting from Begin3 and not Perform3. + */ + VIR_DRV_FEATURE_MIGRATE_CHANGE_PROTECTION = 7, }; diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c index 0066c55e16..ba5bdfc90c 100644 --- a/src/qemu/qemu_driver.c +++ b/src/qemu/qemu_driver.c @@ -893,6 +893,7 @@ qemudSupportsFeature (virConnectPtr conn ATTRIBUTE_UNUSED, int feature) case VIR_DRV_FEATURE_MIGRATION_V2: case VIR_DRV_FEATURE_MIGRATION_V3: case VIR_DRV_FEATURE_MIGRATION_P2P: + case VIR_DRV_FEATURE_MIGRATE_CHANGE_PROTECTION: return 1; default: return 0; @@ -7447,12 +7448,56 @@ qemuDomainMigrateBegin3(virDomainPtr domain, goto cleanup; } - xml = qemuMigrationBegin(driver, vm, xmlin, - cookieout, cookieoutlen); + if ((flags & VIR_MIGRATE_CHANGE_PROTECTION)) { + if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT) < 0) + goto cleanup; + } else { + if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + goto cleanup; + } + + if (!virDomainObjIsActive(vm)) { + qemuReportError(VIR_ERR_OPERATION_INVALID, + "%s", _("domain is not running")); + goto endjob; + } + + if (!(xml = qemuMigrationBegin(driver, vm, xmlin, + cookieout, cookieoutlen))) + goto endjob; + + if ((flags & VIR_MIGRATE_CHANGE_PROTECTION)) { + /* We keep the job active across API calls until the confirm() call. + * This prevents any other APIs being invoked while migration is taking + * place. 
+ */ + if (qemuMigrationJobContinue(vm) == 0) { + vm = NULL; + qemuReportError(VIR_ERR_OPERATION_FAILED, + "%s", _("domain disappeared")); + VIR_FREE(xml); + if (cookieout) + VIR_FREE(*cookieout); + } + } else { + goto endjob; + } cleanup: + if (vm) + virDomainObjUnlock(vm); qemuDriverUnlock(driver); return xml; + +endjob: + if ((flags & VIR_MIGRATE_CHANGE_PROTECTION)) { + if (qemuMigrationJobFinish(driver, vm) == 0) + vm = NULL; + } else { + if (qemuDomainObjEndJob(driver, vm) == 0) + vm = NULL; + } + goto cleanup; } static int @@ -7634,6 +7679,7 @@ qemuDomainMigrateConfirm3(virDomainPtr domain, struct qemud_driver *driver = domain->conn->privateData; virDomainObjPtr vm; int ret = -1; + enum qemuMigrationJobPhase phase; virCheckFlags(QEMU_MIGRATION_FLAGS, -1); @@ -7647,14 +7693,21 @@ qemuDomainMigrateConfirm3(virDomainPtr domain, goto cleanup; } - if (qemuDomainObjBeginJobWithDriver(driver, vm, QEMU_JOB_MODIFY) < 0) + if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_OUT)) goto cleanup; + if (cancelled) + phase = QEMU_MIGRATION_PHASE_CONFIRM3_CANCELLED; + else + phase = QEMU_MIGRATION_PHASE_CONFIRM3; + + qemuMigrationJobStartPhase(driver, vm, phase); + ret = qemuMigrationConfirm(driver, domain->conn, vm, cookiein, cookieinlen, flags, cancelled); - if (qemuDomainObjEndJob(driver, vm) == 0) { + if (qemuMigrationJobFinish(driver, vm) == 0) { vm = NULL; } else if (!virDomainObjIsActive(vm) && (!vm->persistent || (flags & VIR_MIGRATE_UNDEFINE_SOURCE))) { diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c index e3b8b94ab6..11217496f5 100644 --- a/src/qemu/qemu_migration.c +++ b/src/qemu/qemu_migration.c @@ -1009,6 +1009,7 @@ qemuDomainMigrateGraphicsRelocate(struct qemud_driver *driver, } +/* The caller is supposed to lock the vm and start a migration job. */ char *qemuMigrationBegin(struct qemud_driver *driver, virDomainObjPtr vm, const char *xmlin, @@ -1018,14 +1019,17 @@ char *qemuMigrationBegin(struct qemud_driver *driver, char *rv = NULL; qemuMigrationCookiePtr mig = NULL; virDomainDefPtr def = NULL; + qemuDomainObjPrivatePtr priv = vm->privateData; + VIR_DEBUG("driver=%p, vm=%p, xmlin=%s, cookieout=%p, cookieoutlen=%p", driver, vm, NULLSTR(xmlin), cookieout, cookieoutlen); - if (!virDomainObjIsActive(vm)) { - qemuReportError(VIR_ERR_OPERATION_INVALID, - "%s", _("domain is not running")); - goto cleanup; - } + /* Only set the phase if we are inside QEMU_ASYNC_JOB_MIGRATION_OUT. + * Otherwise we will start the async job later in the perform phase losing + * change protection. + */ + if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT) + qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_BEGIN3); if (qemuProcessAutoDestroyActive(driver, vm)) { qemuReportError(VIR_ERR_OPERATION_INVALID, @@ -1063,7 +1067,6 @@ char *qemuMigrationBegin(struct qemud_driver *driver, } cleanup: - virDomainObjUnlock(vm); qemuMigrationCookieFree(mig); virDomainDefFree(def); return rv; @@ -1904,6 +1907,7 @@ static int doPeer2PeerMigrate2(struct qemud_driver *driver, * until the migration is complete. 
*/ VIR_DEBUG("Perform %p", sconn); + qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_PERFORM2); if (flags & VIR_MIGRATE_TUNNELLED) ret = doTunnelMigrate(driver, vm, st, NULL, 0, NULL, NULL, @@ -1990,6 +1994,11 @@ static int doPeer2PeerMigrate3(struct qemud_driver *driver, NULLSTR(dconnuri), NULLSTR(uri), flags, NULLSTR(dname), resource); + /* Unlike the virDomainMigrateVersion3 counterpart, we don't need + * to worry about auto-setting the VIR_MIGRATE_CHANGE_PROTECTION + * bit here, because we are already running inside the context of + * a single job. */ + dom_xml = qemuMigrationBegin(driver, vm, xmlin, &cookieout, &cookieoutlen); if (!dom_xml) @@ -2038,6 +2047,7 @@ static int doPeer2PeerMigrate3(struct qemud_driver *driver, * confirm migration completion. */ VIR_DEBUG("Perform3 %p uri=%s uri_out=%s", sconn, uri, uri_out); + qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_PERFORM3); VIR_FREE(cookiein); cookiein = cookieout; cookieinlen = cookieoutlen; @@ -2055,8 +2065,12 @@ static int doPeer2PeerMigrate3(struct qemud_driver *driver, flags, dname, resource); /* Perform failed. Make sure Finish doesn't overwrite the error */ - if (ret < 0) + if (ret < 0) { orig_err = virSaveLastError(); + } else { + qemuMigrationJobSetPhase(driver, vm, + QEMU_MIGRATION_PHASE_PERFORM3_DONE); + } /* If Perform returns < 0, then we need to cancel the VM * startup on the destination @@ -2178,7 +2192,7 @@ static int doPeer2PeerMigrate(struct qemud_driver *driver, p2p = VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn, VIR_DRV_FEATURE_MIGRATION_P2P); /* v3proto reflects whether the caller used Perform3, but with - * p2p migrate, regardless of whether Perform3 or Perform3 + * p2p migrate, regardless of whether Perform2 or Perform3 * were used, we decide protocol based on what target supports */ *v3proto = VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn, @@ -2198,6 +2212,13 @@ static int doPeer2PeerMigrate(struct qemud_driver *driver, goto cleanup; } + /* Change protection is only required on the source side (us), and + * only for v3 migration when begin and perform are separate jobs. + * But peer-2-peer is already a single job, and we still want to + * talk to older destinations that would reject the flag. + * Therefore it is safe to clear the bit here. */ + flags &= ~VIR_MIGRATE_CHANGE_PROTECTION; + if (*v3proto) ret = doPeer2PeerMigrate3(driver, sconn, dconn, vm, xmlin, dconnuri, uri, flags, dname, resource); @@ -2219,35 +2240,32 @@ cleanup: } -int qemuMigrationPerform(struct qemud_driver *driver, - virConnectPtr conn, - virDomainObjPtr vm, - const char *xmlin, - const char *dconnuri, - const char *uri, - const char *cookiein, - int cookieinlen, - char **cookieout, - int *cookieoutlen, - unsigned long flags, - const char *dname, - unsigned long resource, - bool v3proto) +/* + * This implements perform part of the migration protocol when migration job + * does not need to be active across several APIs, i.e., peer2peer migration or + * perform phase of v2 non-peer2peer migration. 
+ */ +static int +qemuMigrationPerformJob(struct qemud_driver *driver, + virConnectPtr conn, + virDomainObjPtr vm, + const char *xmlin, + const char *dconnuri, + const char *uri, + const char *cookiein, + int cookieinlen, + char **cookieout, + int *cookieoutlen, + unsigned long flags, + const char *dname, + unsigned long resource, + bool v3proto) { virDomainEventPtr event = NULL; int ret = -1; int resume = 0; - qemuDomainObjPrivatePtr priv = vm->privateData; - VIR_DEBUG("driver=%p, conn=%p, vm=%p, xmlin=%s, dconnuri=%s, " - "uri=%s, cookiein=%s, cookieinlen=%d, cookieout=%p, " - "cookieoutlen=%p, flags=%lx, dname=%s, resource=%lu, v3proto=%d", - driver, conn, vm, NULLSTR(xmlin), NULLSTR(dconnuri), - NULLSTR(uri), NULLSTR(cookiein), cookieinlen, - cookieout, cookieoutlen, flags, NULLSTR(dname), - resource, v3proto); - if (qemuDomainObjBeginAsyncJobWithDriver(driver, vm, - QEMU_ASYNC_JOB_MIGRATION_OUT) < 0) + if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT) < 0) goto cleanup; if (!virDomainObjIsActive(vm)) { @@ -2262,52 +2280,33 @@ int qemuMigrationPerform(struct qemud_driver *driver, goto endjob; } - memset(&priv->job.info, 0, sizeof(priv->job.info)); - priv->job.info.type = VIR_DOMAIN_JOB_UNBOUNDED; - resume = virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING; if ((flags & (VIR_MIGRATE_TUNNELLED | VIR_MIGRATE_PEER2PEER))) { - if (cookieinlen) { - qemuReportError(VIR_ERR_OPERATION_INVALID, - "%s", _("received unexpected cookie with P2P migration")); - goto endjob; - } - - if (doPeer2PeerMigrate(driver, conn, vm, xmlin, - dconnuri, uri, flags, dname, - resource, &v3proto) < 0) - /* doPeer2PeerMigrate already set the error, so just get out */ - goto endjob; + ret = doPeer2PeerMigrate(driver, conn, vm, xmlin, + dconnuri, uri, flags, dname, + resource, &v3proto); } else { - if (dconnuri) { - qemuReportError(VIR_ERR_INTERNAL_ERROR, - "%s", _("Unexpected dconnuri parameter with non-peer2peer migration")); - goto endjob; - } - if (doNativeMigrate(driver, vm, uri, cookiein, cookieinlen, - cookieout, cookieoutlen, - flags, dname, resource) < 0) - goto endjob; + qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_PERFORM2); + ret = doNativeMigrate(driver, vm, uri, cookiein, cookieinlen, + cookieout, cookieoutlen, + flags, dname, resource); } + if (ret < 0) + goto endjob; /* * In v3 protocol, the source VM is not killed off until the * confirm step. 
*/ - if (v3proto) { - resume = 0; - } else { + if (!v3proto) { qemuProcessStop(driver, vm, 1, VIR_DOMAIN_SHUTOFF_MIGRATED); virDomainAuditStop(vm, "migrated"); - resume = 0; - event = virDomainEventNewFromObj(vm, VIR_DOMAIN_EVENT_STOPPED, VIR_DOMAIN_EVENT_STOPPED_MIGRATED); } - - ret = 0; + resume = 0; endjob: if (resume && virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED) { @@ -2326,16 +2325,16 @@ endjob: VIR_DOMAIN_EVENT_RESUMED, VIR_DOMAIN_EVENT_RESUMED_MIGRATED); } - if (vm) { - if (qemuDomainObjEndAsyncJob(driver, vm) == 0) { - vm = NULL; - } else if (!virDomainObjIsActive(vm) && - (!vm->persistent || (flags & VIR_MIGRATE_UNDEFINE_SOURCE))) { - if (flags & VIR_MIGRATE_UNDEFINE_SOURCE) - virDomainDeleteConfig(driver->configDir, driver->autostartDir, vm); - virDomainRemoveInactive(&driver->domains, vm); - vm = NULL; - } + + if (qemuMigrationJobFinish(driver, vm) == 0) { + vm = NULL; + } else if (!virDomainObjIsActive(vm) && + (!vm->persistent || + (ret == 0 && (flags & VIR_MIGRATE_UNDEFINE_SOURCE)))) { + if (flags & VIR_MIGRATE_UNDEFINE_SOURCE) + virDomainDeleteConfig(driver->configDir, driver->autostartDir, vm); + virDomainRemoveInactive(&driver->domains, vm); + vm = NULL; } cleanup: @@ -2346,6 +2345,140 @@ cleanup: return ret; } +/* + * This implements perform phase of v3 migration protocol. + */ +static int +qemuMigrationPerformPhase(struct qemud_driver *driver, + virConnectPtr conn, + virDomainObjPtr vm, + const char *uri, + const char *cookiein, + int cookieinlen, + char **cookieout, + int *cookieoutlen, + unsigned long flags, + const char *dname, + unsigned long resource) +{ + virDomainEventPtr event = NULL; + int ret = -1; + bool resume; + int refs; + + /* If we didn't start the job in the begin phase, start it now. */ + if (!(flags & VIR_MIGRATE_CHANGE_PROTECTION)) { + if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT) < 0) + goto cleanup; + } else if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_OUT)) { + goto cleanup; + } + + qemuMigrationJobStartPhase(driver, vm, QEMU_MIGRATION_PHASE_PERFORM3); + + resume = virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING; + ret = doNativeMigrate(driver, vm, uri, cookiein, cookieinlen, + cookieout, cookieoutlen, + flags, dname, resource); + + if (ret < 0 && resume && + virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED) { + /* we got here through some sort of failure; start the domain again */ + if (qemuProcessStartCPUs(driver, vm, conn, + VIR_DOMAIN_RUNNING_MIGRATION_CANCELED) < 0) { + /* Hm, we already know we are in error here. 
We don't want to + * overwrite the previous error, though, so we just throw something + * to the logs and hope for the best + */ + VIR_ERROR(_("Failed to resume guest %s after failure"), + vm->def->name); + } + + event = virDomainEventNewFromObj(vm, + VIR_DOMAIN_EVENT_RESUMED, + VIR_DOMAIN_EVENT_RESUMED_MIGRATED); + } + + if (ret < 0) + goto endjob; + + qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_PERFORM3_DONE); + +endjob: + if (ret < 0) + refs = qemuMigrationJobFinish(driver, vm); + else + refs = qemuMigrationJobContinue(vm); + if (refs == 0) { + vm = NULL; + } else if (!virDomainObjIsActive(vm) && !vm->persistent) { + virDomainRemoveInactive(&driver->domains, vm); + vm = NULL; + } + +cleanup: + if (vm) + virDomainObjUnlock(vm); + if (event) + qemuDomainEventQueue(driver, event); + return ret; +} + +int +qemuMigrationPerform(struct qemud_driver *driver, + virConnectPtr conn, + virDomainObjPtr vm, + const char *xmlin, + const char *dconnuri, + const char *uri, + const char *cookiein, + int cookieinlen, + char **cookieout, + int *cookieoutlen, + unsigned long flags, + const char *dname, + unsigned long resource, + bool v3proto) +{ + VIR_DEBUG("driver=%p, conn=%p, vm=%p, xmlin=%s, dconnuri=%s, " + "uri=%s, cookiein=%s, cookieinlen=%d, cookieout=%p, " + "cookieoutlen=%p, flags=%lx, dname=%s, resource=%lu, v3proto=%d", + driver, conn, vm, NULLSTR(xmlin), NULLSTR(dconnuri), + NULLSTR(uri), NULLSTR(cookiein), cookieinlen, + cookieout, cookieoutlen, flags, NULLSTR(dname), + resource, v3proto); + + if ((flags & (VIR_MIGRATE_TUNNELLED | VIR_MIGRATE_PEER2PEER))) { + if (cookieinlen) { + qemuReportError(VIR_ERR_OPERATION_INVALID, + "%s", _("received unexpected cookie with P2P migration")); + return -1; + } + + return qemuMigrationPerformJob(driver, conn, vm, xmlin, dconnuri, uri, + cookiein, cookieinlen, cookieout, + cookieoutlen, flags, dname, resource, + v3proto); + } else { + if (dconnuri) { + qemuReportError(VIR_ERR_INTERNAL_ERROR, + "%s", _("Unexpected dconnuri parameter with non-peer2peer migration")); + return -1; + } + + if (v3proto) { + return qemuMigrationPerformPhase(driver, conn, vm, uri, + cookiein, cookieinlen, + cookieout, cookieoutlen, + flags, dname, resource); + } else { + return qemuMigrationPerformJob(driver, conn, vm, xmlin, dconnuri, + uri, cookiein, cookieinlen, + cookieout, cookieoutlen, flags, + dname, resource, v3proto); + } + } +} #if WITH_MACVTAP static void @@ -2579,15 +2712,14 @@ int qemuMigrationConfirm(struct qemud_driver *driver, virCheckFlags(QEMU_MIGRATION_FLAGS, -1); + qemuMigrationJobSetPhase(driver, vm, + retcode == 0 + ? QEMU_MIGRATION_PHASE_CONFIRM3 + : QEMU_MIGRATION_PHASE_CONFIRM3_CANCELLED); + if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen, 0))) return -1; - if (!virDomainObjIsActive(vm)) { - qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s", - _("guest unexpectedly quit")); - goto cleanup; - } - /* Did the migration go as planned? If yes, kill off the * domain object, but if no, resume CPUs */ diff --git a/src/qemu/qemu_migration.h b/src/qemu/qemu_migration.h index 005e415b31..9e88271266 100644 --- a/src/qemu/qemu_migration.h +++ b/src/qemu/qemu_migration.h @@ -34,7 +34,8 @@ VIR_MIGRATE_UNDEFINE_SOURCE | \ VIR_MIGRATE_PAUSED | \ VIR_MIGRATE_NON_SHARED_DISK | \ - VIR_MIGRATE_NON_SHARED_INC) + VIR_MIGRATE_NON_SHARED_INC | \ + VIR_MIGRATE_CHANGE_PROTECTION) enum qemuMigrationJobPhase { QEMU_MIGRATION_PHASE_NONE = 0,
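
Usage note (illustrative sketch, not part of the patch itself, and assuming a
libvirt build that includes this series): a management application that wants
to *require* the new protection can pass VIR_MIGRATE_CHANGE_PROTECTION
explicitly.  With a v3-capable source such as the qemu driver the flag is also
added automatically by virDomainMigrateVersion3(), so passing it mainly matters
for failing fast: in the non-peer2peer path virDomainMigrate() rejects it with
VIR_ERR_ARGUMENT_UNSUPPORTED ("cannot enforce change protection") when the
source driver cannot hold the migration job from Begin to Confirm.  The
connection URIs and the domain name "demo" below are placeholders; only public
libvirt API calls are used.

    #include <stdio.h>

    #include <libvirt/libvirt.h>
    #include <libvirt/virterror.h>

    int main(void)
    {
        virConnectPtr src = NULL;
        virConnectPtr dst = NULL;
        virDomainPtr dom = NULL;
        virDomainPtr ddom = NULL;
        int ret = 1;

        /* Placeholder URIs: local source, remote destination. */
        if (!(src = virConnectOpen("qemu:///system")) ||
            !(dst = virConnectOpen("qemu+ssh://dst.example.com/system")))
            goto cleanup;

        if (!(dom = virDomainLookupByName(src, "demo")))
            goto cleanup;

        /* Request live migration and insist on change protection.  If the
         * source cannot keep the migration job active from Begin3 to
         * Confirm3, the call fails instead of silently migrating without
         * protection. */
        ddom = virDomainMigrate(dom, dst,
                                VIR_MIGRATE_LIVE | VIR_MIGRATE_CHANGE_PROTECTION,
                                NULL, NULL, 0);
        if (!ddom) {
            virErrorPtr err = virGetLastError();
            fprintf(stderr, "migration failed: %s\n",
                    err && err->message ? err->message : "unknown error");
            goto cleanup;
        }

        ret = 0;

    cleanup:
        if (ddom)
            virDomainFree(ddom);
        if (dom)
            virDomainFree(dom);
        if (dst)
            virConnectClose(dst);
        if (src)
            virConnectClose(src);
        return ret;
    }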