Mirror of https://gitlab.com/libvirt/libvirt.git
Add support for offline migration
Offline migration transfers the inactive definition of a domain (the domain itself may or may not be active). After successful completion, the domain remains in its current state on the source host and is defined but inactive on the destination host. This is somewhat smarter than virDomainGetXMLDesc() on the source host followed by virDomainDefineXML() on the destination host, because offline migration runs the pre-migration hook to update the domain XML on the destination host. Currently, copying non-shared storage is not supported during offline migration.

Offline migration is requested with a new migration flag, VIR_MIGRATE_OFFLINE, which must be combined with the VIR_MIGRATE_PERSIST_DEST flag.
parent e5577872cb
commit 8b9bf7879b
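A minimal sketch of how a caller might request this through the public API (the connection URIs and the domain name "guest" are illustrative, not part of the patch; error handling is trimmed):

    #include <stdio.h>
    #include <stdlib.h>
    #include <libvirt/libvirt.h>

    int main(void)
    {
        /* URIs and the domain name are placeholders for this sketch. */
        virConnectPtr src = virConnectOpen("qemu:///system");
        virConnectPtr dst = virConnectOpen("qemu+ssh://dst.example.com/system");
        virDomainPtr dom = NULL, migrated = NULL;

        if (!src || !dst || !(dom = virDomainLookupByName(src, "guest")))
            goto cleanup;

        /* VIR_MIGRATE_OFFLINE is only valid together with
         * VIR_MIGRATE_PERSIST_DEST; the domain may be inactive. */
        migrated = virDomainMigrate(dom, dst,
                                    VIR_MIGRATE_OFFLINE |
                                    VIR_MIGRATE_PERSIST_DEST,
                                    NULL, NULL, 0);
        if (!migrated)
            fprintf(stderr, "offline migration failed\n");

    cleanup:
        if (migrated)
            virDomainFree(migrated);
        if (dom)
            virDomainFree(dom);
        if (dst)
            virConnectClose(dst);
        if (src)
            virConnectClose(src);
        return migrated ? EXIT_SUCCESS : EXIT_FAILURE;
    }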
@@ -1092,6 +1092,7 @@ typedef enum {
       * whole migration process; this will be used automatically
       * when supported */
     VIR_MIGRATE_UNSAFE = (1 << 9), /* force migration even if it is considered unsafe */
+    VIR_MIGRATE_OFFLINE = (1 << 10), /* offline migrate */
 } virDomainMigrateFlags;
 
 /* Domain migration. */
@@ -4829,6 +4829,14 @@ virDomainMigrateVersion3(virDomainPtr domain,
     if (uri_out)
         uri = uri_out; /* Did domainMigratePrepare3 change URI? */
 
+    if (flags & VIR_MIGRATE_OFFLINE) {
+        VIR_DEBUG("Offline migration, skipping Perform phase");
+        VIR_FREE(cookieout);
+        cookieoutlen = 0;
+        cancelled = 0;
+        goto finish;
+    }
+
     /* Perform the migration. The driver isn't supposed to return
      * until the migration is complete. The src VM should remain
      * running, but in paused state until the destination can
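In the v3 migration protocol the phases run Begin (source), Prepare (destination), Perform (source), Finish (destination), and Confirm (source); since an offline migration has no running guest to transfer, the code above jumps from Prepare straight to Finish.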
@@ -5199,6 +5207,23 @@ virDomainMigrate(virDomainPtr domain,
         goto error;
     }
 
+    if (flags & VIR_MIGRATE_OFFLINE) {
+        if (!VIR_DRV_SUPPORTS_FEATURE(domain->conn->driver, domain->conn,
+                                      VIR_DRV_FEATURE_MIGRATION_OFFLINE)) {
+            virLibConnError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
+                            _("offline migration is not supported by "
+                              "the source host"));
+            goto error;
+        }
+        if (!VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
+                                      VIR_DRV_FEATURE_MIGRATION_OFFLINE)) {
+            virLibConnError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
+                            _("offline migration is not supported by "
+                              "the destination host"));
+            goto error;
+        }
+    }
+
     if (flags & VIR_MIGRATE_PEER2PEER) {
         if (VIR_DRV_SUPPORTS_FEATURE(domain->conn->driver, domain->conn,
                                      VIR_DRV_FEATURE_MIGRATION_P2P)) {
@@ -5404,6 +5429,23 @@ virDomainMigrate2(virDomainPtr domain,
         goto error;
     }
 
+    if (flags & VIR_MIGRATE_OFFLINE) {
+        if (!VIR_DRV_SUPPORTS_FEATURE(domain->conn->driver, domain->conn,
+                                      VIR_DRV_FEATURE_MIGRATION_OFFLINE)) {
+            virLibConnError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
+                            _("offline migration is not supported by "
+                              "the source host"));
+            goto error;
+        }
+        if (!VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
+                                      VIR_DRV_FEATURE_MIGRATION_OFFLINE)) {
+            virLibConnError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
+                            _("offline migration is not supported by "
+                              "the destination host"));
+            goto error;
+        }
+    }
+
     if (flags & VIR_MIGRATE_PEER2PEER) {
         if (VIR_DRV_SUPPORTS_FEATURE(domain->conn->driver, domain->conn,
                                      VIR_DRV_FEATURE_MIGRATION_P2P)) {
@@ -5581,6 +5623,15 @@ virDomainMigrateToURI(virDomainPtr domain,
 
     virCheckNonNullArgGoto(duri, error);
 
+    if (flags & VIR_MIGRATE_OFFLINE &&
+        !VIR_DRV_SUPPORTS_FEATURE(domain->conn->driver, domain->conn,
+                                  VIR_DRV_FEATURE_MIGRATION_OFFLINE)) {
+        virLibConnError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
+                        _("offline migration is not supported by "
+                          "the source host"));
+        goto error;
+    }
+
     if (flags & VIR_MIGRATE_PEER2PEER) {
         if (VIR_DRV_SUPPORTS_FEATURE(domain->conn->driver, domain->conn,
                                      VIR_DRV_FEATURE_MIGRATION_P2P)) {
@@ -105,6 +105,11 @@ enum {
      * Support for VIR_DOMAIN_XML_MIGRATABLE flag in domainGetXMLDesc
      */
     VIR_DRV_FEATURE_XML_MIGRATABLE = 11,
+
+    /*
+     * Support for offline migration.
+     */
+    VIR_DRV_FEATURE_MIGRATION_OFFLINE = 12,
 };
 
 
@@ -1208,6 +1208,7 @@ qemuSupportsFeature(virConnectPtr conn ATTRIBUTE_UNUSED, int feature)
     case VIR_DRV_FEATURE_FD_PASSING:
     case VIR_DRV_FEATURE_TYPED_PARAM_STRING:
     case VIR_DRV_FEATURE_XML_MIGRATABLE:
+    case VIR_DRV_FEATURE_MIGRATION_OFFLINE:
        return 1;
    default:
        return 0;
@@ -9698,7 +9699,7 @@ qemuDomainMigratePrepareTunnel(virConnectPtr dconn,
 
     ret = qemuMigrationPrepareTunnel(driver, dconn,
                                      NULL, 0, NULL, NULL, /* No cookies in v2 */
-                                     st, dname, dom_xml);
+                                     st, dname, dom_xml, flags);
 
 cleanup:
     qemuDriverUnlock(driver);
@@ -9758,7 +9759,7 @@ qemuDomainMigratePrepare2(virConnectPtr dconn,
     ret = qemuMigrationPrepareDirect(driver, dconn,
                                      NULL, 0, NULL, NULL, /* No cookies */
                                      uri_in, uri_out,
-                                     dname, dom_xml);
+                                     dname, dom_xml, flags);
 
 cleanup:
     qemuDriverUnlock(driver);
@@ -9900,7 +9901,7 @@ qemuDomainMigrateBegin3(virDomainPtr domain,
         asyncJob = QEMU_ASYNC_JOB_NONE;
     }
 
-    if (!virDomainObjIsActive(vm)) {
+    if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) {
         virReportError(VIR_ERR_OPERATION_INVALID,
                        "%s", _("domain is not running"));
         goto endjob;
@@ -9909,8 +9910,8 @@ qemuDomainMigrateBegin3(virDomainPtr domain,
     /* Check if there is any ejected media.
      * We don't want to require them on the destination.
      */
-
-    if (qemuDomainCheckEjectableMedia(driver, vm, asyncJob) < 0)
+    if (!(flags & VIR_MIGRATE_OFFLINE) &&
+        qemuDomainCheckEjectableMedia(driver, vm, asyncJob) < 0)
         goto endjob;
 
     if (!(xml = qemuMigrationBegin(driver, vm, xmlin, dname,
@@ -9995,7 +9996,7 @@ qemuDomainMigratePrepare3(virConnectPtr dconn,
                                      cookiein, cookieinlen,
                                      cookieout, cookieoutlen,
                                      uri_in, uri_out,
-                                     dname, dom_xml);
+                                     dname, dom_xml, flags);
 
 cleanup:
     qemuDriverUnlock(driver);
@@ -10040,7 +10041,7 @@ qemuDomainMigratePrepareTunnel3(virConnectPtr dconn,
     ret = qemuMigrationPrepareTunnel(driver, dconn,
                                      cookiein, cookieinlen,
                                      cookieout, cookieoutlen,
-                                     st, dname, dom_xml);
+                                     st, dname, dom_xml, flags);
     qemuDriverUnlock(driver);
 
 cleanup:
@@ -1442,6 +1442,28 @@ char *qemuMigrationBegin(virQEMUDriverPtr driver,
                                 QEMU_MIGRATION_COOKIE_LOCKSTATE) < 0)
         goto cleanup;
 
+    if (flags & VIR_MIGRATE_OFFLINE) {
+        if (flags & (VIR_MIGRATE_NON_SHARED_DISK |
+                     VIR_MIGRATE_NON_SHARED_INC)) {
+            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
+                           _("offline migration cannot handle "
+                             "non-shared storage"));
+            goto cleanup;
+        }
+        if (!(flags & VIR_MIGRATE_PERSIST_DEST)) {
+            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
+                           _("offline migration must be specified with "
+                             "the persistent flag set"));
+            goto cleanup;
+        }
+        if (flags & VIR_MIGRATE_TUNNELLED) {
+            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
+                           _("tunnelled offline migration does not "
+                             "make sense"));
+            goto cleanup;
+        }
+    }
+
     if (xmlin) {
         if (!(def = virDomainDefParseString(driver->caps, xmlin,
                                             QEMU_EXPECTED_VIRT_TYPES,
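The checks above reject flag combinations that cannot work for an inactive transfer. Reusing the hypothetical dom and dst from the sketch near the top of this page, a call like the following would now fail with VIR_ERR_OPERATION_INVALID:

    /* Rejected: offline migration combined with storage copying. */
    virDomainMigrate(dom, dst,
                     VIR_MIGRATE_OFFLINE |
                     VIR_MIGRATE_PERSIST_DEST |
                     VIR_MIGRATE_NON_SHARED_DISK,
                     NULL, NULL, 0);
    /* Also rejected: VIR_MIGRATE_OFFLINE without VIR_MIGRATE_PERSIST_DEST,
     * and VIR_MIGRATE_OFFLINE together with VIR_MIGRATE_TUNNELLED. */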
@@ -1499,7 +1521,8 @@ qemuMigrationPrepareAny(virQEMUDriverPtr driver,
                         const char *dname,
                         const char *dom_xml,
                         const char *migrateFrom,
-                        virStreamPtr st)
+                        virStreamPtr st,
+                        unsigned long flags)
 {
     virDomainDefPtr def = NULL;
     virDomainObjPtr vm = NULL;
@@ -1512,10 +1535,33 @@ qemuMigrationPrepareAny(virQEMUDriverPtr driver,
     bool tunnel = !!st;
     char *origname = NULL;
     char *xmlout = NULL;
+    unsigned int cookieFlags;
 
     if (virTimeMillisNow(&now) < 0)
         return -1;
 
+    if (flags & VIR_MIGRATE_OFFLINE) {
+        if (flags & (VIR_MIGRATE_NON_SHARED_DISK |
+                     VIR_MIGRATE_NON_SHARED_INC)) {
+            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
+                           _("offline migration cannot handle "
+                             "non-shared storage"));
+            goto cleanup;
+        }
+        if (!(flags & VIR_MIGRATE_PERSIST_DEST)) {
+            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
+                           _("offline migration must be specified with "
+                             "the persistent flag set"));
+            goto cleanup;
+        }
+        if (tunnel) {
+            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
+                           _("tunnelled offline migration does not "
+                             "make sense"));
+            goto cleanup;
+        }
+    }
+
     if (!(def = virDomainDefParseString(driver->caps, dom_xml,
                                         QEMU_EXPECTED_VIRT_TYPES,
                                         VIR_DOMAIN_XML_INACTIVE)))
@@ -1599,6 +1645,9 @@ qemuMigrationPrepareAny(virQEMUDriverPtr driver,
     /* Domain starts inactive, even if the domain XML had an id field. */
     vm->def->id = -1;
 
+    if (flags & VIR_MIGRATE_OFFLINE)
+        goto done;
+
     if (tunnel &&
         (pipe(dataFD) < 0 || virSetCloseExec(dataFD[1]) < 0)) {
         virReportSystemError(errno, "%s",
@@ -1640,8 +1689,14 @@ qemuMigrationPrepareAny(virQEMUDriverPtr driver,
             VIR_DEBUG("Received no lockstate");
     }
 
+done:
+    if (flags & VIR_MIGRATE_OFFLINE)
+        cookieFlags = 0;
+    else
+        cookieFlags = QEMU_MIGRATION_COOKIE_GRAPHICS;
+
     if (qemuMigrationBakeCookie(mig, driver, vm, cookieout, cookieoutlen,
-                                QEMU_MIGRATION_COOKIE_GRAPHICS) < 0) {
+                                cookieFlags) < 0) {
         /* We could tear down the whole guest here, but
          * cookie data is (so far) non-critical, so that
          * seems a little harsh. We'll just warn for now.
@@ -1652,10 +1707,12 @@ qemuMigrationPrepareAny(virQEMUDriverPtr driver,
     if (qemuDomainCleanupAdd(vm, qemuMigrationPrepareCleanup) < 0)
         goto endjob;
 
-    virDomainAuditStart(vm, "migrated", true);
-    event = virDomainEventNewFromObj(vm,
-                                     VIR_DOMAIN_EVENT_STARTED,
-                                     VIR_DOMAIN_EVENT_STARTED_MIGRATED);
+    if (!(flags & VIR_MIGRATE_OFFLINE)) {
+        virDomainAuditStart(vm, "migrated", true);
+        event = virDomainEventNewFromObj(vm,
+                                         VIR_DOMAIN_EVENT_STARTED,
+                                         VIR_DOMAIN_EVENT_STARTED_MIGRATED);
+    }
 
     /* We keep the job active across API calls until the finish() call.
      * This prevents any other APIs being invoked while incoming
@@ -1708,7 +1765,8 @@ qemuMigrationPrepareTunnel(virQEMUDriverPtr driver,
                            int *cookieoutlen,
                            virStreamPtr st,
                            const char *dname,
-                           const char *dom_xml)
+                           const char *dom_xml,
+                           unsigned long flags)
 {
     int ret;
 
@@ -1722,7 +1780,7 @@ qemuMigrationPrepareTunnel(virQEMUDriverPtr driver,
      */
     ret = qemuMigrationPrepareAny(driver, dconn, cookiein, cookieinlen,
                                   cookieout, cookieoutlen, dname, dom_xml,
-                                  "stdio", st);
+                                  "stdio", st, flags);
     return ret;
 }
 
@@ -1737,7 +1795,8 @@ qemuMigrationPrepareDirect(virQEMUDriverPtr driver,
                            const char *uri_in,
                            char **uri_out,
                            const char *dname,
-                           const char *dom_xml)
+                           const char *dom_xml,
+                           unsigned long flags)
 {
     static int port = 0;
     int this_port;
@@ -1833,7 +1892,7 @@ qemuMigrationPrepareDirect(virQEMUDriverPtr driver,
 
     ret = qemuMigrationPrepareAny(driver, dconn, cookiein, cookieinlen,
                                   cookieout, cookieoutlen, dname, dom_xml,
-                                  migrateFrom, NULL);
+                                  migrateFrom, NULL, flags);
 cleanup:
     VIR_FREE(hostname);
     if (ret != 0)
@@ -2679,6 +2738,14 @@ static int doPeer2PeerMigrate3(virQEMUDriverPtr driver,
     if (ret == -1)
         goto cleanup;
 
+    if (flags & VIR_MIGRATE_OFFLINE) {
+        VIR_DEBUG("Offline migration, skipping Perform phase");
+        VIR_FREE(cookieout);
+        cookieoutlen = 0;
+        cancelled = 0;
+        goto finish;
+    }
+
     if (!(flags & VIR_MIGRATE_TUNNELLED) &&
         (uri_out == NULL)) {
         virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
@@ -2817,6 +2884,7 @@ static int doPeer2PeerMigrate(virQEMUDriverPtr driver,
     virConnectPtr dconn = NULL;
     bool p2p;
     virErrorPtr orig_err = NULL;
+    bool offline = false;
 
     VIR_DEBUG("driver=%p, sconn=%p, vm=%p, xmlin=%s, dconnuri=%s, "
               "uri=%s, flags=%lx, dname=%s, resource=%lu",
@@ -2849,6 +2917,9 @@ static int doPeer2PeerMigrate(virQEMUDriverPtr driver,
      */
     *v3proto = VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
                                         VIR_DRV_FEATURE_MIGRATION_V3);
+    if (flags & VIR_MIGRATE_OFFLINE)
+        offline = VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
+                                           VIR_DRV_FEATURE_MIGRATION_OFFLINE);
     qemuDomainObjExitRemoteWithDriver(driver, vm);
 
     if (!p2p) {
@@ -2857,8 +2928,15 @@ static int doPeer2PeerMigrate(virQEMUDriverPtr driver,
         goto cleanup;
     }
 
+    if (flags & VIR_MIGRATE_OFFLINE && !offline) {
+        virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
+                       _("offline migration is not supported by "
+                         "the destination host"));
+        goto cleanup;
+    }
+
     /* domain may have been stopped while we were talking to remote daemon */
-    if (!virDomainObjIsActive(vm)) {
+    if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) {
         virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                        _("guest unexpectedly quit"));
         goto cleanup;
@@ -2921,7 +2999,7 @@ qemuMigrationPerformJob(virQEMUDriverPtr driver,
     if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
         goto cleanup;
 
-    if (!virDomainObjIsActive(vm)) {
+    if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) {
         virReportError(VIR_ERR_OPERATION_INVALID,
                        "%s", _("domain is not running"));
         goto endjob;
@@ -3245,26 +3323,27 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
      * object, but if no, clean up the empty qemu process.
      */
     if (retcode == 0) {
-        if (!virDomainObjIsActive(vm)) {
+        if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) {
            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                           _("guest unexpectedly quit"));
            goto endjob;
        }
 
-        if (qemuMigrationVPAssociatePortProfiles(vm->def) < 0) {
-            qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
-                            VIR_QEMU_PROCESS_STOP_MIGRATED);
-            virDomainAuditStop(vm, "failed");
-            event = virDomainEventNewFromObj(vm,
-                                             VIR_DOMAIN_EVENT_STOPPED,
-                                             VIR_DOMAIN_EVENT_STOPPED_FAILED);
-            goto endjob;
+        if (!(flags & VIR_MIGRATE_OFFLINE)) {
+            if (qemuMigrationVPAssociatePortProfiles(vm->def) < 0) {
+                qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
+                                VIR_QEMU_PROCESS_STOP_MIGRATED);
+                virDomainAuditStop(vm, "failed");
+                event = virDomainEventNewFromObj(vm,
+                                                 VIR_DOMAIN_EVENT_STOPPED,
+                                                 VIR_DOMAIN_EVENT_STOPPED_FAILED);
+                goto endjob;
+            }
+            if (mig->network)
+                if (qemuDomainMigrateOPDRelocate(driver, vm, mig) < 0)
+                    VIR_WARN("unable to provide network data for relocation");
         }
 
-        if (mig->network)
-            if (qemuDomainMigrateOPDRelocate(driver, vm, mig) < 0)
-                VIR_WARN("unable to provide network data for relocation");
-
         if (flags & VIR_MIGRATE_PERSIST_DEST) {
             virDomainDefPtr vmdef;
             if (vm->persistent)
@@ -3290,9 +3369,11 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
          * to restart during confirm() step, so we kill it off now.
          */
         if (v3proto) {
-            qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
-                            VIR_QEMU_PROCESS_STOP_MIGRATED);
-            virDomainAuditStop(vm, "failed");
+            if (!(flags & VIR_MIGRATE_OFFLINE)) {
+                qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
+                                VIR_QEMU_PROCESS_STOP_MIGRATED);
+                virDomainAuditStop(vm, "failed");
+            }
             if (newVM)
                 vm->persistent = 0;
         }
@@ -3312,7 +3393,7 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
             event = NULL;
         }
 
-        if (!(flags & VIR_MIGRATE_PAUSED)) {
+        if (!(flags & VIR_MIGRATE_PAUSED) && !(flags & VIR_MIGRATE_OFFLINE)) {
             /* run 'cont' on the destination, which allows migration on qemu
              * >= 0.10.6 to work properly. This isn't strictly necessary on
              * older qemu's, but it also doesn't hurt anything there
@@ -3350,25 +3431,30 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
 
         dom = virGetDomain(dconn, vm->def->name, vm->def->uuid);
 
-        event = virDomainEventNewFromObj(vm,
-                                         VIR_DOMAIN_EVENT_RESUMED,
-                                         VIR_DOMAIN_EVENT_RESUMED_MIGRATED);
-        if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED) {
-            virDomainObjSetState(vm, VIR_DOMAIN_PAUSED, VIR_DOMAIN_PAUSED_USER);
-            if (event)
-                qemuDomainEventQueue(driver, event);
+        if (!(flags & VIR_MIGRATE_OFFLINE)) {
             event = virDomainEventNewFromObj(vm,
-                                             VIR_DOMAIN_EVENT_SUSPENDED,
-                                             VIR_DOMAIN_EVENT_SUSPENDED_PAUSED);
+                                             VIR_DOMAIN_EVENT_RESUMED,
+                                             VIR_DOMAIN_EVENT_RESUMED_MIGRATED);
+            if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED) {
+                virDomainObjSetState(vm, VIR_DOMAIN_PAUSED,
+                                     VIR_DOMAIN_PAUSED_USER);
+                if (event)
+                    qemuDomainEventQueue(driver, event);
+                event = virDomainEventNewFromObj(vm,
+                                                 VIR_DOMAIN_EVENT_SUSPENDED,
+                                                 VIR_DOMAIN_EVENT_SUSPENDED_PAUSED);
+            }
         }
-        if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0) {
+
+        if (virDomainObjIsActive(vm) &&
+            virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0) {
             VIR_WARN("Failed to save status on vm %s", vm->def->name);
             goto endjob;
         }
 
         /* Guest is successfully running, so cancel previous auto destroy */
         qemuProcessAutoDestroyRemove(driver, vm);
-    } else {
+    } else if (!(flags & VIR_MIGRATE_OFFLINE)) {
         qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
                         VIR_QEMU_PROCESS_STOP_MIGRATED);
         virDomainAuditStop(vm, "failed");
@@ -3430,6 +3516,9 @@ int qemuMigrationConfirm(virQEMUDriverPtr driver,
     if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen, 0)))
         return -1;
 
+    if (flags & VIR_MIGRATE_OFFLINE)
+        goto done;
+
     /* Did the migration go as planned? If yes, kill off the
      * domain object, but if no, resume CPUs
      */
@@ -3465,6 +3554,7 @@ int qemuMigrationConfirm(virQEMUDriverPtr driver,
         }
     }
 
+done:
     qemuMigrationCookieFree(mig);
     rv = 0;
 
@@ -36,7 +36,8 @@
      VIR_MIGRATE_NON_SHARED_DISK | \
      VIR_MIGRATE_NON_SHARED_INC | \
      VIR_MIGRATE_CHANGE_PROTECTION | \
-     VIR_MIGRATE_UNSAFE)
+     VIR_MIGRATE_UNSAFE | \
+     VIR_MIGRATE_OFFLINE)
 
 enum qemuMigrationJobPhase {
     QEMU_MIGRATION_PHASE_NONE = 0,
@@ -97,7 +98,8 @@ int qemuMigrationPrepareTunnel(virQEMUDriverPtr driver,
                                int *cookieoutlen,
                                virStreamPtr st,
                                const char *dname,
-                               const char *dom_xml);
+                               const char *dom_xml,
+                               unsigned long flags);
 
 int qemuMigrationPrepareDirect(virQEMUDriverPtr driver,
                                virConnectPtr dconn,
@@ -108,7 +110,8 @@ int qemuMigrationPrepareDirect(virQEMUDriverPtr driver,
                                const char *uri_in,
                                char **uri_out,
                                const char *dname,
-                               const char *dom_xml);
+                               const char *dom_xml,
+                               unsigned long flags);
 
 int qemuMigrationPerform(virQEMUDriverPtr driver,
                          virConnectPtr conn,
@@ -6803,6 +6803,7 @@ static const vshCmdInfo info_migrate[] = {
 
 static const vshCmdOptDef opts_migrate[] = {
     {"live", VSH_OT_BOOL, 0, N_("live migration")},
+    {"offline", VSH_OT_BOOL, 0, N_("offline migration")},
     {"p2p", VSH_OT_BOOL, 0, N_("peer-2-peer migration")},
     {"direct", VSH_OT_BOOL, 0, N_("direct migration")},
     {"tunneled", VSH_OT_ALIAS, 0, "tunnelled"},
@@ -6888,6 +6889,10 @@ doMigrate(void *opaque)
     if (vshCommandOptBool(cmd, "unsafe"))
         flags |= VIR_MIGRATE_UNSAFE;
 
+    if (vshCommandOptBool(cmd, "offline")) {
+        flags |= VIR_MIGRATE_OFFLINE;
+    }
+
     if (xmlfile &&
         virFileReadAll(xmlfile, 8192, &xml) < 0) {
         vshError(ctl, _("file '%s' doesn't exist"), xmlfile);
@@ -1040,15 +1040,18 @@ I<--total> for only the total stats, I<start> for only the per-cpu
 stats of the CPUs from I<start>, I<count> for only I<count> CPUs'
 stats.
 
-=item B<migrate> [I<--live>] [I<--direct>] [I<--p2p> [I<--tunnelled>]]
+=item B<migrate> [I<--live>] [I<--offline>] [I<--direct>] [I<--p2p> [I<--tunnelled>]]
 [I<--persistent>] [I<--undefinesource>] [I<--suspend>] [I<--copy-storage-all>]
 [I<--copy-storage-inc>] [I<--change-protection>] [I<--unsafe>] [I<--verbose>]
 I<domain> I<desturi> [I<migrateuri>] [I<dname>]
 [I<--timeout> B<seconds>] [I<--xml> B<file>]
 
 Migrate domain to another host. Add I<--live> for live migration; I<--p2p>
 for peer-2-peer migration; I<--direct> for direct migration; or I<--tunnelled>
-for tunnelled migration. I<--persistent> leaves the domain persistent on
+for tunnelled migration. I<--offline> migrates the domain definition without
+starting the domain on the destination and without stopping it on the source
+host. Offline migration may be used with inactive domains and must be used
+with the I<--persistent> option. I<--persistent> leaves the domain persistent on
 destination host, I<--undefinesource> undefines the domain on the source host,
 and I<--suspend> leaves the domain paused on the destination host.
 I<--copy-storage-all> indicates migration with non-shared storage with full
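With the virsh changes above, an offline migration of an inactive domain could be requested like this (host name illustrative):

    # Copy the persistent definition of "guest" to dsthost without
    # starting it there and without stopping anything on the source.
    virsh migrate --offline --persistent guest qemu+ssh://dsthost/system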