/*
 * qemu_migration.c: QEMU migration handling
 *
 * Copyright (C) 2006-2015 Red Hat, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 */

#include <config.h>

#include <sys/time.h>
#include <fcntl.h>
#include <poll.h>

#include "qemu_migration.h"
#include "qemu_migration_cookie.h"
#include "qemu_migration_params.h"
#include "qemu_monitor.h"
#include "qemu_domain.h"
#include "qemu_process.h"
#include "qemu_capabilities.h"
#include "qemu_alias.h"
#include "qemu_cgroup.h"
#include "qemu_hotplug.h"
#include "qemu_blockjob.h"
#include "qemu_security.h"
#include "qemu_slirp.h"
#include "qemu_block.h"
#include "domain_audit.h"
|
2012-12-12 17:59:27 +00:00
|
|
|
#include "virlog.h"
|
2012-12-13 18:21:53 +00:00
|
|
|
#include "virerror.h"
|
2012-12-12 18:06:53 +00:00
|
|
|
#include "viralloc.h"
|
2011-07-19 18:32:58 +00:00
|
|
|
#include "virfile.h"
|
2015-03-17 17:46:44 +00:00
|
|
|
#include "virnetdevopenvswitch.h"
|
2011-01-31 10:47:03 +00:00
|
|
|
#include "datatypes.h"
|
2017-03-07 09:34:47 +00:00
|
|
|
#include "virfdstream.h"
|
2012-12-13 18:01:25 +00:00
|
|
|
#include "viruuid.h"
|
2011-11-29 12:33:23 +00:00
|
|
|
#include "virtime.h"
|
2011-05-18 16:34:21 +00:00
|
|
|
#include "locking/domain_lock.h"
|
2011-08-12 08:54:05 +00:00
|
|
|
#include "rpc/virnetsocket.h"
|
2021-01-21 14:44:53 +00:00
|
|
|
#include "storage_source_conf.h"
|
2012-02-24 18:48:55 +00:00
|
|
|
#include "viruri.h"
|
2012-12-12 17:00:34 +00:00
|
|
|
#include "virhook.h"
|
2013-04-03 10:36:23 +00:00
|
|
|
#include "virstring.h"
|
2013-06-25 13:49:21 +00:00
|
|
|
#include "virtypedparam.h"
|
2014-01-13 06:28:11 +00:00
|
|
|
#include "virprocess.h"
|
2014-11-05 13:28:57 +00:00
|
|
|
#include "nwfilter_conf.h"
|
2019-03-15 02:19:18 +00:00
|
|
|
#include "virdomainsnapshotobjlist.h"
|
2020-01-14 17:38:59 +00:00
|
|
|
#include "virsocket.h"
|
2020-02-16 21:59:28 +00:00
|
|
|
#include "virutil.h"
|
2011-01-31 10:47:03 +00:00
|
|
|
|
|
|
|
#define VIR_FROM_THIS VIR_FROM_QEMU

VIR_LOG_INIT("qemu.qemu_migration");

VIR_ENUM_IMPL(qemuMigrationJobPhase,
              QEMU_MIGRATION_PHASE_LAST,
              "none",
              "perform2",
              "begin3",
              "perform3",
              "perform3_done",
              "confirm3_cancelled",
              "confirm3",
              "prepare",
              "finish2",
              "finish3",
);

static int
qemuMigrationJobStart(virQEMUDriverPtr driver,
                      virDomainObjPtr vm,
                      qemuDomainAsyncJob job,
                      unsigned long apiFlags)
    ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2) G_GNUC_WARN_UNUSED_RESULT;

static void
qemuMigrationJobSetPhase(virQEMUDriverPtr driver,
                         virDomainObjPtr vm,
                         qemuMigrationJobPhase phase)
    ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2);

static void
qemuMigrationJobStartPhase(virQEMUDriverPtr driver,
                           virDomainObjPtr vm,
                           qemuMigrationJobPhase phase)
    ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2);

static void
qemuMigrationJobContinue(virDomainObjPtr obj)
    ATTRIBUTE_NONNULL(1);

static bool
qemuMigrationJobIsActive(virDomainObjPtr vm,
                         qemuDomainAsyncJob job)
    ATTRIBUTE_NONNULL(1);

static void
qemuMigrationJobFinish(virQEMUDriverPtr driver,
                       virDomainObjPtr obj)
    ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2);

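/**
 * qemuMigrationSrcStoreDomainState:
 * @vm: domain object
 *
 * Remember the state the domain was in before migration started so that
 * qemuMigrationSrcRestoreDomainState() can bring it back if migration fails.
 */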
static void
qemuMigrationSrcStoreDomainState(virDomainObjPtr vm)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    priv->preMigrationState = virDomainObjGetState(vm, NULL);

    VIR_DEBUG("Storing pre-migration state=%d domain=%p",
              priv->preMigrationState, vm);
}

/* Returns true if the domain was resumed, false otherwise */
static bool
qemuMigrationSrcRestoreDomainState(virQEMUDriverPtr driver, virDomainObjPtr vm)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    int reason;
    virDomainState state = virDomainObjGetState(vm, &reason);
    bool ret = false;

    VIR_DEBUG("driver=%p, vm=%p, pre-mig-state=%s, state=%s, reason=%s",
              driver, vm,
              virDomainStateTypeToString(priv->preMigrationState),
              virDomainStateTypeToString(state),
              virDomainStateReasonToString(state, reason));

    if (state != VIR_DOMAIN_PAUSED ||
        reason == VIR_DOMAIN_PAUSED_POSTCOPY_FAILED)
        goto cleanup;

    if (priv->preMigrationState == VIR_DOMAIN_RUNNING) {
        /* This is basically the only restore possibility that's safe
         * and we should attempt to do */

        VIR_DEBUG("Restoring pre-migration state due to migration error");

        /* we got here through some sort of failure; start the domain again */
        if (qemuProcessStartCPUs(driver, vm,
                                 VIR_DOMAIN_RUNNING_MIGRATION_CANCELED,
                                 QEMU_ASYNC_JOB_MIGRATION_OUT) < 0) {
            /* Hm, we already know we are in error here.  We don't want to
             * overwrite the previous error, though, so we just throw something
             * to the logs and hope for the best */
            VIR_ERROR(_("Failed to resume guest %s after failure"), vm->def->name);
            goto cleanup;
        }
        ret = true;
    }

 cleanup:
    priv->preMigrationState = VIR_DOMAIN_NOSTATE;
    return ret;
}

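/**
 * qemuMigrationDstPrecreateDisk:
 * @conn: storage driver connection; opened lazily on first use
 * @disk: disk definition to pre-create storage for
 * @capacity: required capacity of the new volume in bytes
 *
 * Pre-create a storage volume on the destination host for a file- or
 * volume-backed disk so that storage migration has a target to copy into.
 * Network disks and sourceless disks are skipped.
 *
 * Returns 0 on success, -1 otherwise.
 */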
static int
qemuMigrationDstPrecreateDisk(virConnectPtr *conn,
                              virDomainDiskDefPtr disk,
                              unsigned long long capacity)
{
    int ret = -1;
    virStoragePoolPtr pool = NULL;
    virStorageVolPtr vol = NULL;
    char *volName = NULL, *basePath = NULL;
    char *volStr = NULL;
    g_auto(virBuffer) buf = VIR_BUFFER_INITIALIZER;
    const char *format = NULL;
    unsigned int flags = 0;

    VIR_DEBUG("Precreate disk type=%s", virStorageTypeToString(disk->src->type));

    switch ((virStorageType)disk->src->type) {
    case VIR_STORAGE_TYPE_FILE:
        if (!virDomainDiskGetSource(disk)) {
            VIR_DEBUG("Dropping sourceless disk '%s'",
                      disk->dst);
            return 0;
        }

        basePath = g_strdup(disk->src->path);

        if (!(volName = strrchr(basePath, '/'))) {
            virReportError(VIR_ERR_INVALID_ARG,
                           _("malformed disk path: %s"),
                           disk->src->path);
            goto cleanup;
        }

        *volName = '\0';
        volName++;

        if (!*conn) {
            if (!(*conn = virGetConnectStorage()))
                goto cleanup;
        }

        if (!(pool = virStoragePoolLookupByTargetPath(*conn, basePath)))
            goto cleanup;
        format = virStorageFileFormatTypeToString(disk->src->format);
        if (disk->src->format == VIR_STORAGE_FILE_QCOW2)
            flags |= VIR_STORAGE_VOL_CREATE_PREALLOC_METADATA;
        break;

    case VIR_STORAGE_TYPE_VOLUME:
        if (!*conn) {
            if (!(*conn = virGetConnectStorage()))
                goto cleanup;
        }

        if (!(pool = virStoragePoolLookupByName(*conn, disk->src->srcpool->pool)))
            goto cleanup;
        format = virStorageFileFormatTypeToString(disk->src->format);
        volName = disk->src->srcpool->volume;
        if (disk->src->format == VIR_STORAGE_FILE_QCOW2)
            flags |= VIR_STORAGE_VOL_CREATE_PREALLOC_METADATA;
        break;

    case VIR_STORAGE_TYPE_NETWORK:
        VIR_DEBUG("Skipping creation of network disk '%s'",
                  disk->dst);
        return 0;

    case VIR_STORAGE_TYPE_BLOCK:
    case VIR_STORAGE_TYPE_DIR:
    case VIR_STORAGE_TYPE_NVME:
    case VIR_STORAGE_TYPE_VHOST_USER:
    case VIR_STORAGE_TYPE_NONE:
    case VIR_STORAGE_TYPE_LAST:
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       _("cannot precreate storage for disk type '%s'"),
                       virStorageTypeToString(disk->src->type));
        goto cleanup;
    }

    if ((vol = virStorageVolLookupByName(pool, volName))) {
        VIR_DEBUG("Skipping creation of already existing volume of name '%s'",
                  volName);
        ret = 0;
        goto cleanup;
    }

    virBufferAddLit(&buf, "<volume>\n");
    virBufferAdjustIndent(&buf, 2);
    virBufferEscapeString(&buf, "<name>%s</name>\n", volName);
    virBufferAsprintf(&buf, "<capacity>%llu</capacity>\n", capacity);
    virBufferAddLit(&buf, "<target>\n");
    virBufferAdjustIndent(&buf, 2);
    virBufferAsprintf(&buf, "<format type='%s'/>\n", format);
    virBufferAdjustIndent(&buf, -2);
    virBufferAddLit(&buf, "</target>\n");
    virBufferAdjustIndent(&buf, -2);
    virBufferAddLit(&buf, "</volume>\n");

    if (!(volStr = virBufferContentAndReset(&buf))) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("unable to create volume XML"));
        goto cleanup;
    }

    if (!(vol = virStorageVolCreateXML(pool, volStr, flags)))
        goto cleanup;

    ret = 0;
 cleanup:
    VIR_FREE(basePath);
    VIR_FREE(volStr);
    virObjectUnref(vol);
    virObjectUnref(pool);
    return ret;
}

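/**
 * qemuMigrationAnyCopyDisk:
 * @disk: disk to check
 * @nmigrate_disks: number of entries in @migrate_disks
 * @migrate_disks: optional list of disk targets selected for storage migration
 *
 * Decide whether @disk should be copied during storage migration: if an
 * explicit disk list was given only disks on that list are copied, otherwise
 * all non-shared, non-readonly disks with a source are copied.
 */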
static bool
qemuMigrationAnyCopyDisk(virDomainDiskDef const *disk,
                         size_t nmigrate_disks, const char **migrate_disks)
{
    size_t i;

    /* Check if the disk alias is in the list */
    if (nmigrate_disks) {
        for (i = 0; i < nmigrate_disks; i++) {
            if (STREQ(disk->dst, migrate_disks[i]))
                return true;
        }
        return false;
    }

    /* Default is to migrate only non-shared non-readonly disks
     * with source */
    return !disk->src->shared && !disk->src->readonly &&
           !virStorageSourceIsEmpty(disk->src);
}

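/**
 * qemuMigrationDstPrecreateStorage:
 * @vm: destination domain object
 * @nbd: NBD data from the migration cookie describing the disks to copy
 * @nmigrate_disks: number of entries in @migrate_disks
 * @migrate_disks: optional list of disk targets selected for storage migration
 * @incremental: whether incremental storage migration was requested
 *
 * Pre-create missing storage on the destination host for every disk that is
 * going to be copied. Pre-creation of targets for incremental storage
 * migration is not supported and is rejected here.
 *
 * Returns 0 on success, -1 otherwise.
 */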
static int
qemuMigrationDstPrecreateStorage(virDomainObjPtr vm,
                                 qemuMigrationCookieNBDPtr nbd,
                                 size_t nmigrate_disks,
                                 const char **migrate_disks,
                                 bool incremental)
{
    int ret = -1;
    size_t i = 0;
    virConnectPtr conn = NULL;

    if (!nbd || !nbd->ndisks)
        return 0;

    for (i = 0; i < nbd->ndisks; i++) {
        virDomainDiskDefPtr disk;
        const char *diskSrcPath;
        g_autofree char *nvmePath = NULL;

        VIR_DEBUG("Looking up disk target '%s' (capacity=%llu)",
                  nbd->disks[i].target, nbd->disks[i].capacity);

        if (!(disk = virDomainDiskByTarget(vm->def, nbd->disks[i].target))) {
            virReportError(VIR_ERR_INTERNAL_ERROR,
                           _("unable to find disk by target: %s"),
                           nbd->disks[i].target);
            goto cleanup;
        }

        if (disk->src->type == VIR_STORAGE_TYPE_NVME) {
            virPCIDeviceAddressGetSysfsFile(&disk->src->nvme->pciAddr, &nvmePath);
            diskSrcPath = nvmePath;
        } else {
            diskSrcPath = virDomainDiskGetSource(disk);
        }

        /* Skip disks we don't want to migrate and already existing disks. */
        if (!qemuMigrationAnyCopyDisk(disk, nmigrate_disks, migrate_disks) ||
            (diskSrcPath && virFileExists(diskSrcPath))) {
            continue;
        }

        if (incremental) {
            virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
                           _("pre-creation of storage targets for incremental "
                             "storage migration is not supported"));
            goto cleanup;
        }

        VIR_DEBUG("Proceeding with disk source %s", NULLSTR(diskSrcPath));

        if (qemuMigrationDstPrecreateDisk(&conn,
                                          disk, nbd->disks[i].capacity) < 0)
            goto cleanup;
    }

    ret = 0;
 cleanup:
    virObjectUnref(conn);
    return ret;
}

/**
 * qemuMigrationDstStartNBDServer:
 * @driver: qemu driver
 * @vm: domain
 *
 * Starts NBD server. This is a newer method to copy
 * storage during migration than using 'blk' and 'inc'
 * arguments in 'migrate' monitor command.
 * Error is reported here.
 *
 * Returns 0 on success, -1 otherwise.
 */
static int
qemuMigrationDstStartNBDServer(virQEMUDriverPtr driver,
                               virDomainObjPtr vm,
                               const char *listenAddr,
                               size_t nmigrate_disks,
                               const char **migrate_disks,
                               int nbdPort,
                               const char *nbdURI,
                               const char *tls_alias)
{
    int ret = -1;
    qemuDomainObjPrivatePtr priv = vm->privateData;
    size_t i;
    virStorageNetHostDef server = {
        .name = (char *)listenAddr, /* cast away const */
        .transport = VIR_STORAGE_NET_HOST_TRANS_TCP,
        .port = nbdPort,
    };
    bool server_started = false;
    g_autoptr(virURI) uri = NULL;

    /* Prefer nbdURI */
    if (nbdURI) {
        uri = virURIParse(nbdURI);

        if (!uri)
            return -1;

        if (!uri->scheme) {
            virReportError(VIR_ERR_INVALID_ARG, _("No URI scheme specified: %s"), nbdURI);
            return -1;
        }

        if (STREQ(uri->scheme, "tcp")) {
            server.transport = VIR_STORAGE_NET_HOST_TRANS_TCP;
            if (!uri->server || STREQ(uri->server, "")) {
                /* Since tcp://:<port>/ is parsed as server = NULL and port = 0
                 * we should rather error out instead of auto-allocating a port
                 * as that would be the exact opposite of what was requested. */
                virReportError(VIR_ERR_INVALID_ARG,
                               _("URI with tcp scheme did not provide a server part: %s"),
                               nbdURI);
                return -1;
            }
            server.name = (char *)uri->server;
            if (uri->port)
                server.port = uri->port;
        } else if (STREQ(uri->scheme, "unix")) {
            if (!uri->path) {
                virReportError(VIR_ERR_INVALID_ARG, "%s",
                               _("UNIX disks URI does not include path"));
                return -1;
            }
            server.transport = VIR_STORAGE_NET_HOST_TRANS_UNIX;
            server.socket = (char *)uri->path;
        } else {
            virReportError(VIR_ERR_INVALID_ARG,
                           _("Unsupported scheme in disks URI: %s"),
                           uri->scheme);
            return -1;
        }
    } else if (nbdPort < 0 || nbdPort > USHRT_MAX) {
        virReportError(VIR_ERR_INVALID_ARG, "%s",
                       _("nbd port must be in range 0-65535"));
        return -1;
    }

    for (i = 0; i < vm->def->ndisks; i++) {
        virDomainDiskDefPtr disk = vm->def->disks[i];
        g_autofree char *diskAlias = NULL;

        /* check whether disk should be migrated */
        if (!qemuMigrationAnyCopyDisk(disk, nmigrate_disks, migrate_disks))
            continue;

        if (disk->src->readonly || virStorageSourceIsEmpty(disk->src)) {
            virReportError(VIR_ERR_OPERATION_UNSUPPORTED,
                           _("Cannot migrate empty or read-only disk %s"),
                           disk->dst);
            goto cleanup;
        }

        if (!(diskAlias = qemuAliasDiskDriveFromDisk(disk)))
            goto cleanup;

        if (!server_started &&
            server.transport == VIR_STORAGE_NET_HOST_TRANS_TCP) {
            if (server.port) {
                if (virPortAllocatorSetUsed(server.port) < 0)
                    goto cleanup;
            } else {
                unsigned short port = 0;

                if (virPortAllocatorAcquire(driver->migrationPorts, &port) < 0)
                    goto cleanup;

                server.port = port;
            }
        }

        if (qemuDomainObjEnterMonitorAsync(driver, vm,
                                           QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
            goto cleanup;

        if (!server_started) {
            if (qemuMonitorNBDServerStart(priv->mon, &server, tls_alias) < 0)
                goto exit_monitor;
            server_started = true;
        }

        if (qemuBlockExportAddNBD(vm, diskAlias, disk->src, diskAlias, true, NULL) < 0)
            goto exit_monitor;
        if (qemuDomainObjExitMonitor(driver, vm) < 0)
            goto cleanup;
    }

    if (server.transport == VIR_STORAGE_NET_HOST_TRANS_TCP)
        priv->nbdPort = server.port;

    ret = 0;

 cleanup:
    if (ret < 0)
        virPortAllocatorRelease(server.port);
    return ret;

 exit_monitor:
    ignore_value(qemuDomainObjExitMonitor(driver, vm));
    goto cleanup;
}

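/**
 * qemuMigrationDstStopNBDServer:
 * @driver: qemu driver
 * @vm: domain
 * @mig: migration cookie
 *
 * Stop the NBD server started by qemuMigrationDstStartNBDServer() and release
 * the port it was listening on. A no-op when the cookie carries no NBD data.
 *
 * Returns 0 on success, -1 otherwise.
 */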
static int
qemuMigrationDstStopNBDServer(virQEMUDriverPtr driver,
                              virDomainObjPtr vm,
                              qemuMigrationCookiePtr mig)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;

    if (!mig->nbd)
        return 0;

    if (qemuDomainObjEnterMonitorAsync(driver, vm,
                                       QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
        return -1;

    if (qemuMonitorNBDServerStop(priv->mon) < 0)
        VIR_WARN("Unable to stop NBD server");
    if (qemuDomainObjExitMonitor(driver, vm) < 0)
        return -1;

    virPortAllocatorRelease(priv->nbdPort);
    priv->nbdPort = 0;
    return 0;
}

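/**
 * qemuMigrationNBDReportMirrorError:
 * @job: block job data of the failed disk mirror
 * @diskdst: target name of the disk (e.g. "vda")
 *
 * Report a libvirt error for a failed disk mirror, including the error
 * message recorded by the block job when one is available.
 */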
static void
qemuMigrationNBDReportMirrorError(qemuBlockJobDataPtr job,
                                  const char *diskdst)
{
    if (job->errmsg) {
        virReportError(VIR_ERR_OPERATION_FAILED,
                       _("migration of disk %s failed: %s"),
                       diskdst, job->errmsg);
    } else {
        virReportError(VIR_ERR_OPERATION_FAILED,
                       _("migration of disk %s failed"), diskdst);
    }
}

/**
 * qemuMigrationSrcNBDStorageCopyReady:
 * @vm: domain
 *
 * Check the status of all drives copied via qemuMigrationSrcNBDStorageCopy.
 * Any pending block job events for the mirrored disks will be processed.
 *
 * Returns 1 if all mirrors are "ready",
 *         0 if some mirrors are still performing initial sync,
 *        -1 on error.
 */
static int
qemuMigrationSrcNBDStorageCopyReady(virDomainObjPtr vm,
                                    qemuDomainAsyncJob asyncJob)
{
    size_t i;
    size_t notReady = 0;

    for (i = 0; i < vm->def->ndisks; i++) {
        virDomainDiskDefPtr disk = vm->def->disks[i];
        qemuDomainDiskPrivatePtr diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk);
        qemuBlockJobDataPtr job;

        if (!diskPriv->migrating)
            continue;

        if (!(job = qemuBlockJobDiskGetJob(disk))) {
            virReportError(VIR_ERR_INTERNAL_ERROR,
                           _("missing block job data for disk '%s'"), disk->dst);
            return -1;
        }

        qemuBlockJobUpdate(vm, job, asyncJob);
        if (job->state == VIR_DOMAIN_BLOCK_JOB_FAILED) {
            qemuMigrationNBDReportMirrorError(job, disk->dst);
            virObjectUnref(job);
            return -1;
        }

        if (job->state != VIR_DOMAIN_BLOCK_JOB_READY)
            notReady++;

        virObjectUnref(job);
    }

    if (notReady) {
        VIR_DEBUG("Waiting for %zu disk mirrors to get ready", notReady);
        return 0;
    } else {
        VIR_DEBUG("All disk mirrors are ready");
        return 1;
    }
}

/*
 * If @check is true, the function will report an error and return a different
 * code in case a block job fails. This way we can properly abort migration in
 * case some block jobs failed once all memory has already been transferred.
 *
 * Returns 1 if all mirrors are gone,
 *         0 if some mirrors are still active,
 *         -1 some mirrors failed but some are still active,
 *         -2 all mirrors are gone but some of them failed.
 */
static int
qemuMigrationSrcNBDCopyCancelled(virDomainObjPtr vm,
                                 qemuDomainAsyncJob asyncJob,
                                 bool check)
{
    size_t i;
    size_t active = 0;
    size_t completed = 0;
    bool failed = false;

 retry:
    for (i = 0; i < vm->def->ndisks; i++) {
        virDomainDiskDefPtr disk = vm->def->disks[i];
        qemuDomainDiskPrivatePtr diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk);
        qemuBlockJobDataPtr job;

        if (!diskPriv->migrating)
            continue;

        if (!(job = qemuBlockJobDiskGetJob(disk)))
            continue;

        qemuBlockJobUpdate(vm, job, asyncJob);
        switch (job->state) {
        case VIR_DOMAIN_BLOCK_JOB_FAILED:
            if (check) {
                qemuMigrationNBDReportMirrorError(job, disk->dst);
                failed = true;
            }
            G_GNUC_FALLTHROUGH;
        case VIR_DOMAIN_BLOCK_JOB_CANCELED:
        case VIR_DOMAIN_BLOCK_JOB_COMPLETED:
            diskPriv->migrating = false;
            break;

        default:
            active++;
        }

        if (job->state == VIR_DOMAIN_BLOCK_JOB_COMPLETED)
            completed++;

        virObjectUnref(job);
    }

    /* Updating completed block job drops the lock thus we have to recheck
     * block jobs for disks that reside before the disk(s) with completed
     * block job.
     */
    if (completed > 0) {
        completed = 0;
        active = 0;
        goto retry;
    }

    if (failed) {
        if (active) {
            VIR_DEBUG("Some disk mirrors failed; still waiting for %zu "
                      "disk mirrors to finish", active);
            return -1;
        } else {
            VIR_DEBUG("All disk mirrors are gone; some of them failed");
            return -2;
        }
    } else {
        if (active) {
            VIR_DEBUG("Waiting for %zu disk mirrors to finish", active);
            return 0;
        } else {
            VIR_DEBUG("All disk mirrors are gone");
            return 1;
        }
    }
}

/*
 * Returns 0 on success,
 *         1 when job is already completed or it failed and failNoJob is false,
 *         -1 on error or when job failed and failNoJob is true.
 */
static int
qemuMigrationSrcNBDCopyCancelOne(virQEMUDriverPtr driver,
                                 virDomainObjPtr vm,
                                 virDomainDiskDefPtr disk,
                                 qemuBlockJobDataPtr job,
                                 bool failNoJob,
                                 qemuDomainAsyncJob asyncJob)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    int rv;

    qemuBlockJobUpdate(vm, job, asyncJob);
    switch (job->state) {
    case VIR_DOMAIN_BLOCK_JOB_FAILED:
    case VIR_DOMAIN_BLOCK_JOB_CANCELED:
        if (failNoJob) {
            qemuMigrationNBDReportMirrorError(job, disk->dst);
            return -1;
        }
        G_GNUC_FALLTHROUGH;
    case VIR_DOMAIN_BLOCK_JOB_COMPLETED:
        return 1;
    }

    if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
        return -1;

    rv = qemuMonitorBlockJobCancel(priv->mon, job->name);

    if (qemuDomainObjExitMonitor(driver, vm) < 0 || rv < 0)
        return -1;

    return 0;
}

/**
 * qemuMigrationSrcNBDCopyCancel:
 * @driver: qemu driver
 * @vm: domain
 * @check: if true report an error when some of the mirrors fails
 *
 * Cancel all drive-mirrors started by qemuMigrationSrcNBDStorageCopy.
 * Any pending block job events for the affected disks will be processed and
 * synchronous block job terminated regardless of return value unless qemu
 * has crashed.
 *
 * Returns 0 on success, -1 otherwise.
 */
static int
qemuMigrationSrcNBDCopyCancel(virQEMUDriverPtr driver,
                              virDomainObjPtr vm,
                              bool check,
                              qemuDomainAsyncJob asyncJob,
                              virConnectPtr dconn)
{
    virErrorPtr err = NULL;
    int ret = -1;
    size_t i;
    int rv;
    bool failed = false;

    VIR_DEBUG("Cancelling drive mirrors for domain %s", vm->def->name);

    for (i = 0; i < vm->def->ndisks; i++) {
        virDomainDiskDefPtr disk = vm->def->disks[i];
        qemuDomainDiskPrivatePtr diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk);
        qemuBlockJobDataPtr job;

        if (!(job = qemuBlockJobDiskGetJob(disk)) ||
            !qemuBlockJobIsRunning(job))
            diskPriv->migrating = false;

        if (!diskPriv->migrating) {
            virObjectUnref(job);
            continue;
        }

        rv = qemuMigrationSrcNBDCopyCancelOne(driver, vm, disk, job,
                                              check, asyncJob);
        if (rv != 0) {
            if (rv < 0) {
                if (!err)
                    virErrorPreserveLast(&err);
                failed = true;
            }
            qemuBlockJobSyncEnd(vm, job, asyncJob);
            diskPriv->migrating = false;
        }

        virObjectUnref(job);
    }

    while ((rv = qemuMigrationSrcNBDCopyCancelled(vm, asyncJob, check)) != 1) {
        if (check && !failed &&
            dconn && virConnectIsAlive(dconn) <= 0) {
            virReportError(VIR_ERR_OPERATION_FAILED, "%s",
                           _("Lost connection to destination host"));
            failed = true;
        }

        if (rv < 0) {
            failed = true;
            if (rv == -2)
                break;
        }

        if (failed && !err)
            virErrorPreserveLast(&err);

        if (virDomainObjWait(vm) < 0)
            goto cleanup;
    }

    for (i = 0; i < vm->def->ndisks; i++) {
        virDomainDiskDefPtr disk = vm->def->disks[i];
        qemuDomainDiskPrivatePtr diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk);

        if (!diskPriv->migrSource)
            continue;

        qemuBlockStorageSourceDetachOneBlockdev(driver, vm, asyncJob,
                                                diskPriv->migrSource);
        virObjectUnref(diskPriv->migrSource);
        diskPriv->migrSource = NULL;
    }

    ret = failed ? -1 : 0;

 cleanup:
    virErrorRestore(&err);
    return ret;
}

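/**
 * qemuMigrationSrcNBDStorageCopyBlockdevPrepareSource:
 * @disk: disk being migrated
 * @host: hostname of the destination NBD server
 * @port: port of the destination NBD server
 * @socket: UNIX socket path of the destination NBD server; overrides @host/@port
 * @tlsAlias: alias of the TLS object to use for the connection, or NULL
 *
 * Build the virStorageSource describing the NBD export on the destination
 * that blockdev-mirror will copy this disk into.
 *
 * Returns the new storage source or NULL on error.
 */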
static virStorageSourcePtr
qemuMigrationSrcNBDStorageCopyBlockdevPrepareSource(virDomainDiskDefPtr disk,
                                                    const char *host,
                                                    int port,
                                                    const char *socket,
                                                    const char *tlsAlias)
{
    g_autoptr(virStorageSource) copysrc = NULL;

    copysrc = virStorageSourceNew();
    copysrc->type = VIR_STORAGE_TYPE_NETWORK;
    copysrc->protocol = VIR_STORAGE_NET_PROTOCOL_NBD;
    copysrc->format = VIR_STORAGE_FILE_RAW;

    copysrc->backingStore = virStorageSourceNew();

    if (!(copysrc->path = qemuAliasDiskDriveFromDisk(disk)))
        return NULL;

    copysrc->hosts = g_new0(virStorageNetHostDef, 1);

    copysrc->nhosts = 1;
    if (socket) {
        copysrc->hosts->transport = VIR_STORAGE_NET_HOST_TRANS_UNIX;
        copysrc->hosts->socket = g_strdup(socket);
    } else {
        copysrc->hosts->transport = VIR_STORAGE_NET_HOST_TRANS_TCP;
        copysrc->hosts->port = port;
        copysrc->hosts->name = g_strdup(host);
    }

    copysrc->tlsAlias = g_strdup(tlsAlias);

    copysrc->nodestorage = g_strdup_printf("migration-%s-storage", disk->dst);
    copysrc->nodeformat = g_strdup_printf("migration-%s-format", disk->dst);

    return g_steal_pointer(&copysrc);
}

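/**
 * qemuMigrationSrcNBDStorageCopyBlockdev:
 *
 * Start the copy of one disk to the destination NBD server by attaching the
 * NBD destination as a blockdev backend and starting a blockdev-mirror job
 * to it. The attached migration source is stored in the disk private data so
 * it can be detached when the mirror is cancelled.
 *
 * Returns 0 on success, -1 otherwise.
 */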
static int
qemuMigrationSrcNBDStorageCopyBlockdev(virQEMUDriverPtr driver,
                                       virDomainObjPtr vm,
                                       virDomainDiskDefPtr disk,
                                       const char *jobname,
                                       const char *sourcename,
                                       bool persistjob,
                                       const char *host,
                                       int port,
                                       const char *socket,
                                       unsigned long long mirror_speed,
                                       unsigned int mirror_shallow,
                                       const char *tlsAlias)
{
    g_autoptr(qemuBlockStorageSourceAttachData) data = NULL;
    qemuDomainDiskPrivatePtr diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk);
    int mon_ret = 0;
    g_autoptr(virStorageSource) copysrc = NULL;

    VIR_DEBUG("starting blockdev mirror for disk=%s to host=%s", disk->dst, host);

    if (!(copysrc = qemuMigrationSrcNBDStorageCopyBlockdevPrepareSource(disk, host, port, socket, tlsAlias)))
        return -1;

    /* Migration via blockdev-mirror was supported sooner than the auto-read-only
     * feature was added to qemu */
    if (!(data = qemuBlockStorageSourceAttachPrepareBlockdev(copysrc,
                                                             copysrc->backingStore,
                                                             false)))
        return -1;

    if (qemuDomainObjEnterMonitorAsync(driver, vm,
                                       QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
        return -1;

    mon_ret = qemuBlockStorageSourceAttachApply(qemuDomainGetMonitor(vm), data);

    if (mon_ret == 0)
        mon_ret = qemuMonitorBlockdevMirror(qemuDomainGetMonitor(vm), jobname, persistjob,
                                            sourcename, copysrc->nodeformat,
                                            mirror_speed, 0, 0, mirror_shallow);

    if (mon_ret != 0)
        qemuBlockStorageSourceAttachRollback(qemuDomainGetMonitor(vm), data);

    if (qemuDomainObjExitMonitor(driver, vm) < 0 || mon_ret < 0)
        return -1;

    diskPriv->migrSource = g_steal_pointer(&copysrc);

    return 0;
}

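/**
 * qemuMigrationSrcNBDStorageCopyDriveMirror:
 *
 * Start the copy of one disk to the destination NBD server using the legacy
 * drive-mirror monitor command, building the nbd: (or nbd+unix:) destination
 * URI from @host, @port and @socket.
 *
 * Returns 0 on success, -1 otherwise.
 */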
static int
qemuMigrationSrcNBDStorageCopyDriveMirror(virQEMUDriverPtr driver,
                                          virDomainObjPtr vm,
                                          const char *diskAlias,
                                          const char *host,
                                          int port,
                                          const char *socket,
                                          unsigned long long mirror_speed,
                                          bool mirror_shallow)
{
    g_autofree char *nbd_dest = NULL;
    int mon_ret;

    if (socket) {
        nbd_dest = g_strdup_printf("nbd+unix:///%s?socket=%s",
                                   diskAlias, socket);
    } else if (strchr(host, ':')) {
        nbd_dest = g_strdup_printf("nbd:[%s]:%d:exportname=%s", host, port,
                                   diskAlias);
    } else {
        nbd_dest = g_strdup_printf("nbd:%s:%d:exportname=%s", host, port,
                                   diskAlias);
    }

    if (qemuDomainObjEnterMonitorAsync(driver, vm,
                                       QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
        return -1;

    mon_ret = qemuMonitorDriveMirror(qemuDomainGetMonitor(vm),
                                     diskAlias, nbd_dest, "raw",
                                     mirror_speed, 0, 0, mirror_shallow, true);

    if (qemuDomainObjExitMonitor(driver, vm) < 0 || mon_ret < 0)
        return -1;

    return 0;
}

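/**
 * qemuMigrationSrcNBDStorageCopyOne:
 *
 * Start the storage mirror for a single disk, registering a block job for it
 * and choosing blockdev-mirror when QEMU_CAPS_BLOCKDEV is available (or TLS
 * is requested) and the legacy drive-mirror command otherwise.
 *
 * Returns 0 on success, -1 otherwise.
 */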
static int
qemuMigrationSrcNBDStorageCopyOne(virQEMUDriverPtr driver,
                                  virDomainObjPtr vm,
                                  virDomainDiskDefPtr disk,
                                  const char *host,
                                  int port,
                                  const char *socket,
                                  unsigned long long mirror_speed,
                                  bool mirror_shallow,
                                  const char *tlsAlias,
                                  unsigned int flags)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    qemuDomainDiskPrivatePtr diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk);
    qemuBlockJobDataPtr job = NULL;
    char *diskAlias = NULL;
    const char *jobname = NULL;
    const char *sourcename = NULL;
    bool persistjob = false;
    int rc;
    int ret = -1;

    if (!(diskAlias = qemuAliasDiskDriveFromDisk(disk)))
        goto cleanup;

    if (!(job = qemuBlockJobDiskNew(vm, disk, QEMU_BLOCKJOB_TYPE_COPY, diskAlias)))
        goto cleanup;

    if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV)) {
        jobname = diskAlias;
        sourcename = qemuDomainDiskGetTopNodename(disk);
        persistjob = true;
    } else {
        jobname = NULL;
        sourcename = diskAlias;
        persistjob = false;
    }

    qemuBlockJobSyncBegin(job);

    if (flags & VIR_MIGRATE_TLS ||
        virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV)) {
        rc = qemuMigrationSrcNBDStorageCopyBlockdev(driver, vm,
                                                    disk, jobname,
                                                    sourcename, persistjob,
                                                    host, port, socket,
                                                    mirror_speed,
                                                    mirror_shallow,
                                                    tlsAlias);
    } else {
        rc = qemuMigrationSrcNBDStorageCopyDriveMirror(driver, vm, diskAlias,
                                                       host, port, socket,
                                                       mirror_speed,
                                                       mirror_shallow);
    }

    if (rc < 0)
        goto cleanup;

    diskPriv->migrating = true;
    qemuBlockJobStarted(job, vm);

    ret = 0;

 cleanup:
    qemuBlockJobStartupFinalize(vm, job);
    VIR_FREE(diskAlias);
    return ret;
}

/**
 * qemuMigrationSrcNBDStorageCopy:
 * @driver: qemu driver
 * @vm: domain
 * @mig: migration cookie
 * @host: where are we migrating to
 * @speed: bandwidth limit in MiB/s
 * @migrate_flags: migrate monitor command flags
 *
 * Migrate non-shared storage using the NBD protocol to the server running
 * inside the qemu process on dst and wait until the copy converges.
 * On success update @migrate_flags so we don't tell 'migrate' command
 * to do the very same operation. On failure, the caller is
 * expected to call qemuMigrationSrcNBDCopyCancel to stop all
 * running copy operations.
 *
 * Returns 0 on success (@migrate_flags updated),
 *         -1 otherwise.
 */
static int
qemuMigrationSrcNBDStorageCopy(virQEMUDriverPtr driver,
                               virDomainObjPtr vm,
                               qemuMigrationCookiePtr mig,
                               const char *host,
                               unsigned long speed,
                               unsigned int *migrate_flags,
                               size_t nmigrate_disks,
                               const char **migrate_disks,
                               virConnectPtr dconn,
                               const char *tlsAlias,
                               const char *nbdURI,
                               unsigned int flags)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    int port;
    size_t i;
    unsigned long long mirror_speed = speed;
    bool mirror_shallow = *migrate_flags & QEMU_MONITOR_MIGRATE_NON_SHARED_INC;
    int rv;
    g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
    g_autoptr(virURI) uri = NULL;
    const char *socket = NULL;

    VIR_DEBUG("Starting drive mirrors for domain %s", vm->def->name);

    if (mirror_speed > LLONG_MAX >> 20) {
        virReportError(VIR_ERR_OVERFLOW,
                       _("bandwidth must be less than %llu"),
                       LLONG_MAX >> 20);
        return -1;
    }
    mirror_speed <<= 20;

    /* steal NBD port and thus prevent its propagation back to destination */
    port = mig->nbd->port;
    mig->nbd->port = 0;

    if (nbdURI) {
        uri = virURIParse(nbdURI);
        if (!uri)
            return -1;

        if (STREQ(uri->scheme, "tcp")) {
            if (uri->server && STRNEQ(uri->server, ""))
                host = (char *)uri->server;
            if (uri->port)
                port = uri->port;
        } else if (STREQ(uri->scheme, "unix")) {
            if (flags & VIR_MIGRATE_TLS) {
                virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
                               _("NBD migration with TLS is not supported over UNIX socket"));
                return -1;
            }

            if (!uri->path) {
                virReportError(VIR_ERR_INVALID_ARG, "%s",
                               _("UNIX disks URI does not include path"));
                return -1;
            }
            socket = uri->path;

            if (qemuSecurityDomainSetPathLabel(driver, vm, socket, false) < 0)
                return -1;
        } else {
            virReportError(VIR_ERR_INVALID_ARG,
                           _("Unsupported scheme in disks URI: %s"),
                           uri->scheme);
        }
    }

    for (i = 0; i < vm->def->ndisks; i++) {
        virDomainDiskDefPtr disk = vm->def->disks[i];

        /* check whether disk should be migrated */
        if (!qemuMigrationAnyCopyDisk(disk, nmigrate_disks, migrate_disks))
            continue;

        if (qemuMigrationSrcNBDStorageCopyOne(driver, vm, disk, host, port,
                                              socket,
                                              mirror_speed, mirror_shallow,
                                              tlsAlias, flags) < 0)
            return -1;

        if (virDomainObjSave(vm, driver->xmlopt, cfg->stateDir) < 0) {
            VIR_WARN("Failed to save status on vm %s", vm->def->name);
            return -1;
        }
    }

    while ((rv = qemuMigrationSrcNBDStorageCopyReady(vm, QEMU_ASYNC_JOB_MIGRATION_OUT)) != 1) {
        if (rv < 0)
            return -1;

        if (priv->job.abortJob) {
            priv->job.current->status = QEMU_DOMAIN_JOB_STATUS_CANCELED;
            virReportError(VIR_ERR_OPERATION_ABORTED, _("%s: %s"),
                           qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
                           _("canceled by client"));
            return -1;
        }

        if (dconn && virConnectIsAlive(dconn) <= 0) {
            virReportError(VIR_ERR_OPERATION_FAILED, "%s",
                           _("Lost connection to destination host"));
            return -1;
        }

        if (virDomainObjWait(vm) < 0)
            return -1;
    }

    qemuMigrationSrcFetchMirrorStats(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
                                     priv->job.current);

    /* Okay, all disks are ready. Modify migrate_flags */
    *migrate_flags &= ~(QEMU_MONITOR_MIGRATE_NON_SHARED_DISK |
                        QEMU_MONITOR_MIGRATE_NON_SHARED_INC);

    return 0;
}

/**
 * qemuMigrationSrcIsAllowedHostdev:
 * @def: domain definition
 *
 * Checks that @def does not contain any host devices unsupported across
 * migrations. Returns true if the vm is allowed to migrate.
 */
static bool
qemuMigrationSrcIsAllowedHostdev(const virDomainDef *def)
{
    size_t i;

    /* Migration with USB host devices is allowed, all other devices are
     * forbidden. */
    for (i = 0; i < def->nhostdevs; i++) {
        virDomainHostdevDefPtr hostdev = def->hostdevs[i];
        switch ((virDomainHostdevMode)hostdev->mode) {
        case VIR_DOMAIN_HOSTDEV_MODE_CAPABILITIES:
            virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
                           _("cannot migrate a domain with <hostdev mode='capabilities'>"));
            return false;

        case VIR_DOMAIN_HOSTDEV_MODE_SUBSYS:
            switch ((virDomainHostdevSubsysType)hostdev->source.subsys.type) {
            case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_USB:
                /* USB devices can be "migrated" */
                continue;

            case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_SCSI:
            case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_SCSI_HOST:
            case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_MDEV:
                virReportError(VIR_ERR_OPERATION_UNSUPPORTED,
                               _("cannot migrate a domain with <hostdev mode='subsystem' type='%s'>"),
                               virDomainHostdevSubsysTypeToString(hostdev->source.subsys.type));
                return false;

            case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_PCI:
                /*
                 * if this is a network interface with <teaming
                 * type='transient'>, migration *is* allowed because
                 * the device will be auto-unplugged by QEMU during
                 * migration.
                 */
                if (hostdev->parentnet &&
                    hostdev->parentnet->teaming.type == VIR_DOMAIN_NET_TEAMING_TYPE_TRANSIENT) {
                    continue;
                }

                /* all other PCI hostdevs can't be migrated */
                virReportError(VIR_ERR_OPERATION_UNSUPPORTED,
                               _("cannot migrate a domain with <hostdev mode='subsystem' type='%s'>"),
                               virDomainHostdevSubsysTypeToString(hostdev->source.subsys.type));
                return false;

            case VIR_DOMAIN_HOSTDEV_SUBSYS_TYPE_LAST:
                virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                               _("invalid hostdev subsystem type"));
                return false;
            }
            break;

        case VIR_DOMAIN_HOSTDEV_MODE_LAST:
            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                           _("invalid hostdev mode"));
            return false;
        }
    }

    return true;
}

/**
 * qemuMigrationSrcIsAllowed:
 * @driver: qemu driver struct
 * @vm: domain object
 * @remote: migration is remote
 * @flags: migration flags (see struct virDomainMigrateFlags)
 *
 * Validates that the configuration of @vm can be migrated in various
 * situations. If @remote is true, the migration happens to remote host. @flags
 * is used to check various special migration types according to the request.
 *
 * Returns true if migration is supported. Reports libvirt error and returns
 * false otherwise.
 */
bool
qemuMigrationSrcIsAllowed(virQEMUDriverPtr driver,
                          virDomainObjPtr vm,
                          bool remote,
                          unsigned int flags)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
int nsnapshots;
|
2013-06-10 14:30:48 +00:00
|
|
|
int pauseReason;
|
Convert 'int i' to 'size_t i' in src/qemu files
Convert the type of loop iterators named 'i', 'j', k',
'ii', 'jj', 'kk', to be 'size_t' instead of 'int' or
'unsigned int', also santizing 'ii', 'jj', 'kk' to use
the normal 'i', 'j', 'k' naming
Signed-off-by: Daniel P. Berrange <berrange@redhat.com>
2013-07-08 14:09:33 +00:00
|
|
|
size_t i;
|
snapshot: prevent migration from stranding snapshot data
Migration is another case of stranding metadata. And since
snapshot metadata is arbitrarily large, there's no way to
shoehorn it into the migration cookie of migration v3.
This patch consolidates two existing locations for migration
validation into one helper function, then enhances that function
to also do the new checks. If we could always trust the source
to validate migration, then the destination would not have to
do anything; but since older servers that did not do checking
can migrate to newer destinations, we have to repeat some of
the same checks on the destination; meanwhile, we want to
detect failures as soon as possible. With migration v2, this
means that validation will reject things at Prepare on the
destination if the XML exposes the problem, otherwise at Perform
on the source; with migration v3, this means that validation
will reject things at Begin on the source, or if the source
is old and the XML exposes the problem, then at Prepare on the
destination.
This patch is necessarily over-strict. Once a later patch
properly handles auto-cleanup of snapshot metadata on the
death of a transient domain, then the only time we actually
need snapshots to prevent migration is when using the
--undefinesource flag on a persistent source domain.
It is possible to recreate snapshot metadata on the destination
with VIR_DOMAIN_SNAPSHOT_CREATE_REDEFINE and
VIR_DOMAIN_SNAPSHOT_CREATE_CURRENT. But for now, that is limited,
since if we delete the snapshot metadata prior to migration,
then we won't know the name of the current snapshot to pass
along; and if we delete the snapshot metadata after migration
and use the v3 migration cookie to pass along the name of the
current snapshot, then we need a way to bypass the fact that
this patch refuses migration with snapshot metadata present.
So eventually, we may have to introduce migration protocol v4
that allows feature negotiation and an arbitrary number of
handshake exchanges, so as to pass as many rpc calls as needed
to transfer all the snapshot xml hierarchy.
But all of that is thoughts for the future; for now, the best
course of action is to quit early, rather than get into a
funky state of stale metadata; then relax restrictions later.
* src/qemu/qemu_migration.h (qemuMigrationIsAllowed): Make static.
* src/qemu/qemu_migration.c (qemuMigrationIsAllowed): Alter
signature, and allow checks for both outgoing and incoming.
(qemuMigrationBegin, qemuMigrationPrepareAny)
(qemuMigrationPerformJob): Update callers.
2011-08-12 19:23:09 +00:00
|
|
|
|
2015-10-06 13:53:02 +00:00
|
|
|
/* perform these checks only when migrating to remote hosts */
|
|
|
|
if (remote) {
|
|
|
|
nsnapshots = virDomainSnapshotObjListNum(vm->snapshots, NULL, 0);
|
|
|
|
if (nsnapshots < 0)
|
|
|
|
return false;
|
2013-06-10 14:30:48 +00:00
|
|
|
|
2015-10-06 13:53:02 +00:00
|
|
|
if (nsnapshots > 0) {
|
|
|
|
virReportError(VIR_ERR_OPERATION_INVALID,
|
|
|
|
_("cannot migrate domain with %d snapshots"),
|
|
|
|
nsnapshots);
|
|
|
|
return false;
|
snapshot: prevent migration from stranding snapshot data
Migration is another case of stranding metadata. And since
snapshot metadata is arbitrarily large, there's no way to
shoehorn it into the migration cookie of migration v3.
This patch consolidates two existing locations for migration
validation into one helper function, then enhances that function
to also do the new checks. If we could always trust the source
to validate migration, then the destination would not have to
do anything; but since older servers that did not do checking
can migrate to newer destinations, we have to repeat some of
the same checks on the destination; meanwhile, we want to
detect failures as soon as possible. With migration v2, this
means that validation will reject things at Prepare on the
destination if the XML exposes the problem, otherwise at Perform
on the source; with migration v3, this means that validation
will reject things at Begin on the source, or if the source
is old and the XML exposes the problem, then at Prepare on the
destination.
This patch is necessarily over-strict. Once a later patch
properly handles auto-cleanup of snapshot metadata on the
death of a transient domain, then the only time we actually
need snapshots to prevent migration is when using the
--undefinesource flag on a persistent source domain.
It is possible to recreate snapshot metadata on the destination
with VIR_DOMAIN_SNAPSHOT_CREATE_REDEFINE and
VIR_DOMAIN_SNAPSHOT_CREATE_CURRENT. But for now, that is limited,
since if we delete the snapshot metadata prior to migration,
then we won't know the name of the current snapshot to pass
along; and if we delete the snapshot metadata after migration
and use the v3 migration cookie to pass along the name of the
current snapshot, then we need a way to bypass the fact that
this patch refuses migration with snapshot metadata present.
So eventually, we may have to introduce migration protocol v4
that allows feature negotiation and an arbitrary number of
handshake exchanges, so as to pass as many rpc calls as needed
to transfer all the snapshot xml hierarchy.
But all of that is thoughts for the future; for now, the best
course of action is to quit early, rather than get into a
funky state of stale metadata; then relax restrictions later.
* src/qemu/qemu_migration.h (qemuMigrationIsAllowed): Make static.
* src/qemu/qemu_migration.c (qemuMigrationIsAllowed): Alter
signature, and allow checks for both outgoing and incoming.
(qemuMigrationBegin, qemuMigrationPrepareAny)
(qemuMigrationPerformJob): Update callers.
2011-08-12 19:23:09 +00:00
|
|
|
}
|
2019-08-08 14:55:09 +00:00
|
|
|
}
|
|
|
|
|
2015-10-06 15:02:09 +00:00
|
|
|
/* following checks don't make sense for offline migration */
|
|
|
|
if (!(flags & VIR_MIGRATE_OFFLINE)) {
|
2020-02-20 10:54:44 +00:00
|
|
|
if (remote) {
|
|
|
|
/* cancel migration if disk I/O error is emitted while migrating */
|
|
|
|
if (flags & VIR_MIGRATE_ABORT_ON_ERROR &&
|
|
|
|
virDomainObjGetState(vm, &pauseReason) == VIR_DOMAIN_PAUSED &&
|
|
|
|
pauseReason == VIR_DOMAIN_PAUSED_IOERROR) {
|
|
|
|
virReportError(VIR_ERR_OPERATION_INVALID, "%s",
|
|
|
|
_("cannot migrate domain with I/O error"));
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (qemuProcessAutoDestroyActive(driver, vm)) {
|
|
|
|
virReportError(VIR_ERR_OPERATION_INVALID,
|
|
|
|
"%s", _("domain is marked for auto destroy"));
|
|
|
|
return false;
|
|
|
|
}
|
2015-10-06 15:02:09 +00:00
|
|
|
}
|
2015-10-06 13:53:02 +00:00
|
|
|
|
2011-01-31 10:47:03 +00:00
|
|
|
|
2015-10-06 15:02:09 +00:00
|
|
|
if (qemuDomainHasBlockjob(vm, false)) {
|
|
|
|
virReportError(VIR_ERR_OPERATION_INVALID, "%s",
|
2016-06-09 11:19:41 +00:00
|
|
|
_("domain has active block job"));
|
2015-10-06 15:02:09 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2018-02-12 17:11:41 +00:00
|
|
|
if (!qemuMigrationSrcIsAllowedHostdev(vm->def))
|
2015-10-06 15:02:09 +00:00
|
|
|
return false;
|
|
|
|
|
2017-01-28 10:32:13 +00:00
|
|
|
if (vm->def->cpu) {
|
2017-03-24 08:27:26 +00:00
|
|
|
/* QEMU blocks migration and save with invariant TSC enabled
|
|
|
|
* unless TSC frequency is explicitly set.
|
|
|
|
*/
|
2017-03-24 08:27:16 +00:00
|
|
|
if (virCPUCheckFeature(vm->def->os.arch, vm->def->cpu,
|
|
|
|
"invtsc") == 1) {
|
2017-03-24 08:27:26 +00:00
|
|
|
bool block = true;
|
|
|
|
|
|
|
|
for (i = 0; i < vm->def->clock.ntimers; i++) {
|
|
|
|
virDomainTimerDefPtr timer = vm->def->clock.timers[i];
|
|
|
|
|
|
|
|
if (timer->name == VIR_DOMAIN_TIMER_NAME_TSC &&
|
|
|
|
timer->frequency > 0) {
|
|
|
|
block = false;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (block) {
|
|
|
|
virReportError(VIR_ERR_OPERATION_INVALID, "%s",
|
|
|
|
_("domain has 'invtsc' CPU feature but "
|
|
|
|
"TSC frequency is not specified"));
|
|
|
|
return false;
|
|
|
|
}
|
2014-09-29 13:58:38 +00:00
|
|
|
}
|
2014-05-06 11:55:44 +00:00
|
|
|
}
|
|
|
|
|
2015-10-06 15:02:09 +00:00
|
|
|
/* Verify that memory device config can be transferred reliably */
|
|
|
|
for (i = 0; i < vm->def->nmems; i++) {
|
|
|
|
virDomainMemoryDefPtr mem = vm->def->mems[i];
|
2015-01-19 13:28:14 +00:00
|
|
|
|
2015-10-06 15:02:09 +00:00
|
|
|
if (mem->model == VIR_DOMAIN_MEMORY_MODEL_DIMM &&
|
|
|
|
mem->info.type != VIR_DOMAIN_DEVICE_ADDRESS_TYPE_DIMM) {
|
|
|
|
virReportError(VIR_ERR_OPERATION_INVALID, "%s",
|
|
|
|
_("domain's dimm info lacks slot ID "
|
|
|
|
"or base address"));
|
2015-01-19 13:28:14 +00:00
|
|
|
|
2015-10-06 15:02:09 +00:00
|
|
|
return false;
|
|
|
|
}
|
2015-01-19 13:28:14 +00:00
|
|
|
}
|
2016-09-20 09:24:49 +00:00
|
|
|
|
2020-07-24 03:34:11 +00:00
|
|
|
for (i = 0; i < vm->def->nshmems; i++) {
|
|
|
|
virDomainShmemDefPtr shmem = vm->def->shmems[i];
|
|
|
|
|
|
|
|
if (shmem->model == VIR_DOMAIN_SHMEM_MODEL_IVSHMEM) {
|
|
|
|
virReportError(VIR_ERR_OPERATION_INVALID, "%s",
|
|
|
|
_("migration with legacy shmem device is not supported"));
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
if (shmem->role != VIR_DOMAIN_SHMEM_ROLE_MASTER) {
|
|
|
|
virReportError(VIR_ERR_OPERATION_INVALID,
|
|
|
|
_("shmem device '%s' cannot be migrated, "
|
|
|
|
"only shmem with role='%s' can be migrated"),
|
|
|
|
shmem->name,
|
|
|
|
virDomainShmemRoleTypeToString(VIR_DOMAIN_SHMEM_ROLE_MASTER));
|
|
|
|
return false;
|
|
|
|
}
|
2016-09-20 09:24:49 +00:00
|
|
|
}
|
2020-02-20 10:54:44 +00:00
|
|
|
|
|
|
|
for (i = 0; i < vm->def->nnets; i++) {
|
|
|
|
virDomainNetDefPtr net = vm->def->nets[i];
|
2020-10-14 17:08:27 +00:00
|
|
|
qemuSlirpPtr slirp;
|
|
|
|
|
|
|
|
if (net->type == VIR_DOMAIN_NET_TYPE_VDPA) {
|
|
|
|
virReportError(VIR_ERR_OPERATION_INVALID, "%s",
|
|
|
|
_("vDPA devices cannot be migrated"));
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
slirp = QEMU_DOMAIN_NETWORK_PRIVATE(net)->slirp;
|
2020-02-20 10:54:44 +00:00
|
|
|
|
|
|
|
if (slirp && !qemuSlirpHasFeature(slirp, QEMU_SLIRP_FEATURE_MIGRATE)) {
|
|
|
|
virReportError(VIR_ERR_OPERATION_INVALID, "%s",
|
|
|
|
_("a slirp-helper cannot be migrated"));
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
2020-01-30 16:28:27 +00:00
|
|
|
|
|
|
|
for (i = 0; i < vm->def->nfss; i++) {
|
|
|
|
virDomainFSDefPtr fs = vm->def->fss[i];
|
|
|
|
|
|
|
|
if (fs->fsdriver == VIR_DOMAIN_FS_DRIVER_TYPE_VIRTIOFS) {
|
|
|
|
virReportError(VIR_ERR_OPERATION_INVALID, "%s",
|
|
|
|
_("migration with virtiofs device is not supported"));
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
2020-02-25 09:55:11 +00:00
|
|
|
|
2021-02-05 09:48:51 +00:00
|
|
|
if (priv->dbusVMStateIds &&
|
2020-02-25 09:55:11 +00:00
|
|
|
!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_DBUS_VMSTATE)) {
|
|
|
|
virReportError(VIR_ERR_OPERATION_INVALID, "%s",
|
|
|
|
_("cannot migrate this domain without dbus-vmstate support"));
|
|
|
|
return false;
|
|
|
|
}
|
2020-09-17 13:30:42 +00:00
|
|
|
|
|
|
|
for (i = 0; i < vm->def->ndisks; i++) {
|
|
|
|
virDomainDiskDefPtr disk = vm->def->disks[i];
|
|
|
|
|
|
|
|
if (disk->transient) {
|
|
|
|
virReportError(VIR_ERR_OPERATION_INVALID, "%s",
|
|
|
|
_("migration with transient disk is not supported"));
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
2015-01-19 13:28:14 +00:00
|
|
|
}
|
|
|
|
|
2011-01-31 10:47:03 +00:00
|
|
|
return true;
|
|
|
|
}
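

/* Note (illustrative, not part of the original source): the 'invtsc' check
 * above only passes when the guest XML pins the TSC frequency via the clock
 * timer element, e.g.:
 *
 *   <clock offset='utc'>
 *     <timer name='tsc' frequency='2500000000'/>
 *   </clock>
 *
 * The frequency value shown is only an example; without such an explicit
 * frequency QEMU refuses to migrate or save a guest using invariant TSC.
 */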


static bool
qemuMigrationSrcIsSafe(virDomainDefPtr def,
                       virQEMUCapsPtr qemuCaps,
                       size_t nmigrate_disks,
                       const char **migrate_disks,
                       unsigned int flags)
{
    bool storagemigration = flags & (VIR_MIGRATE_NON_SHARED_DISK |
                                     VIR_MIGRATE_NON_SHARED_INC);
    size_t i;
    int rc;

    for (i = 0; i < def->ndisks; i++) {
        virDomainDiskDefPtr disk = def->disks[i];
        const char *src = virDomainDiskGetSource(disk);
        int actualType = virStorageSourceGetActualType(disk->src);
        bool unsafe = false;

        /* Disks without any source (i.e. floppies and CD-ROMs)
         * OR readonly are safe. */
        if (virStorageSourceIsEmpty(disk->src) ||
            disk->src->readonly)
            continue;

        /* Disks which are migrated by qemu are safe too. */
        if (storagemigration &&
            qemuMigrationAnyCopyDisk(disk, nmigrate_disks, migrate_disks))
            continue;

        /* However, disks on local FS (e.g. ext4) are not safe. */
        switch ((virStorageType) actualType) {
        case VIR_STORAGE_TYPE_FILE:
            if ((rc = virFileIsSharedFS(src)) < 0) {
                return false;
            } else if (rc == 0) {
                unsafe = true;
            }
            if ((rc = virFileIsClusterFS(src)) < 0)
                return false;
            else if (rc == 1)
                continue;
            break;

        case VIR_STORAGE_TYPE_NETWORK:
            /* But network disks are safe again. */
            continue;

        case VIR_STORAGE_TYPE_NVME:
            unsafe = true;
            break;

        case VIR_STORAGE_TYPE_VHOST_USER:
        case VIR_STORAGE_TYPE_NONE:
        case VIR_STORAGE_TYPE_BLOCK:
        case VIR_STORAGE_TYPE_DIR:
        case VIR_STORAGE_TYPE_VOLUME:
        case VIR_STORAGE_TYPE_LAST:
            break;
        }

        if (unsafe) {
            virReportError(VIR_ERR_MIGRATE_UNSAFE, "%s",
                           _("Migration without shared storage is unsafe"));
            return false;
        }

        /* Our code elsewhere guarantees shared disks are either readonly (in
         * which case cache mode doesn't matter) or used with cache=none or
         * cache=directsync. */
        if (disk->src->shared ||
            disk->cachemode == VIR_DOMAIN_DISK_CACHE_DISABLE ||
            disk->cachemode == VIR_DOMAIN_DISK_CACHE_DIRECTSYNC)
            continue;

        if (virQEMUCapsGet(qemuCaps, QEMU_CAPS_MIGRATION_FILE_DROP_CACHE)) {
            VIR_DEBUG("QEMU supports flushing caches; migration is safe");
            continue;
        }

        virReportError(VIR_ERR_MIGRATE_UNSAFE, "%s",
                       _("Migration may lead to data corruption if disks"
                         " use cache other than none or directsync"));
        return false;
    }

    return true;
}
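

/* Illustrative sketch (not part of the original code): how the two helpers
 * above are typically combined by an outgoing-migration entry point before
 * any job is started. This mirrors the checks done later in
 * qemuMigrationSrcBeginPhase(); the wrapper name is hypothetical and the
 * block is compiled out on purpose.
 */
#if 0
static bool
qemuMigrationSrcPrecheckExample(virQEMUDriverPtr driver,
                                virDomainObjPtr vm,
                                unsigned int flags)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;

    /* Reject configurations that can never be migrated (snapshots,
     * forbidden hostdevs, active block jobs, ...). */
    if (!qemuMigrationSrcIsAllowed(driver, vm, true, flags))
        return false;

    /* Unless the caller opted in with VIR_MIGRATE_UNSAFE, also reject disk
     * setups that could be corrupted when storage is not shared. */
    if (!(flags & VIR_MIGRATE_UNSAFE) &&
        !qemuMigrationSrcIsSafe(vm->def, priv->qemuCaps, 0, NULL, flags))
        return false;

    return true;
}
#endif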


void
qemuMigrationAnyPostcopyFailed(virQEMUDriverPtr driver,
                               virDomainObjPtr vm)
{
    virDomainState state;
    int reason;

    state = virDomainObjGetState(vm, &reason);

    if (state != VIR_DOMAIN_PAUSED &&
        state != VIR_DOMAIN_RUNNING)
        return;

    if (state == VIR_DOMAIN_PAUSED &&
        reason == VIR_DOMAIN_PAUSED_POSTCOPY_FAILED)
        return;

    VIR_WARN("Migration of domain %s failed during post-copy; "
             "leaving the domain paused", vm->def->name);

    if (state == VIR_DOMAIN_RUNNING) {
        if (qemuProcessStopCPUs(driver, vm,
                                VIR_DOMAIN_PAUSED_POSTCOPY_FAILED,
                                QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
            VIR_WARN("Unable to pause guest CPUs for %s", vm->def->name);
    } else {
        virDomainObjSetState(vm, VIR_DOMAIN_PAUSED,
                             VIR_DOMAIN_PAUSED_POSTCOPY_FAILED);
    }
}


static int
qemuMigrationSrcWaitForSpice(virDomainObjPtr vm)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    qemuDomainJobPrivatePtr jobPriv = priv->job.privateData;

    if (!jobPriv->spiceMigration)
        return 0;

    VIR_DEBUG("Waiting for SPICE to finish migration");
    while (!jobPriv->spiceMigrated && !priv->job.abortJob) {
        if (virDomainObjWait(vm) < 0)
            return -1;
    }
    return 0;
}


static void
qemuMigrationUpdateJobType(qemuDomainJobInfoPtr jobInfo)
{
    switch ((qemuMonitorMigrationStatus) jobInfo->stats.mig.status) {
    case QEMU_MONITOR_MIGRATION_STATUS_POSTCOPY:
        jobInfo->status = QEMU_DOMAIN_JOB_STATUS_POSTCOPY;
        break;

    case QEMU_MONITOR_MIGRATION_STATUS_COMPLETED:
        jobInfo->status = QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED;
        break;

    case QEMU_MONITOR_MIGRATION_STATUS_INACTIVE:
        jobInfo->status = QEMU_DOMAIN_JOB_STATUS_NONE;
        break;

    case QEMU_MONITOR_MIGRATION_STATUS_ERROR:
        jobInfo->status = QEMU_DOMAIN_JOB_STATUS_FAILED;
        break;

    case QEMU_MONITOR_MIGRATION_STATUS_CANCELLED:
        jobInfo->status = QEMU_DOMAIN_JOB_STATUS_CANCELED;
        break;

    case QEMU_MONITOR_MIGRATION_STATUS_PRE_SWITCHOVER:
        jobInfo->status = QEMU_DOMAIN_JOB_STATUS_PAUSED;
        break;

    case QEMU_MONITOR_MIGRATION_STATUS_DEVICE:
        jobInfo->status = QEMU_DOMAIN_JOB_STATUS_MIGRATING;
        break;

    case QEMU_MONITOR_MIGRATION_STATUS_SETUP:
    case QEMU_MONITOR_MIGRATION_STATUS_ACTIVE:
    case QEMU_MONITOR_MIGRATION_STATUS_CANCELLING:
    case QEMU_MONITOR_MIGRATION_STATUS_WAIT_UNPLUG:
    case QEMU_MONITOR_MIGRATION_STATUS_LAST:
        break;
    }
}


int
qemuMigrationAnyFetchStats(virQEMUDriverPtr driver,
                           virDomainObjPtr vm,
                           qemuDomainAsyncJob asyncJob,
                           qemuDomainJobInfoPtr jobInfo,
                           char **error)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    qemuMonitorMigrationStats stats;
    int rv;

    if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
        return -1;

    rv = qemuMonitorGetMigrationStats(priv->mon, &stats, error);

    if (qemuDomainObjExitMonitor(driver, vm) < 0 || rv < 0)
        return -1;

    jobInfo->stats.mig = stats;

    return 0;
}
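

/* Illustrative only (not part of the original code): a one-shot caller of
 * qemuMigrationAnyFetchStats(), e.g. for logging outside the event-driven
 * completion path. 'driver', 'vm' and 'priv' are assumed to be in scope as
 * in the surrounding functions; the block is compiled out on purpose.
 */
#if 0
{
    char *err = NULL;

    if (qemuMigrationAnyFetchStats(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
                                   priv->job.current, &err) < 0)
        VIR_WARN("cannot fetch migration stats: %s",
                 err ? err : "unknown error");
    VIR_FREE(err);
}
#endif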


static const char *
qemuMigrationJobName(virDomainObjPtr vm)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;

    switch (priv->job.asyncJob) {
    case QEMU_ASYNC_JOB_MIGRATION_OUT:
        return _("migration out job");
    case QEMU_ASYNC_JOB_SAVE:
        return _("domain save job");
    case QEMU_ASYNC_JOB_DUMP:
        return _("domain core dump job");
    case QEMU_ASYNC_JOB_NONE:
        return _("undefined");
    case QEMU_ASYNC_JOB_MIGRATION_IN:
        return _("migration in job");
    case QEMU_ASYNC_JOB_SNAPSHOT:
        return _("snapshot job");
    case QEMU_ASYNC_JOB_START:
        return _("start job");
    case QEMU_ASYNC_JOB_BACKUP:
        return _("backup job");
    case QEMU_ASYNC_JOB_LAST:
    default:
        return _("job");
    }
}


static int
qemuMigrationJobCheckStatus(virQEMUDriverPtr driver,
                            virDomainObjPtr vm,
                            qemuDomainAsyncJob asyncJob)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    qemuDomainJobInfoPtr jobInfo = priv->job.current;
    char *error = NULL;
    bool events = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_EVENT);
    int ret = -1;

    if (!events ||
        jobInfo->stats.mig.status == QEMU_MONITOR_MIGRATION_STATUS_ERROR) {
        if (qemuMigrationAnyFetchStats(driver, vm, asyncJob, jobInfo, &error) < 0)
            return -1;
    }

    qemuMigrationUpdateJobType(jobInfo);

    switch (jobInfo->status) {
    case QEMU_DOMAIN_JOB_STATUS_NONE:
        virReportError(VIR_ERR_OPERATION_FAILED, _("%s: %s"),
                       qemuMigrationJobName(vm), _("is not active"));
        goto cleanup;

    case QEMU_DOMAIN_JOB_STATUS_FAILED:
        virReportError(VIR_ERR_OPERATION_FAILED, _("%s: %s"),
                       qemuMigrationJobName(vm),
                       error ? error : _("unexpectedly failed"));
        goto cleanup;

    case QEMU_DOMAIN_JOB_STATUS_CANCELED:
        virReportError(VIR_ERR_OPERATION_ABORTED, _("%s: %s"),
                       qemuMigrationJobName(vm), _("canceled by client"));
        goto cleanup;

    case QEMU_DOMAIN_JOB_STATUS_COMPLETED:
    case QEMU_DOMAIN_JOB_STATUS_ACTIVE:
    case QEMU_DOMAIN_JOB_STATUS_MIGRATING:
    case QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED:
    case QEMU_DOMAIN_JOB_STATUS_POSTCOPY:
    case QEMU_DOMAIN_JOB_STATUS_PAUSED:
        break;
    }

    ret = 0;

 cleanup:
    VIR_FREE(error);
    return ret;
}


enum qemuMigrationCompletedFlags {
    QEMU_MIGRATION_COMPLETED_ABORT_ON_ERROR = (1 << 0),
    /* This flag should only be set when run on src host */
    QEMU_MIGRATION_COMPLETED_CHECK_STORAGE = (1 << 1),
    QEMU_MIGRATION_COMPLETED_POSTCOPY = (1 << 2),
    QEMU_MIGRATION_COMPLETED_PRE_SWITCHOVER = (1 << 3),
};
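

/* Illustrative only (not from the original code): a source-side wait loop
 * would typically OR these flags together depending on the migration
 * request before passing them to qemuMigrationAnyCompleted(), e.g.:
 *
 *   unsigned int waitFlags = QEMU_MIGRATION_COMPLETED_PRE_SWITCHOVER;
 *   if (migrateFlags & VIR_MIGRATE_ABORT_ON_ERROR)
 *       waitFlags |= QEMU_MIGRATION_COMPLETED_ABORT_ON_ERROR;
 *   if (mirroringStorage)
 *       waitFlags |= QEMU_MIGRATION_COMPLETED_CHECK_STORAGE;
 *   if (migrateFlags & VIR_MIGRATE_POSTCOPY)
 *       waitFlags |= QEMU_MIGRATION_COMPLETED_POSTCOPY;
 *
 * 'migrateFlags' and 'mirroringStorage' are hypothetical locals.
 */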


/**
 * Returns 1 if migration completed successfully,
 *         0 if the domain is still being migrated,
 *         -1 migration failed,
 *         -2 something else failed, we need to cancel migration.
 */
static int
qemuMigrationAnyCompleted(virQEMUDriverPtr driver,
                          virDomainObjPtr vm,
                          qemuDomainAsyncJob asyncJob,
                          virConnectPtr dconn,
                          unsigned int flags)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    qemuDomainJobInfoPtr jobInfo = priv->job.current;
    int pauseReason;

    if (qemuMigrationJobCheckStatus(driver, vm, asyncJob) < 0)
        goto error;

    /* This flag should only be set when run on src host */
    if (flags & QEMU_MIGRATION_COMPLETED_CHECK_STORAGE &&
        qemuMigrationSrcNBDStorageCopyReady(vm, asyncJob) < 0)
        goto error;

    if (flags & QEMU_MIGRATION_COMPLETED_ABORT_ON_ERROR &&
        virDomainObjGetState(vm, &pauseReason) == VIR_DOMAIN_PAUSED &&
        pauseReason == VIR_DOMAIN_PAUSED_IOERROR) {
        virReportError(VIR_ERR_OPERATION_FAILED, _("%s: %s"),
                       qemuMigrationJobName(vm), _("failed due to I/O error"));
        goto error;
    }

    if (dconn && virConnectIsAlive(dconn) <= 0) {
        virReportError(VIR_ERR_OPERATION_FAILED, "%s",
                       _("Lost connection to destination host"));
        goto error;
    }

    /* Migration was paused before serializing device state, let's return to
     * the caller so that it can finish all block jobs, resume migration, and
     * wait again for the real end of the migration.
     */
    if (flags & QEMU_MIGRATION_COMPLETED_PRE_SWITCHOVER &&
        jobInfo->status == QEMU_DOMAIN_JOB_STATUS_PAUSED) {
        VIR_DEBUG("Migration paused before switchover");
        return 1;
    }

    /* In case of postcopy the source considers migration completed at the
     * moment it switched from active to postcopy-active state. The destination
     * will continue waiting until the migrate state changes to completed.
     */
    if (flags & QEMU_MIGRATION_COMPLETED_POSTCOPY &&
        jobInfo->status == QEMU_DOMAIN_JOB_STATUS_POSTCOPY) {
        VIR_DEBUG("Migration switched to post-copy");
        return 1;
    }

    if (jobInfo->status == QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED)
        return 1;
    else
        return 0;

 error:
    switch (jobInfo->status) {
    case QEMU_DOMAIN_JOB_STATUS_MIGRATING:
    case QEMU_DOMAIN_JOB_STATUS_POSTCOPY:
    case QEMU_DOMAIN_JOB_STATUS_PAUSED:
        /* The migration was aborted by us rather than QEMU itself. */
        jobInfo->status = QEMU_DOMAIN_JOB_STATUS_FAILED;
        return -2;

    case QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED:
        /* Something failed after QEMU already finished the migration. */
        jobInfo->status = QEMU_DOMAIN_JOB_STATUS_FAILED;
        return -1;

    case QEMU_DOMAIN_JOB_STATUS_FAILED:
    case QEMU_DOMAIN_JOB_STATUS_CANCELED:
        /* QEMU aborted the migration. */
        return -1;

    case QEMU_DOMAIN_JOB_STATUS_ACTIVE:
    case QEMU_DOMAIN_JOB_STATUS_COMPLETED:
    case QEMU_DOMAIN_JOB_STATUS_NONE:
        /* Impossible. */
        break;
    }

    return -1;
}


/* Returns 0 on success, -2 when migration needs to be cancelled, or -1 when
 * QEMU reports failed migration.
 */
static int
qemuMigrationSrcWaitForCompletion(virQEMUDriverPtr driver,
                                  virDomainObjPtr vm,
                                  qemuDomainAsyncJob asyncJob,
                                  virConnectPtr dconn,
                                  unsigned int flags)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    qemuDomainJobInfoPtr jobInfo = priv->job.current;
    bool events = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_EVENT);
    int rv;

    jobInfo->status = QEMU_DOMAIN_JOB_STATUS_MIGRATING;

    while ((rv = qemuMigrationAnyCompleted(driver, vm, asyncJob,
                                           dconn, flags)) != 1) {
        if (rv < 0)
            return rv;

        if (events) {
            if (virDomainObjWait(vm) < 0) {
                if (virDomainObjIsActive(vm))
                    jobInfo->status = QEMU_DOMAIN_JOB_STATUS_FAILED;
                return -2;
            }
        } else {
            /* Poll every 50ms for progress & to allow cancellation */
            struct timespec ts = { .tv_sec = 0, .tv_nsec = 50 * 1000 * 1000ull };

            virObjectUnlock(vm);
            nanosleep(&ts, NULL);
            virObjectLock(vm);
        }
    }

    if (events)
        ignore_value(qemuMigrationAnyFetchStats(driver, vm, asyncJob, jobInfo, NULL));

    qemuDomainJobInfoUpdateTime(jobInfo);
    qemuDomainJobInfoUpdateDowntime(jobInfo);
    g_clear_pointer(&priv->job.completed, qemuDomainJobInfoFree);
    priv->job.completed = qemuDomainJobInfoCopy(jobInfo);
    priv->job.completed->status = QEMU_DOMAIN_JOB_STATUS_COMPLETED;

    if (asyncJob != QEMU_ASYNC_JOB_MIGRATION_OUT &&
        jobInfo->status == QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED)
        jobInfo->status = QEMU_DOMAIN_JOB_STATUS_COMPLETED;

    return 0;
}


static int
qemuMigrationDstWaitForCompletion(virQEMUDriverPtr driver,
                                  virDomainObjPtr vm,
                                  qemuDomainAsyncJob asyncJob,
                                  bool postcopy)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    unsigned int flags = 0;
    int rv;

    if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_EVENT))
        return 0;

    VIR_DEBUG("Waiting for incoming migration to complete");

    if (postcopy)
        flags = QEMU_MIGRATION_COMPLETED_POSTCOPY;

    while ((rv = qemuMigrationAnyCompleted(driver, vm, asyncJob,
                                           NULL, flags)) != 1) {
        if (rv < 0 || virDomainObjWait(vm) < 0)
            return -1;
    }

    return 0;
}


static int
qemuMigrationSrcGraphicsRelocate(virQEMUDriverPtr driver,
                                 virDomainObjPtr vm,
                                 qemuMigrationCookiePtr cookie,
                                 const char *graphicsuri)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    int ret = -1;
    const char *listenAddress = NULL;
    virSocketAddr addr;
    virURIPtr uri = NULL;
    int type = -1;
    int port = -1;
    int tlsPort = -1;
    const char *tlsSubject = NULL;

    if (!cookie || (!cookie->graphics && !graphicsuri))
        return 0;

    if (graphicsuri && !(uri = virURIParse(graphicsuri)))
        goto cleanup;

    if (cookie->graphics) {
        type = cookie->graphics->type;

        listenAddress = cookie->graphics->listen;

        if (!listenAddress ||
            (virSocketAddrParse(&addr, listenAddress, AF_UNSPEC) > 0 &&
             virSocketAddrIsWildcard(&addr)))
            listenAddress = cookie->remoteHostname;

        port = cookie->graphics->port;
        tlsPort = cookie->graphics->tlsPort;
        tlsSubject = cookie->graphics->tlsSubject;
    }

    if (uri) {
        size_t i;

        if ((type = virDomainGraphicsTypeFromString(uri->scheme)) < 0) {
            virReportError(VIR_ERR_INVALID_ARG,
                           _("unknown graphics type %s"), uri->scheme);
            goto cleanup;
        }

        if (uri->server)
            listenAddress = uri->server;
        if (uri->port > 0)
            port = uri->port;

        for (i = 0; i < uri->paramsCount; i++) {
            virURIParamPtr param = uri->params + i;

            if (STRCASEEQ(param->name, "tlsPort")) {
                if (virStrToLong_i(param->value, NULL, 10, &tlsPort) < 0) {
                    virReportError(VIR_ERR_INVALID_ARG,
                                   _("invalid tlsPort number: %s"),
                                   param->value);
                    goto cleanup;
                }
            } else if (STRCASEEQ(param->name, "tlsSubject")) {
                tlsSubject = param->value;
            }
        }
    }

    /* QEMU doesn't support VNC relocation yet, so
     * skip it to avoid generating an error
     */
    if (type != VIR_DOMAIN_GRAPHICS_TYPE_SPICE) {
        ret = 0;
        goto cleanup;
    }

    /* Older libvirt sends port == 0 for listen type='none' graphics. It's
     * safe to ignore such requests since relocation to unknown port does
     * not make sense in general.
     */
    if (port <= 0 && tlsPort <= 0) {
        ret = 0;
        goto cleanup;
    }

    if (qemuDomainObjEnterMonitorAsync(driver, vm,
                                       QEMU_ASYNC_JOB_MIGRATION_OUT) == 0) {
        qemuDomainJobPrivatePtr jobPriv = priv->job.privateData;

        ret = qemuMonitorGraphicsRelocate(priv->mon, type, listenAddress,
                                          port, tlsPort, tlsSubject);
        jobPriv->spiceMigration = !ret;
        if (qemuDomainObjExitMonitor(driver, vm) < 0)
            ret = -1;
    }

 cleanup:
    virURIFree(uri);
    return ret;
}
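

/* Illustrative only (not from the original code): the @graphicsuri accepted
 * by qemuMigrationSrcGraphicsRelocate() follows the parsing done above, e.g.:
 *
 *   spice://dest.example.com:5901/?tlsPort=5902&tlsSubject=CN=example
 *
 * The scheme selects the graphics type, server/port give the plain listen
 * address and port, and the tlsPort/tlsSubject query parameters override the
 * TLS settings carried in the migration cookie. The host name, port numbers
 * and subject shown are made-up example values.
 */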


static int
qemuMigrationDstOPDRelocate(virQEMUDriverPtr driver G_GNUC_UNUSED,
                            virDomainObjPtr vm,
                            qemuMigrationCookiePtr cookie)
{
    virDomainNetDefPtr netptr;
    size_t i;

    for (i = 0; i < cookie->network->nnets; i++) {
        netptr = vm->def->nets[i];

        switch (cookie->network->net[i].vporttype) {
        case VIR_NETDEV_VPORT_PROFILE_NONE:
        case VIR_NETDEV_VPORT_PROFILE_8021QBG:
        case VIR_NETDEV_VPORT_PROFILE_8021QBH:
            break;
        case VIR_NETDEV_VPORT_PROFILE_OPENVSWITCH:
            if (virNetDevOpenvswitchSetMigrateData(cookie->network->net[i].portdata,
                                                   netptr->ifname) != 0) {
                virReportError(VIR_ERR_INTERNAL_ERROR,
                               _("Unable to run command to set OVS port data for "
                                 "interface %s"), netptr->ifname);
                return -1;
            }
            break;
        default:
            break;
        }
    }

    return 0;
}


int
qemuMigrationDstCheckProtocol(virQEMUCapsPtr qemuCaps,
                              const char *migrateFrom)
{
    if (STRPREFIX(migrateFrom, "rdma")) {
        if (!virQEMUCapsGet(qemuCaps, QEMU_CAPS_MIGRATE_RDMA)) {
            virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
                           _("incoming RDMA migration is not supported "
                             "with this QEMU binary"));
            return -1;
        }
    } else if (!STRPREFIX(migrateFrom, "tcp") &&
               !STRPREFIX(migrateFrom, "exec") &&
               !STRPREFIX(migrateFrom, "fd") &&
               !STRPREFIX(migrateFrom, "unix") &&
               STRNEQ(migrateFrom, "stdio")) {
        virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
                       _("unknown migration protocol"));
        return -1;
    }

    return 0;
}
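

/* Illustrative only (not from the original code): example @migrateFrom
 * values as seen by qemuMigrationDstCheckProtocol(); the addresses and
 * paths are made up.
 *
 *   "tcp:0.0.0.0:49152"       accepted
 *   "unix:/tmp/migrate.sock"  accepted
 *   "rdma:192.0.2.1:49152"    accepted only with QEMU_CAPS_MIGRATE_RDMA
 *   "vnc:..."                 rejected as an unknown migration protocol
 */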


char *
qemuMigrationDstGetURI(const char *migrateFrom,
                       int migrateFd)
{
    char *uri = NULL;

    if (STREQ(migrateFrom, "stdio"))
        uri = g_strdup_printf("fd:%d", migrateFd);
    else
        uri = g_strdup(migrateFrom);

    return uri;
}
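

/* Illustrative only (not from the original code): the "stdio" pseudo
 * protocol is rewritten to an fd: URI, anything else is returned verbatim.
 *
 *   qemuMigrationDstGetURI("stdio", 7)           -> "fd:7"
 *   qemuMigrationDstGetURI("tcp:[::]:49152", -1) -> "tcp:[::]:49152"
 */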


int
qemuMigrationDstRun(virQEMUDriverPtr driver,
                    virDomainObjPtr vm,
                    const char *uri,
                    qemuDomainAsyncJob asyncJob)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    int rv;

    VIR_DEBUG("Setting up incoming migration with URI %s", uri);

    if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
        return -1;

    rv = qemuMonitorSetDBusVMStateIdList(priv->mon, priv->dbusVMStateIds);
    if (rv < 0)
        goto exit_monitor;

    rv = qemuMonitorMigrateIncoming(priv->mon, uri);

 exit_monitor:
    if (qemuDomainObjExitMonitor(driver, vm) < 0 || rv < 0)
        return -1;

    if (asyncJob == QEMU_ASYNC_JOB_MIGRATION_IN) {
        /* qemuMigrationDstWaitForCompletion is called from the Finish phase */
        return 0;
    }

    if (qemuMigrationDstWaitForCompletion(driver, vm, asyncJob, false) < 0)
        return -1;

    return 0;
}


/* This is called for outgoing non-p2p migrations when a connection to the
 * client which initiated the migration was closed but we were waiting for it
 * to follow up with the next phase, that is, in between
 * qemuDomainMigrateBegin3 and qemuDomainMigratePerform3 or
 * qemuDomainMigratePerform3 and qemuDomainMigrateConfirm3.
 */
static void
qemuMigrationSrcCleanup(virDomainObjPtr vm,
                        virConnectPtr conn,
                        void *opaque)
{
    virQEMUDriverPtr driver = opaque;
    qemuDomainObjPrivatePtr priv = vm->privateData;
    qemuDomainJobPrivatePtr jobPriv = priv->job.privateData;

    VIR_DEBUG("vm=%s, conn=%p, asyncJob=%s, phase=%s",
              vm->def->name, conn,
              qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
              qemuDomainAsyncJobPhaseToString(priv->job.asyncJob,
                                              priv->job.phase));

    if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_OUT))
        return;

    VIR_DEBUG("The connection which started outgoing migration of domain %s"
              " was closed; canceling the migration",
              vm->def->name);

    switch ((qemuMigrationJobPhase) priv->job.phase) {
    case QEMU_MIGRATION_PHASE_BEGIN3:
        /* just forget we were about to migrate */
        qemuMigrationJobFinish(driver, vm);
        break;

    case QEMU_MIGRATION_PHASE_PERFORM3_DONE:
        VIR_WARN("Migration of domain %s finished but we don't know if the"
                 " domain was successfully started on destination or not",
                 vm->def->name);
        qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
                                 jobPriv->migParams, priv->job.apiFlags);
        /* clear the job and let higher levels decide what to do */
        qemuMigrationJobFinish(driver, vm);
        break;

    case QEMU_MIGRATION_PHASE_PERFORM3:
        /* cannot be seen without an active migration API; unreachable */
    case QEMU_MIGRATION_PHASE_CONFIRM3:
    case QEMU_MIGRATION_PHASE_CONFIRM3_CANCELLED:
        /* all done; unreachable */
    case QEMU_MIGRATION_PHASE_PREPARE:
    case QEMU_MIGRATION_PHASE_FINISH2:
    case QEMU_MIGRATION_PHASE_FINISH3:
        /* incoming migration; unreachable */
    case QEMU_MIGRATION_PHASE_PERFORM2:
        /* single phase outgoing migration; unreachable */
    case QEMU_MIGRATION_PHASE_NONE:
    case QEMU_MIGRATION_PHASE_LAST:
        /* unreachable */
        ;
    }
}


/* The caller is supposed to lock the vm and start a migration job. */
static char *
qemuMigrationSrcBeginPhase(virQEMUDriverPtr driver,
                           virDomainObjPtr vm,
                           const char *xmlin,
                           const char *dname,
                           char **cookieout,
                           int *cookieoutlen,
                           size_t nmigrate_disks,
                           const char **migrate_disks,
                           unsigned long flags)
{
    g_autoptr(qemuMigrationCookie) mig = NULL;
    g_autoptr(virDomainDef) def = NULL;
    qemuDomainObjPrivatePtr priv = vm->privateData;
    unsigned int cookieFlags = QEMU_MIGRATION_COOKIE_LOCKSTATE;

    VIR_DEBUG("driver=%p, vm=%p, xmlin=%s, dname=%s,"
              " cookieout=%p, cookieoutlen=%p,"
              " nmigrate_disks=%zu, migrate_disks=%p, flags=0x%lx",
              driver, vm, NULLSTR(xmlin), NULLSTR(dname),
              cookieout, cookieoutlen, nmigrate_disks,
              migrate_disks, flags);

    /* Only set the phase if we are inside QEMU_ASYNC_JOB_MIGRATION_OUT.
     * Otherwise we will start the async job later in the perform phase losing
     * change protection.
     */
    if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT)
        qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_BEGIN3);

    if (!qemuMigrationSrcIsAllowed(driver, vm, true, flags))
        return NULL;

    if (!(flags & (VIR_MIGRATE_UNSAFE | VIR_MIGRATE_OFFLINE)) &&
        !qemuMigrationSrcIsSafe(vm->def, priv->qemuCaps,
                                nmigrate_disks, migrate_disks, flags))
        return NULL;

    if (flags & VIR_MIGRATE_POSTCOPY &&
        (!(flags & VIR_MIGRATE_LIVE) ||
         flags & VIR_MIGRATE_PAUSED)) {
        virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
                       _("post-copy migration is not supported with non-live "
                         "or paused migration"));
        return NULL;
    }

    if (flags & VIR_MIGRATE_POSTCOPY && flags & VIR_MIGRATE_TUNNELLED) {
        virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
                       _("post-copy is not supported with tunnelled migration"));
        return NULL;
    }

    if (flags & (VIR_MIGRATE_NON_SHARED_DISK | VIR_MIGRATE_NON_SHARED_INC)) {
        if (flags & VIR_MIGRATE_TUNNELLED) {
            if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV)) {
                virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
                               _("migration of non-shared storage is not supported with tunnelled migration and this QEMU"));
                return NULL;
            }

            if (nmigrate_disks) {
                virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
                               _("Selecting disks to migrate is not implemented for tunnelled migration"));
                return NULL;
            }
        } else {
            cookieFlags |= QEMU_MIGRATION_COOKIE_NBD;
            priv->nbdPort = 0;
        }

        if (nmigrate_disks) {
            size_t i, j;
            /* Check user requested only known disk targets. */
            for (i = 0; i < nmigrate_disks; i++) {
                for (j = 0; j < vm->def->ndisks; j++) {
                    if (STREQ(vm->def->disks[j]->dst, migrate_disks[i]))
                        break;
                }

                if (j == vm->def->ndisks) {
                    virReportError(VIR_ERR_INVALID_ARG,
                                   _("disk target %s not found"),
                                   migrate_disks[i]);
                    return NULL;
                }
            }
        }
    }

    if (virDomainDefHasMemoryHotplug(vm->def) ||
        ((flags & VIR_MIGRATE_PERSIST_DEST) &&
         vm->newDef && virDomainDefHasMemoryHotplug(vm->newDef)))
        cookieFlags |= QEMU_MIGRATION_COOKIE_MEMORY_HOTPLUG;

    if (!qemuDomainVcpuHotplugIsInOrder(vm->def) ||
        ((flags & VIR_MIGRATE_PERSIST_DEST) &&
         vm->newDef && !qemuDomainVcpuHotplugIsInOrder(vm->newDef)))
        cookieFlags |= QEMU_MIGRATION_COOKIE_CPU_HOTPLUG;

    if (priv->origCPU)
        cookieFlags |= QEMU_MIGRATION_COOKIE_CPU;

    cookieFlags |= QEMU_MIGRATION_COOKIE_ALLOW_REBOOT;

    if (!(flags & VIR_MIGRATE_OFFLINE))
        cookieFlags |= QEMU_MIGRATION_COOKIE_CAPS;

    if (!(mig = qemuMigrationCookieNew(vm->def, priv->origname)))
        return NULL;

    if (qemuMigrationCookieFormat(mig, driver, vm,
                                  QEMU_MIGRATION_SOURCE,
                                  cookieout, cookieoutlen,
                                  cookieFlags) < 0)
        return NULL;

    if (flags & VIR_MIGRATE_OFFLINE) {
        if (flags & (VIR_MIGRATE_NON_SHARED_DISK |
                     VIR_MIGRATE_NON_SHARED_INC)) {
            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                           _("offline migration cannot handle "
                             "non-shared storage"));
            return NULL;
        }
        if (!(flags & VIR_MIGRATE_PERSIST_DEST)) {
            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                           _("offline migration must be specified with "
                             "the persistent flag set"));
            return NULL;
        }
        if (flags & VIR_MIGRATE_TUNNELLED) {
            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                           _("tunnelled offline migration does not "
                             "make sense"));
            return NULL;
        }
    }

    if (xmlin) {
        if (!(def = virDomainDefParseString(xmlin, driver->xmlopt, priv->qemuCaps,
                                            VIR_DOMAIN_DEF_PARSE_INACTIVE |
                                            VIR_DOMAIN_DEF_PARSE_SKIP_VALIDATE)))
            return NULL;

        if (!qemuDomainCheckABIStability(driver, vm, def))
            return NULL;

        return qemuDomainDefFormatLive(driver, priv->qemuCaps, def, NULL, false, true);
    } else {
        return qemuDomainDefFormatLive(driver, priv->qemuCaps, vm->def, priv->origCPU,
                                       false, true);
    }
}
|
|
|
|
|
2013-06-25 07:44:14 +00:00
|
|
|
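/* Entry point for the Begin phase on the source host: grabs a modify or
 * migration-out job (the latter when VIR_MIGRATE_CHANGE_PROTECTION is
 * requested), refreshes removable media, and produces the domain XML and
 * migration cookie via qemuMigrationSrcBeginPhase(). */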
char *
qemuMigrationSrcBegin(virConnectPtr conn,
                      virDomainObjPtr vm,
                      const char *xmlin,
                      const char *dname,
                      char **cookieout,
                      int *cookieoutlen,
                      size_t nmigrate_disks,
                      const char **migrate_disks,
                      unsigned long flags)
{
    virQEMUDriverPtr driver = conn->privateData;
    g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
    char *xml = NULL;
    qemuDomainAsyncJob asyncJob;

    if (cfg->migrateTLSForce &&
        !(flags & VIR_MIGRATE_TUNNELLED) &&
        !(flags & VIR_MIGRATE_TLS)) {
        virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                       _("this libvirtd instance allows migration only with VIR_MIGRATE_TLS flag"));
        goto cleanup;
    }

    if ((flags & VIR_MIGRATE_CHANGE_PROTECTION)) {
        if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
                                  flags) < 0)
            goto cleanup;
        asyncJob = QEMU_ASYNC_JOB_MIGRATION_OUT;
    } else {
        if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
            goto cleanup;
        asyncJob = QEMU_ASYNC_JOB_NONE;
    }

    qemuMigrationSrcStoreDomainState(vm);

    if (!(flags & VIR_MIGRATE_OFFLINE) && virDomainObjCheckActive(vm) < 0)
        goto endjob;

    /* Check if there is any ejected media.
     * We don't want to require them on the destination.
     */
    if (!(flags & VIR_MIGRATE_OFFLINE) &&
        qemuProcessRefreshDisks(driver, vm, asyncJob) < 0)
        goto endjob;

    if (!(xml = qemuMigrationSrcBeginPhase(driver, vm, xmlin, dname,
                                           cookieout, cookieoutlen,
                                           nmigrate_disks, migrate_disks, flags)))
        goto endjob;

    if ((flags & VIR_MIGRATE_CHANGE_PROTECTION)) {
        /* We keep the job active across API calls until the confirm() call.
         * This prevents any other APIs being invoked while migration is taking
         * place.
         */
        if (virCloseCallbacksSet(driver->closeCallbacks, vm, conn,
                                 qemuMigrationSrcCleanup) < 0) {
            VIR_FREE(xml);
            goto endjob;
        }
        qemuMigrationJobContinue(vm);
    } else {
        goto endjob;
    }

 cleanup:
    virDomainObjEndAPI(&vm);
    return xml;

 endjob:
    if (flags & VIR_MIGRATE_CHANGE_PROTECTION)
        qemuMigrationJobFinish(driver, vm);
    else
        qemuDomainObjEndJob(driver, vm);
    goto cleanup;
}

/* Prepare is the first step, and it runs on the destination host.
 */

static void
qemuMigrationDstPrepareCleanup(virQEMUDriverPtr driver,
                               virDomainObjPtr vm)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;

    VIR_DEBUG("driver=%p, vm=%s, job=%s, asyncJob=%s",
              driver,
              vm->def->name,
              qemuDomainJobTypeToString(priv->job.active),
              qemuDomainAsyncJobTypeToString(priv->job.asyncJob));

    virPortAllocatorRelease(priv->migrationPort);
    priv->migrationPort = 0;

    if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_IN))
        return;

    qemuDomainObjDiscardAsyncJob(driver, vm);
}

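/* Build the qemuProcessIncomingDef describing how the destination QEMU
 * should listen for the incoming migration: "stdio" for tunnelled
 * migration, a UNIX socket path, or a protocol:host:port triple with IPv6
 * addresses enclosed in brackets. */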
static qemuProcessIncomingDefPtr
qemuMigrationDstPrepare(virDomainObjPtr vm,
                        bool tunnel,
                        const char *protocol,
                        const char *listenAddress,
                        unsigned short port,
                        int fd)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    g_autofree char *migrateFrom = NULL;

    if (tunnel) {
        migrateFrom = g_strdup("stdio");
    } else if (g_strcmp0(protocol, "unix") == 0) {
        migrateFrom = g_strdup_printf("%s:%s", protocol, listenAddress);
    } else {
        bool encloseAddress = false;
        bool hostIPv6Capable = false;
        struct addrinfo *info = NULL;
        struct addrinfo hints = { .ai_flags = AI_ADDRCONFIG,
                                  .ai_socktype = SOCK_STREAM };
        const char *incFormat;

        if (getaddrinfo("::", NULL, &hints, &info) == 0) {
            freeaddrinfo(info);
            hostIPv6Capable = true;
        }

        if (listenAddress) {
            if (virSocketAddrNumericFamily(listenAddress) == AF_INET6) {
                if (!hostIPv6Capable) {
                    virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
                                   _("host isn't capable of IPv6"));
                    return NULL;
                }
                /* IPv6 address must be escaped in brackets on the cmd line */
                encloseAddress = true;
            } else {
                /* listenAddress is a hostname or IPv4 */
            }
        } else if (hostIPv6Capable) {
            /* Listen on :: instead of 0.0.0.0 if QEMU understands it
             * and there is at least one IPv6 address configured
             */
            listenAddress = "::";
            encloseAddress = true;
        } else {
            listenAddress = "0.0.0.0";
        }

        /* QEMU will be started with
         * -incoming protocol:[<IPv6 addr>]:port,
         * -incoming protocol:<IPv4 addr>:port, or
         * -incoming protocol:<hostname>:port
         */
        if (encloseAddress)
            incFormat = "%s:[%s]:%d";
        else
            incFormat = "%s:%s:%d";

        migrateFrom = g_strdup_printf(incFormat, protocol, listenAddress, port);
    }

    return qemuProcessIncomingDefNew(priv->qemuCaps, listenAddress,
                                     migrateFrom, fd, NULL);
}

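/* Common implementation of the Prepare phase on the destination host,
 * shared by the tunnelled and direct variants: validates flags, runs the
 * migration hook, parses the incoming cookie, adds the domain, starts QEMU
 * with -incoming and, where requested, the NBD server for storage copy. */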
static int
qemuMigrationDstPrepareAny(virQEMUDriverPtr driver,
                           virConnectPtr dconn,
                           const char *cookiein,
                           int cookieinlen,
                           char **cookieout,
                           int *cookieoutlen,
                           virDomainDefPtr *def,
                           const char *origname,
                           virStreamPtr st,
                           const char *protocol,
                           unsigned short port,
                           bool autoPort,
                           const char *listenAddress,
                           size_t nmigrate_disks,
                           const char **migrate_disks,
                           int nbdPort,
                           const char *nbdURI,
                           qemuMigrationParamsPtr migParams,
                           unsigned long flags)
{
    g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
    virDomainObjPtr vm = NULL;
    virObjectEventPtr event = NULL;
    virErrorPtr origErr;
    int ret = -1;
    int dataFD[2] = { -1, -1 };
    qemuDomainObjPrivatePtr priv = NULL;
    qemuMigrationCookiePtr mig = NULL;
    qemuDomainJobPrivatePtr jobPriv = NULL;
    bool tunnel = !!st;
    g_autofree char *xmlout = NULL;
    unsigned int cookieFlags;
    unsigned int startFlags;
    qemuProcessIncomingDefPtr incoming = NULL;
    bool taint_hook = false;
    bool stopProcess = false;
    bool relabel = false;
    int rv;
    g_autofree char *tlsAlias = NULL;

    virNWFilterReadLockFilterUpdates();
    if (flags & VIR_MIGRATE_OFFLINE) {
        if (flags & (VIR_MIGRATE_NON_SHARED_DISK |
                     VIR_MIGRATE_NON_SHARED_INC)) {
            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                           _("offline migration cannot handle "
                             "non-shared storage"));
            goto cleanup;
        }
        if (!(flags & VIR_MIGRATE_PERSIST_DEST)) {
            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                           _("offline migration must be specified with "
                             "the persistent flag set"));
            goto cleanup;
        }
        if (tunnel) {
            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                           _("tunnelled offline migration does not "
                             "make sense"));
            goto cleanup;
        }
        cookieFlags = 0;
    } else {
        cookieFlags = QEMU_MIGRATION_COOKIE_GRAPHICS |
                      QEMU_MIGRATION_COOKIE_CAPS;
    }

    if (flags & VIR_MIGRATE_POSTCOPY &&
        (!(flags & VIR_MIGRATE_LIVE) ||
         flags & VIR_MIGRATE_PAUSED)) {
        virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
                       _("post-copy migration is not supported with non-live "
                         "or paused migration"));
        goto cleanup;
    }

    if (flags & VIR_MIGRATE_POSTCOPY && flags & VIR_MIGRATE_TUNNELLED) {
        virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
                       _("post-copy is not supported with tunnelled migration"));
        goto cleanup;
    }

    if (cfg->migrateTLSForce &&
        !(flags & VIR_MIGRATE_TUNNELLED) &&
        !(flags & VIR_MIGRATE_TLS)) {
        virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                       _("this libvirtd instance allows migration only with VIR_MIGRATE_TLS flag"));
        goto cleanup;
    }

    if (!qemuMigrationSrcIsAllowedHostdev(*def))
        goto cleanup;

    /* Let migration hook filter domain XML */
    if (virHookPresent(VIR_HOOK_DRIVER_QEMU)) {
        g_autofree char *xml = NULL;
        int hookret;

        if (!(xml = qemuDomainDefFormatXML(driver, NULL, *def,
                                           VIR_DOMAIN_XML_SECURE |
                                           VIR_DOMAIN_XML_MIGRATABLE)))
            goto cleanup;

        hookret = virHookCall(VIR_HOOK_DRIVER_QEMU, (*def)->name,
                              VIR_HOOK_QEMU_OP_MIGRATE, VIR_HOOK_SUBOP_BEGIN,
                              NULL, xml, &xmlout);

        if (hookret < 0) {
            goto cleanup;
        } else if (hookret == 0) {
            if (virStringIsEmpty(xmlout)) {
                VIR_DEBUG("Migrate hook filter returned nothing; using the"
                          " original XML");
            } else {
                g_autoptr(virDomainDef) newdef = NULL;

                VIR_DEBUG("Using hook-filtered domain XML: %s", xmlout);
                newdef = virDomainDefParseString(xmlout, driver->xmlopt, NULL,
                                                 VIR_DOMAIN_DEF_PARSE_INACTIVE |
                                                 VIR_DOMAIN_DEF_PARSE_SKIP_VALIDATE);
                if (!newdef)
                    goto cleanup;

                if (!qemuDomainDefCheckABIStability(driver, NULL, *def, newdef))
                    goto cleanup;

                virDomainDefFree(*def);
                *def = g_steal_pointer(&newdef);
                /* We should taint the domain here. However, @vm and therefore
                 * privateData too are still NULL, so just notice the fact and
                 * taint it later. */
                taint_hook = true;
            }
        }
    }
    /* Parse cookie earlier than adding the domain onto the
     * domain list. Parsing/validation may fail and there's no
     * point in having the domain in the list at that point. */
    if (!(mig = qemuMigrationCookieParse(driver, *def, origname, NULL,
                                         cookiein, cookieinlen,
                                         QEMU_MIGRATION_COOKIE_LOCKSTATE |
                                         QEMU_MIGRATION_COOKIE_NBD |
                                         QEMU_MIGRATION_COOKIE_MEMORY_HOTPLUG |
                                         QEMU_MIGRATION_COOKIE_CPU_HOTPLUG |
                                         QEMU_MIGRATION_COOKIE_CPU |
                                         QEMU_MIGRATION_COOKIE_ALLOW_REBOOT |
                                         QEMU_MIGRATION_COOKIE_CAPS)))
        goto cleanup;

    if (!(vm = virDomainObjListAdd(driver->domains, *def,
                                   driver->xmlopt,
                                   VIR_DOMAIN_OBJ_LIST_ADD_LIVE |
                                   VIR_DOMAIN_OBJ_LIST_ADD_CHECK_LIVE,
                                   NULL)))
        goto cleanup;
    *def = NULL;

    priv = vm->privateData;
    jobPriv = priv->job.privateData;
    priv->origname = g_strdup(origname);

    if (taint_hook) {
        /* Domain XML has been altered by a hook script. */
        priv->hookRun = true;
    }

    if (STREQ_NULLABLE(protocol, "rdma") &&
        !virMemoryLimitIsSet(vm->def->mem.hard_limit)) {
        virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                       _("cannot start RDMA migration with no memory hard "
                         "limit set"));
        goto cleanup;
    }

    if (qemuMigrationDstPrecreateStorage(vm, mig->nbd,
                                         nmigrate_disks, migrate_disks,
                                         !!(flags & VIR_MIGRATE_NON_SHARED_INC)) < 0)
        goto cleanup;

    if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
                              flags) < 0)
        goto cleanup;
    qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_PREPARE);

    /* Domain starts inactive, even if the domain XML had an id field. */
    vm->def->id = -1;

    if (flags & VIR_MIGRATE_OFFLINE)
        goto done;
    if (tunnel &&
        virPipe(dataFD) < 0)
        goto stopjob;

    startFlags = VIR_QEMU_PROCESS_START_AUTODESTROY;

    if (qemuProcessInit(driver, vm, mig->cpu, QEMU_ASYNC_JOB_MIGRATION_IN,
                        true, startFlags) < 0)
        goto stopjob;
    stopProcess = true;

    priv->allowReboot = mig->allowReboot;

    if (!(incoming = qemuMigrationDstPrepare(vm, tunnel, protocol,
                                             listenAddress, port,
                                             dataFD[0])))
        goto stopjob;

    if (qemuProcessPrepareDomain(driver, vm, startFlags) < 0)
        goto stopjob;

    if (qemuProcessPrepareHost(driver, vm, startFlags) < 0)
        goto stopjob;

    rv = qemuProcessLaunch(dconn, driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
                           incoming, NULL,
                           VIR_NETDEV_VPORT_PROFILE_OP_MIGRATE_IN_START,
                           startFlags);
    if (rv < 0) {
        if (rv == -2)
            relabel = true;
        goto stopjob;
    }
    relabel = true;

    if (tunnel) {
        if (virFDStreamOpen(st, dataFD[1]) < 0) {
            virReportSystemError(errno, "%s",
                                 _("cannot pass pipe for tunnelled migration"));
            goto stopjob;
        }
        dataFD[1] = -1; /* 'st' owns the FD now & will close it */
    }

    if (STREQ_NULLABLE(protocol, "rdma") &&
        virProcessSetMaxMemLock(vm->pid, vm->def->mem.hard_limit << 10) < 0) {
        goto stopjob;
    }

    if (qemuMigrationParamsCheck(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
                                 migParams, mig->caps->automatic) < 0)
        goto stopjob;

    /* Migrations using TLS need to add the "tls-creds-x509" object and
     * set the migration TLS parameters */
    if (flags & VIR_MIGRATE_TLS) {
        if (qemuMigrationParamsEnableTLS(driver, vm, true,
                                         QEMU_ASYNC_JOB_MIGRATION_IN,
                                         &tlsAlias, NULL,
                                         migParams) < 0)
            goto stopjob;
    } else {
        if (qemuMigrationParamsDisableTLS(vm, migParams) < 0)
            goto stopjob;
    }

    if (qemuMigrationParamsApply(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
                                 migParams) < 0)
        goto stopjob;

    if (mig->nbd &&
        flags & (VIR_MIGRATE_NON_SHARED_DISK | VIR_MIGRATE_NON_SHARED_INC) &&
        virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_NBD_SERVER)) {
        const char *nbdTLSAlias = NULL;

        if (flags & VIR_MIGRATE_TLS) {
            if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_NBD_TLS)) {
                virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
                               _("QEMU NBD server does not support TLS transport"));
                goto stopjob;
            }

            nbdTLSAlias = tlsAlias;
        }

        if (qemuMigrationDstStartNBDServer(driver, vm, incoming->address,
                                           nmigrate_disks, migrate_disks,
                                           nbdPort, nbdURI,
                                           nbdTLSAlias) < 0) {
            goto stopjob;
        }
        cookieFlags |= QEMU_MIGRATION_COOKIE_NBD;
    }

    if (mig->lockState) {
        VIR_DEBUG("Received lockstate %s", mig->lockState);
        VIR_FREE(priv->lockState);
        priv->lockState = mig->lockState;
        mig->lockState = NULL;
    } else {
        VIR_DEBUG("Received no lockstate");
    }

    if (incoming->deferredURI &&
        qemuMigrationDstRun(driver, vm, incoming->deferredURI,
                            QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
        goto stopjob;

    if (qemuProcessFinishStartup(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
                                 false, VIR_DOMAIN_PAUSED_MIGRATION) < 0)
        goto stopjob;
 done:
    if (qemuMigrationCookieFormat(mig, driver, vm,
                                  QEMU_MIGRATION_DESTINATION,
                                  cookieout, cookieoutlen, cookieFlags) < 0) {
        /* We could tear down the whole guest here, but
         * cookie data is (so far) non-critical, so that
         * seems a little harsh. We'll just warn for now.
         */
        VIR_WARN("Unable to encode migration cookie");
    }

    if (qemuDomainCleanupAdd(vm, qemuMigrationDstPrepareCleanup) < 0)
        goto stopjob;

    if (!(flags & VIR_MIGRATE_OFFLINE)) {
        virDomainAuditStart(vm, "migrated", true);
        event = virDomainEventLifecycleNewFromObj(vm,
                                                  VIR_DOMAIN_EVENT_STARTED,
                                                  VIR_DOMAIN_EVENT_STARTED_MIGRATED);
    }

    /* We keep the job active across API calls until the finish() call.
     * This prevents any other APIs being invoked while incoming
     * migration is taking place.
     */
    qemuMigrationJobContinue(vm);

    if (autoPort)
        priv->migrationPort = port;
    /* in this case port is not auto selected and we don't need to manage it
     * anymore after cookie is baked
     */
    if (nbdPort != 0)
        priv->nbdPort = 0;
    ret = 0;

 cleanup:
    virErrorPreserveLast(&origErr);
    qemuProcessIncomingDefFree(incoming);
    VIR_FORCE_CLOSE(dataFD[0]);
    VIR_FORCE_CLOSE(dataFD[1]);
    if (ret < 0 && priv) {
        /* priv is set right after vm is added to the list of domains
         * and there is no 'goto cleanup;' in the middle of those */
        VIR_FREE(priv->origname);
        /* release if port is auto selected which is not the case if
         * it is given in parameters
         */
        if (nbdPort == 0)
            virPortAllocatorRelease(priv->nbdPort);
        priv->nbdPort = 0;
        virDomainObjRemoveTransientDef(vm);
        qemuDomainRemoveInactiveJob(driver, vm);
    }
    virDomainObjEndAPI(&vm);
    virObjectEventStateQueue(driver->domainEventState, event);
    qemuMigrationCookieFree(mig);
    virNWFilterUnlockFilterUpdates();
    virErrorRestore(&origErr);
    return ret;

 stopjob:
    qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
                             jobPriv->migParams, priv->job.apiFlags);

    if (stopProcess) {
        unsigned int stopFlags = VIR_QEMU_PROCESS_STOP_MIGRATED;
        if (!relabel)
            stopFlags |= VIR_QEMU_PROCESS_STOP_NO_RELABEL;
        virDomainAuditStart(vm, "migrated", false);
        qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
                        QEMU_ASYNC_JOB_MIGRATION_IN, stopFlags);
    }

    qemuMigrationJobFinish(driver, vm);
    goto cleanup;
}

/*
 * This version starts an empty VM listening on a localhost TCP port, and
 * sets up the corresponding virStream to handle the incoming data.
 */
int
qemuMigrationDstPrepareTunnel(virQEMUDriverPtr driver,
                              virConnectPtr dconn,
                              const char *cookiein,
                              int cookieinlen,
                              char **cookieout,
                              int *cookieoutlen,
                              virStreamPtr st,
                              virDomainDefPtr *def,
                              const char *origname,
                              qemuMigrationParamsPtr migParams,
                              unsigned long flags)
{
    VIR_DEBUG("driver=%p, dconn=%p, cookiein=%s, cookieinlen=%d, "
              "cookieout=%p, cookieoutlen=%p, st=%p, def=%p, "
              "origname=%s, flags=0x%lx",
              driver, dconn, NULLSTR(cookiein), cookieinlen,
              cookieout, cookieoutlen, st, *def, origname, flags);

    if (st == NULL) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("tunnelled migration requested but NULL stream passed"));
        return -1;
    }

    return qemuMigrationDstPrepareAny(driver, dconn, cookiein, cookieinlen,
                                      cookieout, cookieoutlen, def, origname,
                                      st, NULL, 0, false, NULL, 0, NULL, 0,
                                      NULL, migParams, flags);
}

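/* Parse a migration URI, tolerating the historical "tcp:host" form by
 * rewriting it to "tcp://host"; *wellFormed reports whether the original
 * string was already a proper URI. */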
static virURIPtr
qemuMigrationAnyParseURI(const char *uri, bool *wellFormed)
{
    char *tmp = NULL;
    virURIPtr parsed;

    /* For compatibility reasons tcp://... URIs are sent as tcp:...
     * We need to transform them to a well-formed URI before parsing. */
    if (STRPREFIX(uri, "tcp:") && !STRPREFIX(uri + 4, "//")) {
        tmp = g_strdup_printf("tcp://%s", uri + 4);
        uri = tmp;
    }

    parsed = virURIParse(uri);
    if (parsed && wellFormed)
        *wellFormed = !tmp;
    VIR_FREE(tmp);

    return parsed;
}

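/* Prepare phase for direct (non-tunnelled) migration: picks or parses the
 * incoming URI and port, then delegates to qemuMigrationDstPrepareAny(). */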
int
qemuMigrationDstPrepareDirect(virQEMUDriverPtr driver,
                              virConnectPtr dconn,
                              const char *cookiein,
                              int cookieinlen,
                              char **cookieout,
                              int *cookieoutlen,
                              const char *uri_in,
                              char **uri_out,
                              virDomainDefPtr *def,
                              const char *origname,
                              const char *listenAddress,
                              size_t nmigrate_disks,
                              const char **migrate_disks,
                              int nbdPort,
                              const char *nbdURI,
                              qemuMigrationParamsPtr migParams,
                              unsigned long flags)
{
    unsigned short port = 0;
    bool autoPort = true;
    g_autofree char *hostname = NULL;
    int ret = -1;
    g_autoptr(virURI) uri = NULL;
    g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
    const char *migrateHost = cfg->migrateHost;

    VIR_DEBUG("driver=%p, dconn=%p, cookiein=%s, cookieinlen=%d, "
              "cookieout=%p, cookieoutlen=%p, uri_in=%s, uri_out=%p, "
              "def=%p, origname=%s, listenAddress=%s, "
              "nmigrate_disks=%zu, migrate_disks=%p, nbdPort=%d, "
              "nbdURI=%s, flags=0x%lx",
              driver, dconn, NULLSTR(cookiein), cookieinlen,
              cookieout, cookieoutlen, NULLSTR(uri_in), uri_out,
              *def, origname, NULLSTR(listenAddress),
              nmigrate_disks, migrate_disks, nbdPort, NULLSTR(nbdURI),
              flags);

    *uri_out = NULL;

    /* The URI passed in may be NULL or a string "tcp://somehostname:port".
     *
     * If the URI passed in is NULL then we allocate a port number
     * from our pool of port numbers, and if the migrateHost is configured,
     * we return a URI of "tcp://migrateHost:port", otherwise return a URI
     * of "tcp://ourhostname:port".
     *
     * If the URI passed in is not NULL then we try to parse out the
     * port number and use that (note that the hostname is assumed
     * to be a correct hostname which refers to the target machine).
     */
    if (uri_in == NULL) {
        bool encloseAddress = false;
        const char *incFormat;

        if (virPortAllocatorAcquire(driver->migrationPorts, &port) < 0)
            goto cleanup;

        if (migrateHost != NULL) {
            if (virSocketAddrNumericFamily(migrateHost) == AF_INET6)
                encloseAddress = true;

            hostname = g_strdup(migrateHost);
        } else {
            if ((hostname = virGetHostname()) == NULL)
                goto cleanup;
        }

        if (STRPREFIX(hostname, "localhost")) {
            virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                           _("hostname on destination resolved to localhost,"
                             " but migration requires an FQDN"));
            goto cleanup;
        }

        /* XXX this really should have been a properly well-formed
         * URI, but we can't add in tcp:// now without breaking
         * compatibility with old targets. We at least make the
         * new targets accept both syntaxes though.
         */
        if (encloseAddress)
            incFormat = "%s:[%s]:%d";
        else
            incFormat = "%s:%s:%d";

        *uri_out = g_strdup_printf(incFormat, "tcp", hostname, port);
    } else {
        bool well_formed_uri = false;

        if (!(uri = qemuMigrationAnyParseURI(uri_in, &well_formed_uri)))
            goto cleanup;

        if (uri->scheme == NULL) {
            virReportError(VIR_ERR_INVALID_ARG,
                           _("missing scheme in migration URI: %s"),
                           uri_in);
            goto cleanup;
        }

        if (STRNEQ(uri->scheme, "tcp") &&
            STRNEQ(uri->scheme, "rdma") &&
            STRNEQ(uri->scheme, "unix")) {
            virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED,
                           _("unsupported scheme %s in migration URI %s"),
                           uri->scheme, uri_in);
            goto cleanup;
        }

        if (STREQ(uri->scheme, "unix")) {
            autoPort = false;
            listenAddress = uri->path;
        } else {
            if (uri->server == NULL) {
                virReportError(VIR_ERR_INVALID_ARG, _("missing host in migration"
                                                      " URI: %s"), uri_in);
                goto cleanup;
            }

            if (uri->port == 0) {
                if (virPortAllocatorAcquire(driver->migrationPorts, &port) < 0)
                    goto cleanup;

                /* Send well-formed URI only if uri_in was well-formed */
                if (well_formed_uri) {
                    uri->port = port;
                    if (!(*uri_out = virURIFormat(uri)))
                        goto cleanup;
                } else {
                    *uri_out = g_strdup_printf("%s:%d", uri_in, port);
                }
            } else {
                port = uri->port;
                autoPort = false;
            }
        }
    }

    if (*uri_out)
        VIR_DEBUG("Generated uri_out=%s", *uri_out);

    ret = qemuMigrationDstPrepareAny(driver, dconn, cookiein, cookieinlen,
                                     cookieout, cookieoutlen, def, origname,
                                     NULL, uri ? uri->scheme : "tcp",
                                     port, autoPort, listenAddress,
                                     nmigrate_disks, migrate_disks, nbdPort,
                                     nbdURI, migParams, flags);

 cleanup:
    if (ret != 0) {
        VIR_FREE(*uri_out);
        if (autoPort)
            virPortAllocatorRelease(port);
    }
    return ret;
}

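/* Parse the domain XML sent by the source and optionally rename the domain
 * to @dname, returning the original name via @origname. */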
virDomainDefPtr
qemuMigrationAnyPrepareDef(virQEMUDriverPtr driver,
                           virQEMUCapsPtr qemuCaps,
                           const char *dom_xml,
                           const char *dname,
                           char **origname)
{
    virDomainDefPtr def;
    char *name = NULL;

    if (!dom_xml) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("no domain XML passed"));
        return NULL;
    }

    if (!(def = virDomainDefParseString(dom_xml, driver->xmlopt,
                                        qemuCaps,
                                        VIR_DOMAIN_DEF_PARSE_INACTIVE |
                                        VIR_DOMAIN_DEF_PARSE_SKIP_VALIDATE)))
        goto cleanup;

    if (dname) {
        name = def->name;
        def->name = g_strdup(dname);
    }

 cleanup:
    if (def && origname)
        *origname = name;
    else
        VIR_FREE(name);
    return def;
}

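/* Confirm phase on the source host: on success the domain is shut down and
 * a stopped event emitted; on failure any NBD copy jobs are cancelled and
 * the domain state is restored unless post-copy had already been entered. */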
static int
qemuMigrationSrcConfirmPhase(virQEMUDriverPtr driver,
                             virDomainObjPtr vm,
                             const char *cookiein,
                             int cookieinlen,
                             unsigned int flags,
                             int retcode)
{
    g_autoptr(qemuMigrationCookie) mig = NULL;
    virObjectEventPtr event;
    g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
    qemuDomainObjPrivatePtr priv = vm->privateData;
    qemuDomainJobPrivatePtr jobPriv = priv->job.privateData;
    qemuDomainJobInfoPtr jobInfo = NULL;

    VIR_DEBUG("driver=%p, vm=%p, cookiein=%s, cookieinlen=%d, "
              "flags=0x%x, retcode=%d",
              driver, vm, NULLSTR(cookiein), cookieinlen,
              flags, retcode);

    virCheckFlags(QEMU_MIGRATION_FLAGS, -1);

    qemuMigrationJobSetPhase(driver, vm,
                             retcode == 0
                             ? QEMU_MIGRATION_PHASE_CONFIRM3
                             : QEMU_MIGRATION_PHASE_CONFIRM3_CANCELLED);

    if (!(mig = qemuMigrationCookieParse(driver, vm->def, priv->origname, priv,
                                         cookiein, cookieinlen,
                                         QEMU_MIGRATION_COOKIE_STATS)))
        return -1;
    if (retcode == 0)
        jobInfo = priv->job.completed;
    else
        g_clear_pointer(&priv->job.completed, qemuDomainJobInfoFree);

    /* Update times with the values sent by the destination daemon */
    if (mig->jobInfo && jobInfo) {
        int reason;

        /* We need to refresh migration statistics after a completed post-copy
         * migration since priv->job.completed contains obsolete data from the
         * time we switched to post-copy mode.
         */
        if (virDomainObjGetState(vm, &reason) == VIR_DOMAIN_PAUSED &&
            reason == VIR_DOMAIN_PAUSED_POSTCOPY &&
            qemuMigrationAnyFetchStats(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
                                       jobInfo, NULL) < 0)
            VIR_WARN("Could not refresh migration statistics");

        qemuDomainJobInfoUpdateTime(jobInfo);
        jobInfo->timeDeltaSet = mig->jobInfo->timeDeltaSet;
        jobInfo->timeDelta = mig->jobInfo->timeDelta;
        jobInfo->stats.mig.downtime_set = mig->jobInfo->stats.mig.downtime_set;
        jobInfo->stats.mig.downtime = mig->jobInfo->stats.mig.downtime;
    }

    if (flags & VIR_MIGRATE_OFFLINE)
        return 0;

    /* Did the migration go as planned? If yes, kill off the domain object.
     * If something failed, resume CPUs, but only if we didn't use post-copy.
     */
    if (retcode == 0) {
        /* If guest uses SPICE and supports seamless migration we have to hold
         * up domain shutdown until SPICE server transfers its data */
        qemuMigrationSrcWaitForSpice(vm);

        qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_MIGRATED,
                        QEMU_ASYNC_JOB_MIGRATION_OUT,
                        VIR_QEMU_PROCESS_STOP_MIGRATED);
        virDomainAuditStop(vm, "migrated");

        event = virDomainEventLifecycleNewFromObj(vm,
                                                  VIR_DOMAIN_EVENT_STOPPED,
                                                  VIR_DOMAIN_EVENT_STOPPED_MIGRATED);
        virObjectEventStateQueue(driver->domainEventState, event);
        qemuDomainEventEmitJobCompleted(driver, vm);
    } else {
        virErrorPtr orig_err;
        int reason;

        virErrorPreserveLast(&orig_err);

        /* cancel any outstanding NBD jobs */
        qemuMigrationSrcNBDCopyCancel(driver, vm, false,
                                      QEMU_ASYNC_JOB_MIGRATION_OUT, NULL);

        virErrorRestore(&orig_err);

        if (virDomainObjGetState(vm, &reason) == VIR_DOMAIN_PAUSED &&
            reason == VIR_DOMAIN_PAUSED_POSTCOPY)
            qemuMigrationAnyPostcopyFailed(driver, vm);
        else
            qemuMigrationSrcRestoreDomainState(driver, vm);

        qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
                                 jobPriv->migParams, priv->job.apiFlags);

        if (virDomainObjSave(vm, driver->xmlopt, cfg->stateDir) < 0)
            VIR_WARN("Failed to save status on vm %s", vm->def->name);
    }

    return 0;
}

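/* Public wrapper around qemuMigrationSrcConfirmPhase(): re-enters the
 * migration-out job, runs the confirm phase and finally removes the domain
 * if it ended up inactive, honouring VIR_MIGRATE_UNDEFINE_SOURCE. */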
int
qemuMigrationSrcConfirm(virQEMUDriverPtr driver,
                        virDomainObjPtr vm,
                        const char *cookiein,
                        int cookieinlen,
                        unsigned int flags,
                        int cancelled)
{
    qemuMigrationJobPhase phase;
    g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
    int ret = -1;

    if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_OUT))
        goto cleanup;

    if (cancelled)
        phase = QEMU_MIGRATION_PHASE_CONFIRM3_CANCELLED;
    else
        phase = QEMU_MIGRATION_PHASE_CONFIRM3;

    qemuMigrationJobStartPhase(driver, vm, phase);
    virCloseCallbacksUnset(driver->closeCallbacks, vm,
                           qemuMigrationSrcCleanup);

    ret = qemuMigrationSrcConfirmPhase(driver, vm,
                                       cookiein, cookieinlen,
                                       flags, cancelled);

    qemuMigrationJobFinish(driver, vm);
    if (!virDomainObjIsActive(vm)) {
        if (!cancelled && ret == 0 && flags & VIR_MIGRATE_UNDEFINE_SOURCE) {
            virDomainDeleteConfig(cfg->configDir, cfg->autostartDir, vm);
            vm->persistent = 0;
        }
        qemuDomainRemoveInactiveJob(driver, vm);
    }

 cleanup:
    virDomainObjEndAPI(&vm);
    return ret;
}

enum qemuMigrationDestinationType {
    MIGRATION_DEST_HOST,
    MIGRATION_DEST_CONNECT_HOST,
    MIGRATION_DEST_SOCKET,
    MIGRATION_DEST_CONNECT_SOCKET,
    MIGRATION_DEST_FD,
};

enum qemuMigrationForwardType {
    MIGRATION_FWD_DIRECT,
    MIGRATION_FWD_STREAM,
};

typedef struct _qemuMigrationSpec qemuMigrationSpec;
typedef qemuMigrationSpec *qemuMigrationSpecPtr;
struct _qemuMigrationSpec {
    enum qemuMigrationDestinationType destType;
    union {
        struct {
            const char *protocol;
            const char *name;
            int port;
        } host;

        struct {
            const char *path;
        } socket;

        struct {
            int qemu;
            int local;
        } fd;
    } dest;

    enum qemuMigrationForwardType fwdType;
    union {
        virStreamPtr stream;
    } fwd;
};

#define TUNNEL_SEND_BUF_SIZE 65536

typedef struct _qemuMigrationIOThread qemuMigrationIOThread;
typedef qemuMigrationIOThread *qemuMigrationIOThreadPtr;
struct _qemuMigrationIOThread {
    virThread thread;
    virStreamPtr st;
    int sock;
    virError err;
    int wakeupRecvFD;
    int wakeupSendFD;
};

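/* Worker for the tunnelled-migration I/O thread: polls the QEMU migration
 * socket and the wakeup pipe, forwarding data into the virStream until EOF,
 * an abort request, or an error. */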
static void qemuMigrationSrcIOFunc(void *arg)
|
2011-01-31 10:47:03 +00:00
|
|
|
{
|
2011-05-09 15:52:42 +00:00
|
|
|
qemuMigrationIOThreadPtr data = arg;
|
2012-04-23 14:17:55 +00:00
|
|
|
char *buffer = NULL;
|
|
|
|
struct pollfd fds[2];
|
|
|
|
int timeout = -1;
|
|
|
|
virErrorPtr err = NULL;
|
|
|
|
|
|
|
|
VIR_DEBUG("Running migration tunnel; stream=%p, sock=%d",
|
|
|
|
data->st, data->sock);
|
2011-01-31 10:47:03 +00:00
|
|
|
|
2020-10-05 10:28:26 +00:00
|
|
|
buffer = g_new0(char, TUNNEL_SEND_BUF_SIZE);
|
2011-01-31 10:47:03 +00:00
|
|
|
|
2012-04-23 14:17:55 +00:00
|
|
|
fds[0].fd = data->sock;
|
|
|
|
fds[1].fd = data->wakeupRecvFD;
|
|
|
|
|
2011-01-31 10:47:03 +00:00
|
|
|
for (;;) {
|
2012-04-23 14:17:55 +00:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
fds[0].events = fds[1].events = POLLIN;
|
|
|
|
fds[0].revents = fds[1].revents = 0;
|
|
|
|
|
2019-10-15 11:55:26 +00:00
|
|
|
ret = poll(fds, G_N_ELEMENTS(fds), timeout);
|
2012-04-23 14:17:55 +00:00
|
|
|
|
|
|
|
if (ret < 0) {
|
|
|
|
if (errno == EAGAIN || errno == EINTR)
|
|
|
|
continue;
|
2011-01-31 10:47:03 +00:00
|
|
|
virReportSystemError(errno, "%s",
|
2012-04-23 14:17:55 +00:00
|
|
|
_("poll failed in migration tunnel"));
|
|
|
|
goto abrt;
|
2011-01-31 10:47:03 +00:00
|
|
|
}
|
2012-04-23 14:17:55 +00:00
|
|
|
|
|
|
|
if (ret == 0) {
|
|
|
|
/* We were asked to gracefully stop but reading would block. This
|
|
|
|
* can only happen if qemu told us migration finished but didn't
|
|
|
|
* close the migration fd. We handle this in the same way as EOF.
|
|
|
|
*/
|
|
|
|
VIR_DEBUG("QEMU forgot to close migration fd");
|
2011-01-31 10:47:03 +00:00
|
|
|
break;
|
2012-04-23 14:17:55 +00:00
|
|
|
}
|
2011-01-31 10:47:03 +00:00
|
|
|
|
2012-04-23 14:17:55 +00:00
|
|
|
if (fds[1].revents & (POLLIN | POLLERR | POLLHUP)) {
|
|
|
|
char stop = 0;
|
|
|
|
|
|
|
|
if (saferead(data->wakeupRecvFD, &stop, 1) != 1) {
|
|
|
|
virReportSystemError(errno, "%s",
|
|
|
|
_("failed to read from wakeup fd"));
|
|
|
|
goto abrt;
|
|
|
|
}
|
|
|
|
|
|
|
|
VIR_DEBUG("Migration tunnel was asked to %s",
|
|
|
|
stop ? "abort" : "finish");
|
|
|
|
if (stop) {
|
|
|
|
goto abrt;
|
|
|
|
} else {
|
|
|
|
timeout = 0;
|
|
|
|
}
|
2011-01-31 10:47:03 +00:00
|
|
|
}
|
|
|
|
|
2012-04-23 14:17:55 +00:00
|
|
|
if (fds[0].revents & (POLLIN | POLLERR | POLLHUP)) {
|
|
|
|
int nbytes;
|
|
|
|
|
|
|
|
nbytes = saferead(data->sock, buffer, TUNNEL_SEND_BUF_SIZE);
|
|
|
|
if (nbytes > 0) {
|
|
|
|
if (virStreamSend(data->st, buffer, nbytes) < 0)
|
|
|
|
goto error;
|
|
|
|
} else if (nbytes < 0) {
|
|
|
|
virReportSystemError(errno, "%s",
|
|
|
|
_("tunnelled migration failed to read from qemu"));
|
|
|
|
goto abrt;
|
|
|
|
} else {
|
|
|
|
/* EOF; get out of here */
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2011-01-31 10:47:03 +00:00
|
|
|
|
2011-05-09 15:52:42 +00:00
|
|
|
if (virStreamFinish(data->st) < 0)
|
|
|
|
goto error;
|
2011-01-31 10:47:03 +00:00
|
|
|
|
2015-10-08 12:29:07 +00:00
|
|
|
VIR_FORCE_CLOSE(data->sock);
|
2012-04-23 14:17:55 +00:00
|
|
|
VIR_FREE(buffer);
|
|
|
|
|
2011-05-09 15:52:42 +00:00
|
|
|
return;
|
|
|
|
|
2014-03-25 06:49:44 +00:00
|
|
|
abrt:
|
2018-12-06 17:33:04 +00:00
|
|
|
virErrorPreserveLast(&err);
|
2012-04-23 14:17:55 +00:00
|
|
|
if (err && err->code == VIR_ERR_OK) {
|
|
|
|
virFreeError(err);
|
|
|
|
err = NULL;
|
|
|
|
}
|
|
|
|
virStreamAbort(data->st);
|
2018-12-06 17:33:04 +00:00
|
|
|
virErrorRestore(&err);
|
2012-04-23 14:17:55 +00:00
|
|
|
|
2014-03-25 06:49:44 +00:00
|
|
|
error:
|
2020-07-09 04:42:21 +00:00
|
|
|
/* Let the source qemu know that the transfer can't continue anymore.
|
2015-10-08 12:29:07 +00:00
|
|
|
* Don't copy the error for EPIPE as destination has the actual error. */
|
|
|
|
VIR_FORCE_CLOSE(data->sock);
|
|
|
|
if (!virLastErrorIsSystemErrno(EPIPE))
|
|
|
|
virCopyLastError(&data->err);
|
2011-05-09 15:52:42 +00:00
|
|
|
virResetLastError();
|
2012-04-23 14:17:55 +00:00
|
|
|
VIR_FREE(buffer);
|
2011-05-09 15:52:42 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
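/* Create the wakeup pipe and spawn the joinable IO thread that copies
 * migration data from @sock into @st.  Returns NULL on failure. */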
static qemuMigrationIOThreadPtr
qemuMigrationSrcStartTunnel(virStreamPtr st,
                            int sock)
{
    qemuMigrationIOThreadPtr io = NULL;
    int wakeupFD[2] = { -1, -1 };

    if (virPipe(wakeupFD) < 0)
        goto error;

    io = g_new0(qemuMigrationIOThread, 1);

    io->st = st;
    io->sock = sock;
    io->wakeupRecvFD = wakeupFD[0];
    io->wakeupSendFD = wakeupFD[1];

    if (virThreadCreateFull(&io->thread, true,
                            qemuMigrationSrcIOFunc,
                            "qemu-mig-tunnel",
                            false,
                            io) < 0) {
        virReportSystemError(errno, "%s",
                             _("Unable to create migration thread"));
        goto error;
    }

    return io;

 error:
    VIR_FORCE_CLOSE(wakeupFD[0]);
    VIR_FORCE_CLOSE(wakeupFD[1]);
    VIR_FREE(io);
    return NULL;
}

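/* Ask the IO thread to finish (or, if @error is true, to abort), join it
 * and propagate any error it recorded.  Frees @io in all cases. */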
static int
qemuMigrationSrcStopTunnel(qemuMigrationIOThreadPtr io, bool error)
{
    int rv = -1;
    char stop = error ? 1 : 0;

    /* make sure the thread finishes its job and is joinable */
    if (safewrite(io->wakeupSendFD, &stop, 1) != 1) {
        virReportSystemError(errno, "%s",
                             _("failed to wakeup migration tunnel"));
        goto cleanup;
    }

    virThreadJoin(&io->thread);

    /* Forward error from the IO thread, to this thread */
    if (io->err.code != VIR_ERR_OK) {
        if (error)
            rv = 0;
        else
            virSetError(&io->err);
        virResetError(&io->err);
        goto cleanup;
    }

    rv = 0;

 cleanup:
    VIR_FORCE_CLOSE(io->wakeupSendFD);
    VIR_FORCE_CLOSE(io->wakeupRecvFD);
    VIR_FREE(io);
    return rv;
}

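/* For MIGRATION_DEST_CONNECT_{HOST,SOCKET} destinations, open the TCP or
 * UNIX connection on QEMU's behalf and turn the spec into a blocking
 * MIGRATION_DEST_FD destination. */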
static int
qemuMigrationSrcConnect(virQEMUDriverPtr driver,
                        virDomainObjPtr vm,
                        qemuMigrationSpecPtr spec)
{
    virNetSocketPtr sock;
    g_autofree char *port = NULL;
    int fd_qemu = -1;
    int ret = -1;

    if (qemuSecuritySetSocketLabel(driver->securityManager, vm->def) < 0)
        goto cleanup;

    switch (spec->destType) {
    case MIGRATION_DEST_CONNECT_HOST:
        port = g_strdup_printf("%d", spec->dest.host.port);
        if (virNetSocketNewConnectTCP(spec->dest.host.name,
                                      port,
                                      AF_UNSPEC,
                                      &sock) == 0) {
            fd_qemu = virNetSocketDupFD(sock, true);
            virObjectUnref(sock);
        }
        break;
    case MIGRATION_DEST_CONNECT_SOCKET:
        if (virNetSocketNewConnectUNIX(spec->dest.socket.path,
                                       false, NULL,
                                       &sock) == 0) {
            fd_qemu = virNetSocketDupFD(sock, true);
            virObjectUnref(sock);
        }
        break;
    case MIGRATION_DEST_HOST:
    case MIGRATION_DEST_SOCKET:
    case MIGRATION_DEST_FD:
        break;
    }

    spec->destType = MIGRATION_DEST_FD;
    spec->dest.fd.qemu = fd_qemu;

    if (qemuSecurityClearSocketLabel(driver->securityManager, vm->def) < 0 ||
        spec->dest.fd.qemu == -1)
        goto cleanup;

    /* Migration expects a blocking FD */
    if (virSetBlocking(spec->dest.fd.qemu, true) < 0) {
        virReportSystemError(errno, _("Unable to set FD %d blocking"),
                             spec->dest.fd.qemu);
        goto cleanup;
    }

    ret = 0;

 cleanup:
    if (ret < 0)
        VIR_FORCE_CLOSE(spec->dest.fd.qemu);
    return ret;
}

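/* Tell QEMU to continue a migration that is waiting in @status
 * (e.g. pre-switchover) via the migrate-continue monitor command. */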
static int
qemuMigrationSrcContinue(virQEMUDriverPtr driver,
                         virDomainObjPtr vm,
                         qemuMonitorMigrationStatus status,
                         qemuDomainAsyncJob asyncJob)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    int ret;

    if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
        return -1;

    ret = qemuMonitorMigrateContinue(priv->mon, status);

    if (qemuDomainObjExitMonitor(driver, vm) < 0)
        ret = -1;

    return ret;
}

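/* Attach or remove the dbus-vmstate helper depending on whether any
 * dbus-vmstate IDs are registered for the domain, and tell QEMU which
 * IDs it should include in the migration stream. */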
static int
qemuMigrationSetDBusVMState(virQEMUDriverPtr driver,
                            virDomainObjPtr vm)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;

    if (priv->dbusVMStateIds) {
        int rv;

        if (qemuHotplugAttachDBusVMState(driver, vm, QEMU_ASYNC_JOB_NONE) < 0)
            return -1;

        if (qemuDomainObjEnterMonitorAsync(driver, vm, QEMU_ASYNC_JOB_NONE) < 0)
            return -1;

        rv = qemuMonitorSetDBusVMStateIdList(priv->mon, priv->dbusVMStateIds);

        if (qemuDomainObjExitMonitor(driver, vm) < 0)
            rv = -1;

        return rv;
    } else {
        if (qemuHotplugRemoveDBusVMState(driver, vm, QEMU_ASYNC_JOB_NONE) < 0)
            return -1;
    }

    return 0;
}

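/* Core of the Perform phase on the source: apply migration parameters and
 * TLS setup, start the NBD storage copy if non-shared storage migration was
 * requested, issue the appropriate 'migrate' monitor command for the
 * destination type, run the tunnel IO thread if needed, wait for completion
 * (including the pre-switchover pause) and format the outgoing cookie. */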
static int
qemuMigrationSrcRun(virQEMUDriverPtr driver,
                    virDomainObjPtr vm,
                    const char *persist_xml,
                    const char *cookiein,
                    int cookieinlen,
                    char **cookieout,
                    int *cookieoutlen,
                    unsigned long flags,
                    unsigned long resource,
                    qemuMigrationSpecPtr spec,
                    virConnectPtr dconn,
                    const char *graphicsuri,
                    size_t nmigrate_disks,
                    const char **migrate_disks,
                    qemuMigrationParamsPtr migParams,
                    const char *nbdURI)
{
    int ret = -1;
    unsigned int migrate_flags = QEMU_MONITOR_MIGRATE_BACKGROUND;
    qemuDomainObjPrivatePtr priv = vm->privateData;
    g_autoptr(qemuMigrationCookie) mig = NULL;
    g_autofree char *tlsAlias = NULL;
    qemuMigrationIOThreadPtr iothread = NULL;
    VIR_AUTOCLOSE fd = -1;
    unsigned long migrate_speed = resource ? resource : priv->migMaxBandwidth;
    virErrorPtr orig_err = NULL;
    unsigned int cookieFlags = 0;
    bool abort_on_error = !!(flags & VIR_MIGRATE_ABORT_ON_ERROR);
    bool events = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_EVENT);
    bool bwParam = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_PARAM_BANDWIDTH);
    bool cancel = false;
    unsigned int waitFlags;
    g_autoptr(virDomainDef) persistDef = NULL;
    g_autofree char *timestamp = NULL;
    int rc;

    VIR_DEBUG("driver=%p, vm=%p, cookiein=%s, cookieinlen=%d, "
              "cookieout=%p, cookieoutlen=%p, flags=0x%lx, resource=%lu, "
              "spec=%p (dest=%d, fwd=%d), dconn=%p, graphicsuri=%s, "
              "nmigrate_disks=%zu, migrate_disks=%p",
              driver, vm, NULLSTR(cookiein), cookieinlen,
              cookieout, cookieoutlen, flags, resource,
              spec, spec->destType, spec->fwdType, dconn,
              NULLSTR(graphicsuri), nmigrate_disks, migrate_disks);

    if (flags & VIR_MIGRATE_NON_SHARED_DISK) {
        migrate_flags |= QEMU_MONITOR_MIGRATE_NON_SHARED_DISK;
        cookieFlags |= QEMU_MIGRATION_COOKIE_NBD;
    }

    if (flags & VIR_MIGRATE_NON_SHARED_INC) {
        migrate_flags |= QEMU_MONITOR_MIGRATE_NON_SHARED_INC;
        cookieFlags |= QEMU_MIGRATION_COOKIE_NBD;
    }

    if (virLockManagerPluginUsesState(driver->lockManager) &&
        !cookieout) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       _("Migration with lock driver %s requires"
                         " cookie support"),
                       virLockManagerPluginGetName(driver->lockManager));
        return -1;
    }

    if (events)
        priv->signalIOError = abort_on_error;

    if (flags & VIR_MIGRATE_PERSIST_DEST) {
        if (persist_xml) {
            if (!(persistDef = qemuMigrationAnyPrepareDef(driver,
                                                          priv->qemuCaps,
                                                          persist_xml,
                                                          NULL, NULL)))
                goto error;
        } else {
            virDomainDefPtr def = vm->newDef ? vm->newDef : vm->def;
            if (!(persistDef = qemuDomainDefCopy(driver, priv->qemuCaps, def,
                                                 VIR_DOMAIN_XML_SECURE |
                                                 VIR_DOMAIN_XML_MIGRATABLE)))
                goto error;
        }
    }

    mig = qemuMigrationCookieParse(driver, vm->def, priv->origname, priv,
                                   cookiein, cookieinlen,
                                   cookieFlags |
                                   QEMU_MIGRATION_COOKIE_GRAPHICS |
                                   QEMU_MIGRATION_COOKIE_CAPS);
    if (!mig)
        goto error;

    if (qemuMigrationSrcGraphicsRelocate(driver, vm, mig, graphicsuri) < 0)
        VIR_WARN("unable to provide data for graphics client relocation");

    if (qemuMigrationParamsCheck(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
                                 migParams, mig->caps->automatic) < 0)
        goto error;

    if (flags & VIR_MIGRATE_TLS) {
        const char *hostname = NULL;

        /* We need to add tls-hostname whenever QEMU itself does not
         * connect directly to the destination. */
        if (spec->destType == MIGRATION_DEST_CONNECT_HOST ||
            spec->destType == MIGRATION_DEST_FD)
            hostname = spec->dest.host.name;

        if (qemuMigrationParamsEnableTLS(driver, vm, false,
                                         QEMU_ASYNC_JOB_MIGRATION_OUT,
                                         &tlsAlias, hostname,
                                         migParams) < 0)
            goto error;
    } else {
        if (qemuMigrationParamsDisableTLS(vm, migParams) < 0)
            goto error;
    }

    if (bwParam &&
        qemuMigrationParamsSetULL(migParams, QEMU_MIGRATION_PARAM_MAX_BANDWIDTH,
                                  migrate_speed * 1024 * 1024) < 0)
        goto error;

    if (qemuMigrationParamsApply(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
                                 migParams) < 0)
        goto error;

    if (migrate_flags & (QEMU_MONITOR_MIGRATE_NON_SHARED_DISK |
                         QEMU_MONITOR_MIGRATE_NON_SHARED_INC)) {
        if (mig->nbd) {
            const char *host = "";

            if (spec->destType == MIGRATION_DEST_HOST ||
                spec->destType == MIGRATION_DEST_CONNECT_HOST) {
                host = spec->dest.host.name;
            }

            /* Currently libvirt does not support setting up of the NBD
             * non-shared storage migration with TLS. As we need to honour the
             * VIR_MIGRATE_TLS flag, we need to reject such migration until
             * we implement TLS for NBD. */
            if (flags & VIR_MIGRATE_TLS &&
                !virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV_DEL)) {
                virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
                               _("NBD migration with TLS is not supported"));
                goto error;
            }

            /* This will update migrate_flags on success */
            if (qemuMigrationSrcNBDStorageCopy(driver, vm, mig,
                                               host,
                                               migrate_speed,
                                               &migrate_flags,
                                               nmigrate_disks,
                                               migrate_disks,
                                               dconn, tlsAlias,
                                               nbdURI, flags) < 0) {
                goto error;
            }
        } else {
            /* Destination doesn't support NBD server.
             * Fall back to previous implementation. */
            VIR_DEBUG("Destination doesn't support NBD server "
                      "Falling back to previous implementation.");
        }
    }

    if (qemuMigrationSetDBusVMState(driver, vm) < 0)
        goto exit_monitor;

    /* Before EnterMonitor, since already qemuProcessStopCPUs does that */
    if (!(flags & VIR_MIGRATE_LIVE) &&
        virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
        if (qemuProcessStopCPUs(driver, vm, VIR_DOMAIN_PAUSED_MIGRATION,
                                QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
            goto error;
    }

    if (qemuDomainObjEnterMonitorAsync(driver, vm,
                                       QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
        goto error;

    if (priv->job.abortJob) {
        /* explicitly do this *after* we entered the monitor,
         * as this is a critical section so we are guaranteed
         * priv->job.abortJob will not change */
        priv->job.current->status = QEMU_DOMAIN_JOB_STATUS_CANCELED;
        virReportError(VIR_ERR_OPERATION_ABORTED, _("%s: %s"),
                       qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
                       _("canceled by client"));
        goto exit_monitor;
    }

    if (!bwParam &&
        qemuMonitorSetMigrationSpeed(priv->mon, migrate_speed) < 0)
        goto exit_monitor;

    /* connect to the destination qemu if needed */
    if ((spec->destType == MIGRATION_DEST_CONNECT_HOST ||
         spec->destType == MIGRATION_DEST_CONNECT_SOCKET) &&
        qemuMigrationSrcConnect(driver, vm, spec) < 0) {
        goto exit_monitor;
    }

    /* log start of migration */
    if ((timestamp = virTimeStringNow()) != NULL)
        qemuDomainLogAppendMessage(driver, vm, "%s: initiating migration\n", timestamp);

    rc = -1;
    switch (spec->destType) {
    case MIGRATION_DEST_HOST:
        if (STREQ(spec->dest.host.protocol, "rdma") &&
            virProcessSetMaxMemLock(vm->pid, vm->def->mem.hard_limit << 10) < 0) {
            goto exit_monitor;
        }
        rc = qemuMonitorMigrateToHost(priv->mon, migrate_flags,
                                      spec->dest.host.protocol,
                                      spec->dest.host.name,
                                      spec->dest.host.port);
        break;

    case MIGRATION_DEST_SOCKET:
        qemuSecurityDomainSetPathLabel(driver, vm, spec->dest.socket.path, false);
        rc = qemuMonitorMigrateToSocket(priv->mon, migrate_flags,
                                        spec->dest.socket.path);
        break;

    case MIGRATION_DEST_CONNECT_HOST:
    case MIGRATION_DEST_CONNECT_SOCKET:
        /* handled above and transformed into MIGRATION_DEST_FD */
        break;

    case MIGRATION_DEST_FD:
        if (spec->fwdType != MIGRATION_FWD_DIRECT) {
            fd = spec->dest.fd.local;
            spec->dest.fd.local = -1;
        }
        rc = qemuMonitorMigrateToFd(priv->mon, migrate_flags,
                                    spec->dest.fd.qemu);
        VIR_FORCE_CLOSE(spec->dest.fd.qemu);
        break;
    }

    if (qemuDomainObjExitMonitor(driver, vm) < 0 || rc < 0)
        goto error;

    /* From this point onwards we *must* call cancel to abort the
     * migration on source if anything goes wrong */
    cancel = true;

    if (spec->fwdType != MIGRATION_FWD_DIRECT) {
        if (!(iothread = qemuMigrationSrcStartTunnel(spec->fwd.stream, fd)))
            goto error;
        /* If we've created a tunnel, then the 'fd' will be closed in the
         * qemuMigrationIOFunc as data->sock.
         */
        fd = -1;
    }

    waitFlags = QEMU_MIGRATION_COMPLETED_PRE_SWITCHOVER;
    if (abort_on_error)
        waitFlags |= QEMU_MIGRATION_COMPLETED_ABORT_ON_ERROR;
    if (mig->nbd)
        waitFlags |= QEMU_MIGRATION_COMPLETED_CHECK_STORAGE;
    if (flags & VIR_MIGRATE_POSTCOPY)
        waitFlags |= QEMU_MIGRATION_COMPLETED_POSTCOPY;

    rc = qemuMigrationSrcWaitForCompletion(driver, vm,
                                           QEMU_ASYNC_JOB_MIGRATION_OUT,
                                           dconn, waitFlags);
    if (rc == -2) {
        goto error;
    } else if (rc == -1) {
        /* QEMU reported failed migration, nothing to cancel anymore */
        cancel = false;
        goto error;
    }

    /* When migration completed, QEMU will have paused the CPUs for us.
     * Wait for the STOP event to be processed to release the lock state.
     */
    while (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
        priv->signalStop = true;
        rc = virDomainObjWait(vm);
        priv->signalStop = false;
        if (rc < 0)
            goto error;
    }

    if (mig->nbd &&
        qemuMigrationSrcNBDCopyCancel(driver, vm, true,
                                      QEMU_ASYNC_JOB_MIGRATION_OUT,
                                      dconn) < 0)
        goto error;

    /* When migration was paused before serializing device state we need to
     * resume it now once we finished all block jobs and wait for the real
     * end of the migration.
     */
    if (priv->job.current->status == QEMU_DOMAIN_JOB_STATUS_PAUSED) {
        if (qemuMigrationSrcContinue(driver, vm,
                                     QEMU_MONITOR_MIGRATION_STATUS_PRE_SWITCHOVER,
                                     QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
            goto error;

        waitFlags ^= QEMU_MIGRATION_COMPLETED_PRE_SWITCHOVER;

        rc = qemuMigrationSrcWaitForCompletion(driver, vm,
                                               QEMU_ASYNC_JOB_MIGRATION_OUT,
                                               dconn, waitFlags);
        if (rc == -2) {
            goto error;
        } else if (rc == -1) {
            /* QEMU reported failed migration, nothing to cancel anymore */
            cancel = false;
            goto error;
        }
    }

    if (iothread) {
        qemuMigrationIOThreadPtr io;

        io = g_steal_pointer(&iothread);
        if (qemuMigrationSrcStopTunnel(io, false) < 0)
            goto error;
    }

    if (priv->job.completed) {
        priv->job.completed->stopped = priv->job.current->stopped;
        qemuDomainJobInfoUpdateTime(priv->job.completed);
        qemuDomainJobInfoUpdateDowntime(priv->job.completed);
        ignore_value(virTimeMillisNow(&priv->job.completed->sent));
    }

    cookieFlags |= QEMU_MIGRATION_COOKIE_NETWORK |
                   QEMU_MIGRATION_COOKIE_STATS;

    if (qemuMigrationCookieAddPersistent(mig, &persistDef) < 0 ||
        qemuMigrationCookieFormat(mig, driver, vm,
                                  QEMU_MIGRATION_SOURCE,
                                  cookieout, cookieoutlen, cookieFlags) < 0) {
        VIR_WARN("Unable to encode migration cookie");
    }

    ret = 0;

 cleanup:
    if (events)
        priv->signalIOError = false;

    virErrorRestore(&orig_err);

    return ret;

 error:
    virErrorPreserveLast(&orig_err);

    if (virDomainObjIsActive(vm)) {
        if (cancel &&
            priv->job.current->status != QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED &&
            qemuDomainObjEnterMonitorAsync(driver, vm,
                                           QEMU_ASYNC_JOB_MIGRATION_OUT) == 0) {
            qemuMonitorMigrateCancel(priv->mon);
            ignore_value(qemuDomainObjExitMonitor(driver, vm));
        }

        /* cancel any outstanding NBD jobs */
        if (mig && mig->nbd)
            qemuMigrationSrcNBDCopyCancel(driver, vm, false,
                                          QEMU_ASYNC_JOB_MIGRATION_OUT,
                                          dconn);

        if (priv->job.current->status != QEMU_DOMAIN_JOB_STATUS_CANCELED)
            priv->job.current->status = QEMU_DOMAIN_JOB_STATUS_FAILED;
    }

    if (iothread)
        qemuMigrationSrcStopTunnel(iothread, true);

    goto cleanup;

 exit_monitor:
    ignore_value(qemuDomainObjExitMonitor(driver, vm));
    goto error;
}

/* Perform migration using QEMU's native migrate support,
 * not encrypted obviously
 */
static int
qemuMigrationSrcPerformNative(virQEMUDriverPtr driver,
                              virDomainObjPtr vm,
                              const char *persist_xml,
                              const char *uri,
                              const char *cookiein,
                              int cookieinlen,
                              char **cookieout,
                              int *cookieoutlen,
                              unsigned long flags,
                              unsigned long resource,
                              virConnectPtr dconn,
                              const char *graphicsuri,
                              size_t nmigrate_disks,
                              const char **migrate_disks,
                              qemuMigrationParamsPtr migParams,
                              const char *nbdURI)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    g_autoptr(virURI) uribits = NULL;
    int ret = -1;
    qemuMigrationSpec spec;

    VIR_DEBUG("driver=%p, vm=%p, uri=%s, cookiein=%s, cookieinlen=%d, "
              "cookieout=%p, cookieoutlen=%p, flags=0x%lx, resource=%lu, "
              "graphicsuri=%s, nmigrate_disks=%zu migrate_disks=%p",
              driver, vm, uri, NULLSTR(cookiein), cookieinlen,
              cookieout, cookieoutlen, flags, resource,
              NULLSTR(graphicsuri), nmigrate_disks, migrate_disks);

    if (!(uribits = qemuMigrationAnyParseURI(uri, NULL)))
        return -1;

    if (uribits->scheme == NULL) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       _("missing scheme in migration URI: %s"),
                       uri);
        return -1;
    }

    if (STREQ(uribits->scheme, "rdma")) {
        if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_RDMA)) {
            virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
                           _("outgoing RDMA migration is not supported "
                             "with this QEMU binary"));
            return -1;
        }
        if (!virMemoryLimitIsSet(vm->def->mem.hard_limit)) {
            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                           _("cannot start RDMA migration with no memory hard "
                             "limit set"));
            return -1;
        }
    }

    if (STREQ(uribits->scheme, "unix")) {
        if ((flags & VIR_MIGRATE_TLS) &&
            !qemuMigrationParamsTLSHostnameIsSet(migParams)) {
            virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
                           _("Explicit destination hostname is required "
                             "for TLS migration over UNIX socket"));
            return -1;
        }

        if (flags & VIR_MIGRATE_PARALLEL)
            spec.destType = MIGRATION_DEST_SOCKET;
        else
            spec.destType = MIGRATION_DEST_CONNECT_SOCKET;

        spec.dest.socket.path = uribits->path;
    } else {
        /* RDMA and multi-fd migration requires QEMU to connect to the destination
         * itself.
         */
        if (STREQ(uribits->scheme, "rdma") || (flags & VIR_MIGRATE_PARALLEL))
            spec.destType = MIGRATION_DEST_HOST;
        else
            spec.destType = MIGRATION_DEST_CONNECT_HOST;

        spec.dest.host.protocol = uribits->scheme;
        spec.dest.host.name = uribits->server;
        spec.dest.host.port = uribits->port;
    }

    spec.fwdType = MIGRATION_FWD_DIRECT;

    ret = qemuMigrationSrcRun(driver, vm, persist_xml, cookiein, cookieinlen, cookieout,
                              cookieoutlen, flags, resource, &spec, dconn,
                              graphicsuri, nmigrate_disks, migrate_disks,
                              migParams, nbdURI);

    if (spec.destType == MIGRATION_DEST_FD)
        VIR_FORCE_CLOSE(spec.dest.fd.qemu);

    return ret;
}

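/* Perform migration with the data tunnelled over @st: create a local pipe,
 * point QEMU at its write end and let the tunnel IO thread forward the
 * read end into the libvirt stream. */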
static int
qemuMigrationSrcPerformTunnel(virQEMUDriverPtr driver,
                              virDomainObjPtr vm,
                              virStreamPtr st,
                              const char *persist_xml,
                              const char *cookiein,
                              int cookieinlen,
                              char **cookieout,
                              int *cookieoutlen,
                              unsigned long flags,
                              unsigned long resource,
                              virConnectPtr dconn,
                              const char *graphicsuri,
                              size_t nmigrate_disks,
                              const char **migrate_disks,
                              qemuMigrationParamsPtr migParams)
{
    int ret = -1;
    qemuMigrationSpec spec;
    int fds[2] = { -1, -1 };

    VIR_DEBUG("driver=%p, vm=%p, st=%p, cookiein=%s, cookieinlen=%d, "
              "cookieout=%p, cookieoutlen=%p, flags=0x%lx, resource=%lu, "
              "graphicsuri=%s, nmigrate_disks=%zu, migrate_disks=%p",
              driver, vm, st, NULLSTR(cookiein), cookieinlen,
              cookieout, cookieoutlen, flags, resource,
              NULLSTR(graphicsuri), nmigrate_disks, migrate_disks);

    spec.fwdType = MIGRATION_FWD_STREAM;
    spec.fwd.stream = st;

    spec.destType = MIGRATION_DEST_FD;
    spec.dest.fd.qemu = -1;
    spec.dest.fd.local = -1;

    if (virPipe(fds) < 0)
        goto cleanup;

    spec.dest.fd.qemu = fds[1];
    spec.dest.fd.local = fds[0];

    if (spec.dest.fd.qemu == -1 ||
        qemuSecuritySetImageFDLabel(driver->securityManager, vm->def,
                                    spec.dest.fd.qemu) < 0) {
        virReportSystemError(errno, "%s",
                             _("cannot create pipe for tunnelled migration"));
        goto cleanup;
    }

    ret = qemuMigrationSrcRun(driver, vm, persist_xml, cookiein, cookieinlen,
                              cookieout, cookieoutlen, flags, resource, &spec,
                              dconn, graphicsuri, nmigrate_disks, migrate_disks,
                              migParams, NULL);

 cleanup:
    VIR_FORCE_CLOSE(spec.dest.fd.qemu);
    VIR_FORCE_CLOSE(spec.dest.fd.local);

    return ret;
}

/* This is essentially a re-impl of virDomainMigrateVersion2
 * from libvirt.c, but running in source libvirtd context,
 * instead of client app context & also adding in tunnel
 * handling */
static int
qemuMigrationSrcPerformPeer2Peer2(virQEMUDriverPtr driver,
                                  virConnectPtr sconn,
                                  virConnectPtr dconn,
                                  virDomainObjPtr vm,
                                  const char *dconnuri,
                                  unsigned long flags,
                                  const char *dname,
                                  unsigned long resource,
                                  qemuMigrationParamsPtr migParams)
{
    virDomainPtr ddomain = NULL;
    char *uri_out = NULL;
    char *cookie = NULL;
    char *dom_xml = NULL;
    int cookielen = 0, ret;
    virErrorPtr orig_err = NULL;
    bool cancelled;
    virStreamPtr st = NULL;
    unsigned long destflags;

    VIR_DEBUG("driver=%p, sconn=%p, dconn=%p, vm=%p, dconnuri=%s, "
              "flags=0x%lx, dname=%s, resource=%lu",
              driver, sconn, dconn, vm, NULLSTR(dconnuri),
              flags, NULLSTR(dname), resource);

    /* In version 2 of the protocol, the prepare step is slightly
     * different. We fetch the domain XML of the source domain
     * and pass it to Prepare2.
     */
    if (!(dom_xml = qemuDomainFormatXML(driver, vm,
                                        QEMU_DOMAIN_FORMAT_LIVE_FLAGS |
                                        VIR_DOMAIN_XML_MIGRATABLE)))
        return -1;

    if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED)
        flags |= VIR_MIGRATE_PAUSED;

    destflags = flags & ~(VIR_MIGRATE_ABORT_ON_ERROR |
                          VIR_MIGRATE_AUTO_CONVERGE);

    VIR_DEBUG("Prepare2 %p", dconn);
    if (flags & VIR_MIGRATE_TUNNELLED) {
        /*
         * Tunnelled Migrate Version 2 does not support cookies
         * due to missing parameters in the prepareTunnel() API.
         */

        if (!(st = virStreamNew(dconn, 0)))
            goto cleanup;

        qemuDomainObjEnterRemote(vm);
        ret = dconn->driver->domainMigratePrepareTunnel
            (dconn, st, destflags, dname, resource, dom_xml);
        if (qemuDomainObjExitRemote(vm, true) < 0)
            goto cleanup;
    } else {
        qemuDomainObjEnterRemote(vm);
        ret = dconn->driver->domainMigratePrepare2
            (dconn, &cookie, &cookielen, NULL, &uri_out,
             destflags, dname, resource, dom_xml);
        if (qemuDomainObjExitRemote(vm, true) < 0)
            goto cleanup;
    }
    VIR_FREE(dom_xml);
    if (ret == -1)
        goto cleanup;

    if (!(flags & VIR_MIGRATE_TUNNELLED) &&
        (uri_out == NULL)) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("domainMigratePrepare2 did not set uri"));
        cancelled = true;
        virErrorPreserveLast(&orig_err);
        goto finish;
    }

    /* Perform the migration. The driver isn't supposed to return
     * until the migration is complete.
     */
    VIR_DEBUG("Perform %p", sconn);
    qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_PERFORM2);
    if (flags & VIR_MIGRATE_TUNNELLED)
        ret = qemuMigrationSrcPerformTunnel(driver, vm, st, NULL,
                                            NULL, 0, NULL, NULL,
                                            flags, resource, dconn,
                                            NULL, 0, NULL, migParams);
    else
        ret = qemuMigrationSrcPerformNative(driver, vm, NULL, uri_out,
                                            cookie, cookielen,
                                            NULL, NULL, /* No out cookie with v2 migration */
                                            flags, resource, dconn, NULL, 0, NULL,
                                            migParams, NULL);

    /* Perform failed. Make sure Finish doesn't overwrite the error */
    if (ret < 0)
        virErrorPreserveLast(&orig_err);

    /* If Perform returns < 0, then we need to cancel the VM
     * startup on the destination
     */
    cancelled = ret < 0;

 finish:
    /* In version 2 of the migration protocol, we pass the
     * status code from the sender to the destination host,
     * so it can do any cleanup if the migration failed.
     */
    dname = dname ? dname : vm->def->name;
    VIR_DEBUG("Finish2 %p ret=%d", dconn, ret);
    qemuDomainObjEnterRemote(vm);
    ddomain = dconn->driver->domainMigrateFinish2
        (dconn, dname, cookie, cookielen,
         uri_out ? uri_out : dconnuri, destflags, cancelled);
    /* The domain is already gone at this point */
    ignore_value(qemuDomainObjExitRemote(vm, false));
    if (cancelled && ddomain)
        VIR_ERROR(_("finish step ignored that migration was cancelled"));

 cleanup:
    if (ddomain) {
        virObjectUnref(ddomain);
        ret = 0;
    } else {
        ret = -1;
    }

    virObjectUnref(st);

    virErrorRestore(&orig_err);
    VIR_FREE(uri_out);
    VIR_FREE(cookie);

    return ret;
}

2011-02-03 11:09:28 +00:00
|
|
|
/* This is essentially a re-impl of virDomainMigrateVersion3
|
|
|
|
* from libvirt.c, but running in source libvirtd context,
|
|
|
|
* instead of client app context & also adding in tunnel
|
|
|
|
* handling */
|
2013-06-25 13:49:21 +00:00
|
|
|
static int
|
2018-02-12 17:11:41 +00:00
|
|
|
qemuMigrationSrcPerformPeer2Peer3(virQEMUDriverPtr driver,
|
|
|
|
virConnectPtr sconn,
|
|
|
|
virConnectPtr dconn,
|
|
|
|
const char *dconnuri,
|
|
|
|
virDomainObjPtr vm,
|
|
|
|
const char *xmlin,
|
|
|
|
const char *persist_xml,
|
|
|
|
const char *dname,
|
|
|
|
const char *uri,
|
|
|
|
const char *graphicsuri,
|
|
|
|
const char *listenAddress,
|
|
|
|
size_t nmigrate_disks,
|
|
|
|
const char **migrate_disks,
|
|
|
|
int nbdPort,
|
2020-08-24 13:42:31 +00:00
|
|
|
const char *nbdURI,
|
2018-02-21 16:22:29 +00:00
|
|
|
qemuMigrationParamsPtr migParams,
|
2018-02-12 17:11:41 +00:00
|
|
|
unsigned long long bandwidth,
|
|
|
|
bool useParams,
|
|
|
|
unsigned long flags)
|
2011-02-03 11:09:28 +00:00
|
|
|
{
|
|
|
|
virDomainPtr ddomain = NULL;
|
|
|
|
char *uri_out = NULL;
|
|
|
|
char *cookiein = NULL;
|
|
|
|
char *cookieout = NULL;
|
|
|
|
char *dom_xml = NULL;
|
|
|
|
int cookieinlen = 0;
|
|
|
|
int cookieoutlen = 0;
|
|
|
|
int ret = -1;
|
|
|
|
virErrorPtr orig_err = NULL;
|
2013-06-25 13:49:21 +00:00
|
|
|
bool cancelled = true;
|
2011-02-03 11:09:28 +00:00
|
|
|
virStreamPtr st = NULL;
|
2013-07-04 17:58:51 +00:00
|
|
|
unsigned long destflags;
|
2013-06-25 13:49:21 +00:00
|
|
|
virTypedParameterPtr params = NULL;
|
|
|
|
int nparams = 0;
|
|
|
|
int maxparams = 0;
|
2015-06-15 22:42:10 +00:00
|
|
|
size_t i;
|
2018-06-28 09:38:52 +00:00
|
|
|
bool offline = !!(flags & VIR_MIGRATE_OFFLINE);
|
2013-06-25 13:49:21 +00:00
|
|
|
|
|
|
|
VIR_DEBUG("driver=%p, sconn=%p, dconn=%p, dconnuri=%s, vm=%p, xmlin=%s, "
|
2013-10-08 12:41:44 +00:00
|
|
|
"dname=%s, uri=%s, graphicsuri=%s, listenAddress=%s, "
|
2020-11-24 23:19:41 +00:00
|
|
|
"nmigrate_disks=%zu, migrate_disks=%p, nbdPort=%d, nbdURI=%s, "
|
2017-09-25 10:43:33 +00:00
|
|
|
"bandwidth=%llu, useParams=%d, flags=0x%lx",
|
2013-06-25 13:49:21 +00:00
|
|
|
driver, sconn, dconn, NULLSTR(dconnuri), vm, NULLSTR(xmlin),
|
2013-10-08 12:41:44 +00:00
|
|
|
NULLSTR(dname), NULLSTR(uri), NULLSTR(graphicsuri),
|
2016-03-17 14:58:48 +00:00
|
|
|
NULLSTR(listenAddress), nmigrate_disks, migrate_disks, nbdPort,
|
2020-11-24 23:19:41 +00:00
|
|
|
NULLSTR(nbdURI), bandwidth, useParams, flags);
|
2011-02-03 11:09:28 +00:00
|
|
|
|
2011-07-19 00:27:32 +00:00
|
|
|
/* Unlike the virDomainMigrateVersion3 counterpart, we don't need
|
|
|
|
* to worry about auto-setting the VIR_MIGRATE_CHANGE_PROTECTION
|
|
|
|
* bit here, because we are already running inside the context of
|
|
|
|
* a single job. */
|
|
|
|
|
2018-02-12 17:11:41 +00:00
|
|
|
dom_xml = qemuMigrationSrcBeginPhase(driver, vm, xmlin, dname,
|
|
|
|
&cookieout, &cookieoutlen,
|
|
|
|
nmigrate_disks, migrate_disks, flags);
|
2011-02-03 11:09:28 +00:00
|
|
|
if (!dom_xml)
|
|
|
|
goto cleanup;
|
|
|
|
|
2013-06-25 13:49:21 +00:00
|
|
|
if (useParams) {
|
|
|
|
if (virTypedParamsAddString(¶ms, &nparams, &maxparams,
|
|
|
|
VIR_MIGRATE_PARAM_DEST_XML, dom_xml) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
if (dname &&
|
|
|
|
virTypedParamsAddString(¶ms, &nparams, &maxparams,
|
|
|
|
VIR_MIGRATE_PARAM_DEST_NAME, dname) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
if (uri &&
|
|
|
|
virTypedParamsAddString(¶ms, &nparams, &maxparams,
|
|
|
|
VIR_MIGRATE_PARAM_URI, uri) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
if (bandwidth &&
|
|
|
|
virTypedParamsAddULLong(¶ms, &nparams, &maxparams,
|
|
|
|
VIR_MIGRATE_PARAM_BANDWIDTH,
|
|
|
|
bandwidth) < 0)
|
|
|
|
goto cleanup;
|
2013-06-18 10:17:18 +00:00
|
|
|
|
|
|
|
if (graphicsuri &&
|
|
|
|
virTypedParamsAddString(¶ms, &nparams, &maxparams,
|
|
|
|
VIR_MIGRATE_PARAM_GRAPHICS_URI,
|
|
|
|
graphicsuri) < 0)
|
|
|
|
goto cleanup;
|
2013-10-08 12:41:44 +00:00
|
|
|
if (listenAddress &&
|
|
|
|
virTypedParamsAddString(¶ms, &nparams, &maxparams,
|
|
|
|
VIR_MIGRATE_PARAM_LISTEN_ADDRESS,
|
|
|
|
listenAddress) < 0)
|
|
|
|
goto cleanup;
|
2015-06-15 22:42:10 +00:00
|
|
|
for (i = 0; i < nmigrate_disks; i++)
|
|
|
|
if (virTypedParamsAddString(¶ms, &nparams, &maxparams,
|
|
|
|
VIR_MIGRATE_PARAM_MIGRATE_DISKS,
|
|
|
|
migrate_disks[i]) < 0)
|
|
|
|
goto cleanup;
|
2016-03-17 14:58:48 +00:00
|
|
|
if (nbdPort &&
|
|
|
|
virTypedParamsAddInt(¶ms, &nparams, &maxparams,
|
|
|
|
VIR_MIGRATE_PARAM_DISKS_PORT,
|
|
|
|
nbdPort) < 0)
|
|
|
|
goto cleanup;
|
2020-08-24 13:42:31 +00:00
|
|
|
if (nbdURI &&
|
|
|
|
virTypedParamsAddString(¶ms, &nparams, &maxparams,
|
|
|
|
VIR_MIGRATE_PARAM_DISKS_URI,
|
|
|
|
nbdURI) < 0)
|
|
|
|
goto cleanup;
|
2016-04-14 10:33:48 +00:00
|
|
|
|
2018-03-12 14:50:06 +00:00
|
|
|
if (qemuMigrationParamsDump(migParams, ¶ms, &nparams,
|
|
|
|
&maxparams, &flags) < 0)
|
2016-04-14 10:33:48 +00:00
|
|
|
goto cleanup;
|
2013-06-25 13:49:21 +00:00
|
|
|
}
|
|
|
|
|
2011-02-03 11:09:28 +00:00
|
|
|
if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED)
|
|
|
|
flags |= VIR_MIGRATE_PAUSED;
|
|
|
|
|
2014-02-06 23:44:36 +00:00
|
|
|
destflags = flags & ~(VIR_MIGRATE_ABORT_ON_ERROR |
|
|
|
|
VIR_MIGRATE_AUTO_CONVERGE);
|
2013-07-04 17:58:51 +00:00
|
|
|
|
2011-02-03 11:09:28 +00:00
|
|
|
VIR_DEBUG("Prepare3 %p", dconn);
|
2019-10-16 11:43:18 +00:00
|
|
|
cookiein = g_steal_pointer(&cookieout);
|
2011-02-03 11:09:28 +00:00
|
|
|
cookieinlen = cookieoutlen;
|
|
|
|
cookieoutlen = 0;
|
|
|
|
if (flags & VIR_MIGRATE_TUNNELLED) {
|
|
|
|
if (!(st = virStreamNew(dconn, 0)))
|
|
|
|
goto cleanup;
|
|
|
|
|
2013-02-06 18:17:20 +00:00
|
|
|
qemuDomainObjEnterRemote(vm);
|
2013-06-25 13:49:21 +00:00
|
|
|
if (useParams) {
|
|
|
|
ret = dconn->driver->domainMigratePrepareTunnel3Params
|
|
|
|
(dconn, st, params, nparams, cookiein, cookieinlen,
|
|
|
|
&cookieout, &cookieoutlen, destflags);
|
|
|
|
} else {
|
|
|
|
ret = dconn->driver->domainMigratePrepareTunnel3
|
|
|
|
(dconn, st, cookiein, cookieinlen, &cookieout, &cookieoutlen,
|
|
|
|
destflags, dname, bandwidth, dom_xml);
|
|
|
|
}
|
2018-06-28 09:38:52 +00:00
|
|
|
if (qemuDomainObjExitRemote(vm, !offline) < 0)
|
|
|
|
goto cleanup;
|
2011-02-03 11:09:28 +00:00
|
|
|
} else {
|
2013-02-06 18:17:20 +00:00
|
|
|
qemuDomainObjEnterRemote(vm);
|
2013-06-25 13:49:21 +00:00
|
|
|
if (useParams) {
|
|
|
|
ret = dconn->driver->domainMigratePrepare3Params
|
|
|
|
(dconn, params, nparams, cookiein, cookieinlen,
|
|
|
|
&cookieout, &cookieoutlen, &uri_out, destflags);
|
|
|
|
} else {
|
|
|
|
ret = dconn->driver->domainMigratePrepare3
|
|
|
|
(dconn, cookiein, cookieinlen, &cookieout, &cookieoutlen,
|
|
|
|
uri, &uri_out, destflags, dname, bandwidth, dom_xml);
|
|
|
|
}
|
2018-06-28 09:38:52 +00:00
|
|
|
if (qemuDomainObjExitRemote(vm, !offline) < 0)
|
|
|
|
goto cleanup;
|
2011-02-03 11:09:28 +00:00
|
|
|
}
|
|
|
|
VIR_FREE(dom_xml);
|
|
|
|
if (ret == -1)
|
|
|
|
goto cleanup;
|
|
|
|
|
2018-06-28 09:38:52 +00:00
|
|
|
if (offline) {
|
2012-11-21 08:28:49 +00:00
|
|
|
VIR_DEBUG("Offline migration, skipping Perform phase");
|
|
|
|
VIR_FREE(cookieout);
|
|
|
|
cookieoutlen = 0;
|
2013-05-24 10:14:02 +00:00
|
|
|
cancelled = false;
|
2012-11-21 08:28:49 +00:00
|
|
|
goto finish;
|
|
|
|
}
|
|
|
|
|
2013-06-25 13:49:21 +00:00
|
|
|
if (uri_out) {
|
|
|
|
uri = uri_out;
|
|
|
|
if (useParams &&
|
|
|
|
virTypedParamsReplaceString(¶ms, &nparams,
|
2013-12-28 13:40:10 +00:00
|
|
|
VIR_MIGRATE_PARAM_URI, uri_out) < 0) {
|
2018-12-06 17:33:04 +00:00
|
|
|
virErrorPreserveLast(&orig_err);
|
2013-06-25 13:49:21 +00:00
|
|
|
goto finish;
|
2013-12-28 13:40:10 +00:00
|
|
|
}
|
2013-06-25 13:49:21 +00:00
|
|
|
} else if (!uri && !(flags & VIR_MIGRATE_TUNNELLED)) {
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
|
|
|
|
_("domainMigratePrepare3 did not set uri"));
|
2018-12-06 17:33:04 +00:00
|
|
|
virErrorPreserveLast(&orig_err);
|
2011-02-03 11:09:28 +00:00
|
|
|
goto finish;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Perform the migration. The driver isn't supposed to return
|
|
|
|
* until the migration is complete. The src VM should remain
|
|
|
|
* running, but in paused state until the destination can
|
|
|
|
* confirm migration completion.
|
|
|
|
*/
|
2013-06-25 13:49:21 +00:00
|
|
|
VIR_DEBUG("Perform3 %p uri=%s", sconn, NULLSTR(uri));
|
2011-07-19 00:27:32 +00:00
|
|
|
qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_PERFORM3);
|
2011-02-03 11:09:28 +00:00
|
|
|
VIR_FREE(cookiein);
|
2019-10-16 11:43:18 +00:00
|
|
|
cookiein = g_steal_pointer(&cookieout);
|
2011-02-03 11:09:28 +00:00
|
|
|
cookieinlen = cookieoutlen;
|
|
|
|
cookieoutlen = 0;
|
2013-06-25 13:49:21 +00:00
|
|
|
if (flags & VIR_MIGRATE_TUNNELLED) {
|
2018-02-12 17:11:41 +00:00
|
|
|
ret = qemuMigrationSrcPerformTunnel(driver, vm, st, persist_xml,
|
|
|
|
cookiein, cookieinlen,
|
|
|
|
&cookieout, &cookieoutlen,
|
|
|
|
flags, bandwidth, dconn, graphicsuri,
|
2018-03-12 14:20:54 +00:00
|
|
|
nmigrate_disks, migrate_disks,
|
2018-02-12 17:11:41 +00:00
|
|
|
migParams);
|
2013-06-25 13:49:21 +00:00
|
|
|
} else {
|
2018-02-12 17:11:41 +00:00
|
|
|
ret = qemuMigrationSrcPerformNative(driver, vm, persist_xml, uri,
|
|
|
|
cookiein, cookieinlen,
|
|
|
|
&cookieout, &cookieoutlen,
|
|
|
|
flags, bandwidth, dconn, graphicsuri,
|
2018-03-12 14:20:54 +00:00
|
|
|
nmigrate_disks, migrate_disks,
|
2020-08-24 13:42:31 +00:00
|
|
|
migParams, nbdURI);
|
2013-06-25 13:49:21 +00:00
|
|
|
}
|
2011-02-03 11:09:28 +00:00
|
|
|
|
|
|
|
/* Perform failed. Make sure Finish doesn't overwrite the error */
|
2011-07-19 00:27:32 +00:00
|
|
|
if (ret < 0) {
|
2018-12-06 17:33:04 +00:00
|
|
|
virErrorPreserveLast(&orig_err);
|
2011-07-19 00:27:32 +00:00
|
|
|
} else {
|
|
|
|
qemuMigrationJobSetPhase(driver, vm,
|
|
|
|
QEMU_MIGRATION_PHASE_PERFORM3_DONE);
|
|
|
|
}
|
2011-02-03 11:09:28 +00:00
|
|
|
|
|
|
|
/* If Perform returns < 0, then we need to cancel the VM
|
|
|
|
* startup on the destination
|
|
|
|
*/
|
2013-05-24 10:14:02 +00:00
|
|
|
cancelled = ret < 0;
|
2011-02-03 11:09:28 +00:00
|
|
|
|
2014-03-25 06:49:44 +00:00
|
|
|
finish:
|
2011-02-03 11:09:28 +00:00
|
|
|
/*
|
|
|
|
* The status code from the source is passed to the destination.
|
|
|
|
* The dest can clean up if the source indicated it failed to
|
|
|
|
* send all migration data. Returns NULL for ddomain if
|
|
|
|
* the dest was unable to complete migration.
|
|
|
|
*/
|
|
|
|
VIR_DEBUG("Finish3 %p ret=%d", dconn, ret);
|
|
|
|
VIR_FREE(cookiein);
|
2019-10-16 11:43:18 +00:00
|
|
|
cookiein = g_steal_pointer(&cookieout);
|
2011-02-03 11:09:28 +00:00
|
|
|
cookieinlen = cookieoutlen;
|
|
|
|
cookieoutlen = 0;
|
2013-06-25 13:49:21 +00:00
|
|
|
|
|
|
|
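/* Invoke Finish on the destination to complete (or clean up after) the
 * migration; use the extensible-parameters variant when the destination
 * supports it, otherwise fall back to the classic Finish3 call. */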
if (useParams) {
|
|
|
|
if (virTypedParamsGetString(params, nparams,
|
|
|
|
VIR_MIGRATE_PARAM_DEST_NAME, NULL) <= 0 &&
|
|
|
|
virTypedParamsReplaceString(¶ms, &nparams,
|
|
|
|
VIR_MIGRATE_PARAM_DEST_NAME,
|
|
|
|
vm->def->name) < 0) {
|
|
|
|
ddomain = NULL;
|
|
|
|
} else {
|
|
|
|
qemuDomainObjEnterRemote(vm);
|
|
|
|
ddomain = dconn->driver->domainMigrateFinish3Params
|
|
|
|
(dconn, params, nparams, cookiein, cookieinlen,
|
|
|
|
&cookieout, &cookieoutlen, destflags, cancelled);
|
2018-06-28 09:38:52 +00:00
|
|
|
if (qemuDomainObjExitRemote(vm, !offline) < 0)
|
|
|
|
goto cleanup;
|
2013-06-25 13:49:21 +00:00
|
|
|
}
|
|
|
|
} else {
|
|
|
|
dname = dname ? dname : vm->def->name;
|
|
|
|
qemuDomainObjEnterRemote(vm);
|
|
|
|
ddomain = dconn->driver->domainMigrateFinish3
|
|
|
|
(dconn, dname, cookiein, cookieinlen, &cookieout, &cookieoutlen,
|
|
|
|
dconnuri, uri, destflags, cancelled);
|
2018-06-28 09:38:52 +00:00
|
|
|
if (qemuDomainObjExitRemote(vm, !offline) < 0)
|
|
|
|
goto cleanup;
|
2013-06-25 13:49:21 +00:00
|
|
|
}
|
2015-07-02 19:46:56 +00:00
|
|
|
|
|
|
|
if (cancelled) {
|
|
|
|
if (ddomain) {
|
|
|
|
VIR_ERROR(_("finish step ignored that migration was cancelled"));
|
|
|
|
} else {
|
|
|
|
/* If Finish reported a useful error, use it instead of the
|
|
|
|
* original "migration unexpectedly failed" error.
|
|
|
|
*
|
|
|
|
* This is ugly but we can't do better with the APIs we have. We
|
|
|
|
* only replace the error if Finish was called with cancelled == 1
|
|
|
|
* and reported a real error (old libvirt would report an error
|
|
|
|
* from RPC instead of MIGRATE_FINISH_OK), which only happens when
|
|
|
|
* the domain died on the destination. To further reduce the possibility
|
|
|
|
* of false positives we also check that Perform returned
|
|
|
|
* VIR_ERR_OPERATION_FAILED.
|
|
|
|
*/
|
|
|
|
if (orig_err &&
|
|
|
|
orig_err->domain == VIR_FROM_QEMU &&
|
|
|
|
orig_err->code == VIR_ERR_OPERATION_FAILED) {
|
|
|
|
virErrorPtr err = virGetLastError();
|
2015-09-01 10:47:55 +00:00
|
|
|
if (err &&
|
|
|
|
err->domain == VIR_FROM_QEMU &&
|
2015-07-02 19:46:56 +00:00
|
|
|
err->code != VIR_ERR_MIGRATE_FINISH_OK) {
|
|
|
|
virFreeError(orig_err);
|
|
|
|
orig_err = NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2011-02-03 11:09:28 +00:00
|
|
|
|
2011-05-24 12:05:33 +00:00
|
|
|
/* If ddomain is NULL, then we were unable to start
|
|
|
|
* the guest on the target, and must restart on the
|
|
|
|
* source. There is a small chance that the ddomain
|
|
|
|
* is NULL due to an RPC failure, in which case
|
|
|
|
* ddomain could in fact be running on the dest.
|
|
|
|
* The lock manager plugins should take care of
|
|
|
|
* safety in this scenario.
|
2011-02-03 11:09:28 +00:00
|
|
|
*/
|
2013-05-24 10:14:02 +00:00
|
|
|
cancelled = ddomain == NULL;
|
2011-02-03 11:09:28 +00:00
|
|
|
|
2011-05-23 16:48:36 +00:00
|
|
|
/* If finish3 set an error, and we don't have an earlier
|
|
|
|
* one we need to preserve it in case confirm3 overwrites
|
|
|
|
*/
|
|
|
|
if (!orig_err)
|
2018-12-06 17:33:04 +00:00
|
|
|
virErrorPreserveLast(&orig_err);
|
2011-05-23 16:48:36 +00:00
|
|
|
|
2011-02-03 11:09:28 +00:00
|
|
|
/*
|
|
|
|
* If cancelled, then src VM will be restarted, else
|
|
|
|
* it will be killed
|
|
|
|
*/
|
2012-08-02 10:11:20 +00:00
|
|
|
VIR_DEBUG("Confirm3 %p cancelled=%d vm=%p", sconn, cancelled, vm);
|
2011-02-03 11:09:28 +00:00
|
|
|
VIR_FREE(cookiein);
|
2019-10-16 11:43:18 +00:00
|
|
|
cookiein = g_steal_pointer(&cookieout);
|
2011-02-03 11:09:28 +00:00
|
|
|
cookieinlen = cookieoutlen;
|
|
|
|
cookieoutlen = 0;
|
2018-02-12 17:11:41 +00:00
|
|
|
ret = qemuMigrationSrcConfirmPhase(driver, vm,
|
|
|
|
cookiein, cookieinlen,
|
|
|
|
flags, cancelled);
|
2011-02-03 11:09:28 +00:00
|
|
|
/* If Confirm3 returns -1, there's nothing more we can
|
|
|
|
* do, but fortunately worst case is that there is a
|
|
|
|
* domain left in 'paused' state on source.
|
|
|
|
*/
|
2011-08-16 09:24:25 +00:00
|
|
|
if (ret < 0)
|
|
|
|
VIR_WARN("Guest %s probably left in 'paused' state on source",
|
|
|
|
vm->def->name);
|
2011-02-03 11:09:28 +00:00
|
|
|
|
|
|
|
cleanup:
|
|
|
|
if (ddomain) {
|
2012-07-31 16:55:36 +00:00
|
|
|
virObjectUnref(ddomain);
|
2011-02-03 11:09:28 +00:00
|
|
|
ret = 0;
|
|
|
|
} else {
|
|
|
|
ret = -1;
|
|
|
|
}
|
|
|
|
|
2012-07-31 16:55:36 +00:00
|
|
|
virObjectUnref(st);
|
2011-02-03 11:09:28 +00:00
|
|
|
|
2018-12-06 17:33:04 +00:00
|
|
|
virErrorRestore(&orig_err);
|
2011-02-03 11:09:28 +00:00
|
|
|
VIR_FREE(uri_out);
|
|
|
|
VIR_FREE(cookiein);
|
|
|
|
VIR_FREE(cookieout);
|
2013-06-25 13:49:21 +00:00
|
|
|
virTypedParamsFree(params, nparams);
|
2011-02-03 11:09:28 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2015-05-29 06:38:44 +00:00
|
|
|
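/* Close callback registered on the destination connection: wake up any
 * thread waiting on the domain condition so it notices the lost
 * connection. */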
static void
|
2018-02-12 17:11:41 +00:00
|
|
|
qemuMigrationSrcConnectionClosed(virConnectPtr conn,
|
|
|
|
int reason,
|
|
|
|
void *opaque)
|
2015-05-29 06:38:44 +00:00
|
|
|
{
|
|
|
|
virDomainObjPtr vm = opaque;
|
|
|
|
|
|
|
|
VIR_DEBUG("conn=%p, reason=%d, vm=%s", conn, reason, vm->def->name);
|
|
|
|
virDomainObjBroadcast(vm);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2014-04-11 19:17:47 +00:00
|
|
|
static int virConnectCredType[] = {
|
|
|
|
VIR_CRED_AUTHNAME,
|
|
|
|
VIR_CRED_PASSPHRASE,
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
|
|
static virConnectAuth virConnectAuthConfig = {
|
|
|
|
.credtype = virConnectCredType,
|
2019-10-15 11:55:26 +00:00
|
|
|
.ncredtype = G_N_ELEMENTS(virConnectCredType),
|
2014-04-11 19:17:47 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
|
2018-02-12 17:11:41 +00:00
|
|
|
static int
|
|
|
|
qemuMigrationSrcPerformPeer2Peer(virQEMUDriverPtr driver,
|
|
|
|
virConnectPtr sconn,
|
|
|
|
virDomainObjPtr vm,
|
|
|
|
const char *xmlin,
|
|
|
|
const char *persist_xml,
|
|
|
|
const char *dconnuri,
|
|
|
|
const char *uri,
|
|
|
|
const char *graphicsuri,
|
|
|
|
const char *listenAddress,
|
|
|
|
size_t nmigrate_disks,
|
|
|
|
const char **migrate_disks,
|
|
|
|
int nbdPort,
|
2020-08-24 13:42:31 +00:00
|
|
|
const char *nbdURI,
|
2018-02-21 16:22:29 +00:00
|
|
|
qemuMigrationParamsPtr migParams,
|
2018-02-12 17:11:41 +00:00
|
|
|
unsigned long flags,
|
|
|
|
const char *dname,
|
|
|
|
unsigned long resource,
|
|
|
|
bool *v3proto)
|
2011-01-31 10:47:03 +00:00
|
|
|
{
|
|
|
|
int ret = -1;
|
2020-07-13 09:49:52 +00:00
|
|
|
g_autoptr(virConnect) dconn = NULL;
|
2020-12-17 12:28:25 +00:00
|
|
|
int p2p;
|
2011-07-19 14:51:08 +00:00
|
|
|
virErrorPtr orig_err = NULL;
|
2018-06-28 09:38:52 +00:00
|
|
|
bool offline = !!(flags & VIR_MIGRATE_OFFLINE);
|
2021-01-06 15:40:51 +00:00
|
|
|
int dstOffline = 0;
|
2020-07-13 09:49:52 +00:00
|
|
|
g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
|
2020-12-17 12:28:25 +00:00
|
|
|
int useParams;
|
|
|
|
int rc;
|
2011-07-19 14:51:08 +00:00
|
|
|
|
2015-06-15 22:42:10 +00:00
|
|
|
VIR_DEBUG("driver=%p, sconn=%p, vm=%p, xmlin=%s, dconnuri=%s, uri=%s, "
|
|
|
|
"graphicsuri=%s, listenAddress=%s, nmigrate_disks=%zu, "
|
2020-08-24 13:42:31 +00:00
|
|
|
"migrate_disks=%p, nbdPort=%d, nbdURI=%s, flags=0x%lx, "
|
|
|
|
"dname=%s, resource=%lu",
|
2011-05-20 10:03:04 +00:00
|
|
|
driver, sconn, vm, NULLSTR(xmlin), NULLSTR(dconnuri),
|
2013-10-08 12:41:44 +00:00
|
|
|
NULLSTR(uri), NULLSTR(graphicsuri), NULLSTR(listenAddress),
|
2020-08-24 13:42:31 +00:00
|
|
|
nmigrate_disks, migrate_disks, nbdPort, NULLSTR(nbdURI),
|
|
|
|
flags, NULLSTR(dname), resource);
|
2011-01-31 10:47:03 +00:00
|
|
|
|
2015-04-30 13:56:07 +00:00
|
|
|
if (flags & VIR_MIGRATE_TUNNELLED && uri) {
|
|
|
|
virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
|
|
|
|
_("migration URI is not supported by tunnelled "
|
|
|
|
"migration"));
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (flags & VIR_MIGRATE_TUNNELLED && listenAddress) {
|
|
|
|
virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
|
|
|
|
_("listen address is not supported by tunnelled "
|
|
|
|
"migration"));
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2016-03-17 14:58:48 +00:00
|
|
|
if (flags & VIR_MIGRATE_TUNNELLED && nbdPort) {
|
|
|
|
virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
|
|
|
|
_("disk port address is not supported by tunnelled "
|
|
|
|
"migration"));
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2011-01-31 10:47:03 +00:00
|
|
|
/* the order of operations is important here; we make sure the
|
|
|
|
* destination side is completely set up before we touch the source
|
|
|
|
*/
|
|
|
|
|
2013-02-06 18:17:20 +00:00
|
|
|
qemuDomainObjEnterRemote(vm);
|
2014-04-11 19:17:47 +00:00
|
|
|
dconn = virConnectOpenAuth(dconnuri, &virConnectAuthConfig, 0);
|
2018-06-28 09:38:52 +00:00
|
|
|
if (qemuDomainObjExitRemote(vm, !offline) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
2011-01-31 10:47:03 +00:00
|
|
|
if (dconn == NULL) {
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_OPERATION_FAILED,
|
2013-05-28 19:12:01 +00:00
|
|
|
_("Failed to connect to remote libvirt URI %s: %s"),
|
|
|
|
dconnuri, virGetLastErrorMessage());
|
2011-01-31 10:47:03 +00:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2013-01-10 21:03:14 +00:00
|
|
|
if (virConnectSetKeepAlive(dconn, cfg->keepAliveInterval,
|
|
|
|
cfg->keepAliveCount) < 0)
|
2011-09-16 11:50:56 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
2018-02-12 17:11:41 +00:00
|
|
|
if (virConnectRegisterCloseCallback(dconn, qemuMigrationSrcConnectionClosed,
|
2015-05-29 06:38:44 +00:00
|
|
|
vm, NULL) < 0) {
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2013-02-06 18:17:20 +00:00
|
|
|
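/* Probe the destination driver for the migration features we may rely on:
 * p2p control, the v3 protocol, extensible parameters and, when an offline
 * migration was requested, offline migration support. */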
qemuDomainObjEnterRemote(vm);
|
2011-01-31 10:47:03 +00:00
|
|
|
p2p = VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
|
|
|
|
VIR_DRV_FEATURE_MIGRATION_P2P);
|
2020-12-17 12:28:25 +00:00
|
|
|
if (p2p < 0)
|
|
|
|
goto cleanup;
|
|
|
|
/* v3proto reflects whether the caller used Perform3, but with
|
|
|
|
* p2p migration, regardless of whether Perform2 or Perform3
|
|
|
|
* was used, we decide the protocol based on what the target supports
|
|
|
|
*/
|
|
|
|
rc = VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
|
|
|
|
VIR_DRV_FEATURE_MIGRATION_V3);
|
|
|
|
if (rc < 0)
|
|
|
|
goto cleanup;
|
|
|
|
*v3proto = !!rc;
|
2013-06-25 13:49:21 +00:00
|
|
|
useParams = VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
|
|
|
|
VIR_DRV_FEATURE_MIGRATION_PARAMS);
|
2020-12-17 12:28:25 +00:00
|
|
|
if (useParams < 0)
|
|
|
|
goto cleanup;
|
|
|
|
if (offline) {
|
2018-06-28 12:09:47 +00:00
|
|
|
dstOffline = VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
|
|
|
|
VIR_DRV_FEATURE_MIGRATION_OFFLINE);
|
2020-12-17 12:28:25 +00:00
|
|
|
if (dstOffline < 0)
|
|
|
|
goto cleanup;
|
|
|
|
}
|
2018-06-28 09:38:52 +00:00
|
|
|
if (qemuDomainObjExitRemote(vm, !offline) < 0)
|
|
|
|
goto cleanup;
|
2011-02-03 11:09:28 +00:00
|
|
|
|
2011-01-31 10:47:03 +00:00
|
|
|
if (!p2p) {
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_OPERATION_FAILED, "%s",
|
|
|
|
_("Destination libvirt does not support peer-to-peer migration protocol"));
|
2011-01-31 10:47:03 +00:00
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2013-06-25 13:49:21 +00:00
|
|
|
/* Only xmlin, dname, uri, and bandwidth parameters can be used with
|
|
|
|
* old-style APIs. */
|
2015-06-15 22:42:10 +00:00
|
|
|
if (!useParams && (graphicsuri || listenAddress || nmigrate_disks)) {
|
2013-06-25 13:49:21 +00:00
|
|
|
virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
|
|
|
|
_("Migration APIs with extensible parameters are not "
|
|
|
|
"supported but extended parameters were passed"));
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2018-06-28 09:38:52 +00:00
|
|
|
if (offline && !dstOffline) {
|
2012-11-21 08:28:49 +00:00
|
|
|
virReportError(VIR_ERR_ARGUMENT_UNSUPPORTED, "%s",
|
|
|
|
_("offline migration is not supported by "
|
|
|
|
"the destination host"));
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2011-07-19 00:27:32 +00:00
|
|
|
/* Change protection is only required on the source side (us), and
|
|
|
|
* only for v3 migration when begin and perform are separate jobs.
|
|
|
|
* But peer-2-peer is already a single job, and we still want to
|
|
|
|
* talk to older destinations that would reject the flag.
|
|
|
|
* Therefore it is safe to clear the bit here. */
|
|
|
|
flags &= ~VIR_MIGRATE_CHANGE_PROTECTION;
|
|
|
|
|
2013-06-25 13:49:21 +00:00
|
|
|
if (*v3proto) {
|
2018-02-12 17:11:41 +00:00
|
|
|
ret = qemuMigrationSrcPerformPeer2Peer3(driver, sconn, dconn, dconnuri, vm, xmlin,
|
|
|
|
persist_xml, dname, uri, graphicsuri,
|
|
|
|
listenAddress, nmigrate_disks, migrate_disks,
|
2020-08-24 13:42:31 +00:00
|
|
|
nbdPort, nbdURI, migParams, resource,
|
2020-12-17 12:28:25 +00:00
|
|
|
!!useParams, flags);
|
2013-06-25 13:49:21 +00:00
|
|
|
} else {
|
2018-02-12 17:11:41 +00:00
|
|
|
ret = qemuMigrationSrcPerformPeer2Peer2(driver, sconn, dconn, vm,
|
2018-03-07 14:32:26 +00:00
|
|
|
dconnuri, flags, dname, resource,
|
2018-03-12 14:20:54 +00:00
|
|
|
migParams);
|
2013-06-25 13:49:21 +00:00
|
|
|
}
|
2011-01-31 10:47:03 +00:00
|
|
|
|
2014-03-25 06:49:44 +00:00
|
|
|
cleanup:
|
2018-12-06 17:33:04 +00:00
|
|
|
virErrorPreserveLast(&orig_err);
|
2013-02-06 18:17:20 +00:00
|
|
|
qemuDomainObjEnterRemote(vm);
|
2018-02-12 17:11:41 +00:00
|
|
|
virConnectUnregisterCloseCallback(dconn, qemuMigrationSrcConnectionClosed);
|
2018-06-28 09:38:52 +00:00
|
|
|
ignore_value(qemuDomainObjExitRemote(vm, false));
|
2018-12-06 17:33:04 +00:00
|
|
|
virErrorRestore(&orig_err);
|
2011-01-31 10:47:03 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-07-19 00:27:32 +00:00
|
|
|
/*
|
|
|
|
* This implements the perform part of the migration protocol when the migration job
|
|
|
|
* does not need to be active across several APIs, i.e., peer2peer migration or
|
|
|
|
* perform phase of v2 non-peer2peer migration.
|
|
|
|
*/
|
|
|
|
static int
|
2018-02-12 17:11:41 +00:00
|
|
|
qemuMigrationSrcPerformJob(virQEMUDriverPtr driver,
|
|
|
|
virConnectPtr conn,
|
|
|
|
virDomainObjPtr vm,
|
|
|
|
const char *xmlin,
|
|
|
|
const char *persist_xml,
|
|
|
|
const char *dconnuri,
|
|
|
|
const char *uri,
|
|
|
|
const char *graphicsuri,
|
|
|
|
const char *listenAddress,
|
|
|
|
size_t nmigrate_disks,
|
|
|
|
const char **migrate_disks,
|
|
|
|
int nbdPort,
|
2020-08-24 13:42:31 +00:00
|
|
|
const char *nbdURI,
|
2018-02-21 16:22:29 +00:00
|
|
|
qemuMigrationParamsPtr migParams,
|
2018-02-12 17:11:41 +00:00
|
|
|
const char *cookiein,
|
|
|
|
int cookieinlen,
|
|
|
|
char **cookieout,
|
|
|
|
int *cookieoutlen,
|
|
|
|
unsigned long flags,
|
|
|
|
const char *dname,
|
|
|
|
unsigned long resource,
|
|
|
|
bool v3proto)
|
2011-01-31 10:47:03 +00:00
|
|
|
{
|
2013-11-22 14:38:05 +00:00
|
|
|
virObjectEventPtr event = NULL;
|
2011-01-31 10:47:03 +00:00
|
|
|
int ret = -1;
|
2012-04-20 12:07:49 +00:00
|
|
|
virErrorPtr orig_err = NULL;
|
2020-07-13 09:49:52 +00:00
|
|
|
g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
|
2018-02-27 16:09:17 +00:00
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
2020-07-16 11:48:34 +00:00
|
|
|
qemuDomainJobPrivatePtr jobPriv = priv->job.privateData;
|
2011-01-31 10:47:03 +00:00
|
|
|
|
2018-03-21 12:01:59 +00:00
|
|
|
if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
|
|
|
|
flags) < 0)
|
2011-01-31 10:47:03 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
2018-05-13 22:32:13 +00:00
|
|
|
if (!(flags & VIR_MIGRATE_OFFLINE) && virDomainObjCheckActive(vm) < 0)
|
2011-01-31 10:47:03 +00:00
|
|
|
goto endjob;
|
|
|
|
|
2018-02-12 17:11:41 +00:00
|
|
|
if (!qemuMigrationSrcIsAllowed(driver, vm, true, flags))
|
2012-10-17 12:08:17 +00:00
|
|
|
goto endjob;
|
2011-06-23 10:41:57 +00:00
|
|
|
|
2017-08-17 16:36:46 +00:00
|
|
|
if (!(flags & (VIR_MIGRATE_UNSAFE | VIR_MIGRATE_OFFLINE)) &&
|
2019-08-13 13:17:53 +00:00
|
|
|
!qemuMigrationSrcIsSafe(vm->def, priv->qemuCaps,
|
|
|
|
nmigrate_disks, migrate_disks, flags))
|
2012-10-17 12:08:17 +00:00
|
|
|
goto endjob;
|
2012-02-21 12:20:06 +00:00
|
|
|
|
2018-02-12 17:11:41 +00:00
|
|
|
qemuMigrationSrcStoreDomainState(vm);
|
2011-01-31 10:47:03 +00:00
|
|
|
|
|
|
|
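/* For p2p or tunnelled migration the source libvirtd drives the whole
 * protocol against the destination itself; otherwise this is the Perform
 * phase of the v2 protocol and only the native transfer is run here. */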
if ((flags & (VIR_MIGRATE_TUNNELLED | VIR_MIGRATE_PEER2PEER))) {
|
2018-02-12 17:11:41 +00:00
|
|
|
ret = qemuMigrationSrcPerformPeer2Peer(driver, conn, vm, xmlin, persist_xml,
|
|
|
|
dconnuri, uri, graphicsuri, listenAddress,
|
|
|
|
nmigrate_disks, migrate_disks, nbdPort,
|
2020-08-24 13:42:31 +00:00
|
|
|
nbdURI,
|
2018-03-12 14:50:06 +00:00
|
|
|
migParams, flags, dname, resource,
|
2018-02-12 17:11:41 +00:00
|
|
|
&v3proto);
|
2011-01-31 10:47:03 +00:00
|
|
|
} else {
|
2011-07-19 00:27:32 +00:00
|
|
|
qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_PERFORM2);
|
2018-02-12 17:11:41 +00:00
|
|
|
ret = qemuMigrationSrcPerformNative(driver, vm, persist_xml, uri, cookiein, cookieinlen,
|
|
|
|
cookieout, cookieoutlen,
|
|
|
|
flags, resource, NULL, NULL, 0, NULL,
|
2020-08-24 13:42:31 +00:00
|
|
|
migParams, nbdURI);
|
2011-01-31 10:47:03 +00:00
|
|
|
}
|
2011-07-19 00:27:32 +00:00
|
|
|
if (ret < 0)
|
|
|
|
goto endjob;
|
2011-01-31 10:47:03 +00:00
|
|
|
|
2011-05-23 12:50:11 +00:00
|
|
|
/*
|
|
|
|
* In v3 protocol, the source VM is not killed off until the
|
|
|
|
* confirm step.
|
|
|
|
*/
|
2011-07-19 00:27:32 +00:00
|
|
|
if (!v3proto) {
|
2012-06-11 13:20:44 +00:00
|
|
|
qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_MIGRATED,
|
2016-02-11 10:20:28 +00:00
|
|
|
QEMU_ASYNC_JOB_MIGRATION_OUT,
|
2012-06-11 13:20:44 +00:00
|
|
|
VIR_QEMU_PROCESS_STOP_MIGRATED);
|
2011-07-04 10:56:13 +00:00
|
|
|
virDomainAuditStop(vm, "migrated");
|
2013-11-21 17:03:26 +00:00
|
|
|
event = virDomainEventLifecycleNewFromObj(vm,
|
2011-02-03 11:09:28 +00:00
|
|
|
VIR_DOMAIN_EVENT_STOPPED,
|
|
|
|
VIR_DOMAIN_EVENT_STOPPED_MIGRATED);
|
2011-01-31 10:47:03 +00:00
|
|
|
}
|
|
|
|
|
2014-03-25 06:49:44 +00:00
|
|
|
endjob:
|
2012-04-20 12:07:49 +00:00
|
|
|
if (ret < 0)
|
2018-12-06 17:33:04 +00:00
|
|
|
virErrorPreserveLast(&orig_err);
|
2012-04-20 12:07:49 +00:00
|
|
|
|
2017-04-05 08:24:47 +00:00
|
|
|
/* v2 proto has no confirm phase so we need to reset migration parameters
|
|
|
|
* here
|
|
|
|
*/
|
|
|
|
if (!v3proto && ret < 0)
|
2018-02-27 16:09:17 +00:00
|
|
|
qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
|
2020-07-16 11:48:34 +00:00
|
|
|
jobPriv->migParams, priv->job.apiFlags);
|
2017-04-05 08:24:47 +00:00
|
|
|
|
2018-09-12 12:34:33 +00:00
|
|
|
qemuMigrationSrcRestoreDomainState(driver, vm);
|
2011-07-19 00:27:32 +00:00
|
|
|
|
2014-12-04 13:41:36 +00:00
|
|
|
qemuMigrationJobFinish(driver, vm);
|
2015-09-22 13:25:00 +00:00
|
|
|
if (!virDomainObjIsActive(vm) && ret == 0) {
|
2015-10-27 08:53:59 +00:00
|
|
|
if (flags & VIR_MIGRATE_UNDEFINE_SOURCE) {
|
2013-01-10 21:03:14 +00:00
|
|
|
virDomainDeleteConfig(cfg->configDir, cfg->autostartDir, vm);
|
2015-10-27 08:53:59 +00:00
|
|
|
vm->persistent = 0;
|
|
|
|
}
|
2017-08-15 07:12:43 +00:00
|
|
|
qemuDomainRemoveInactiveJob(driver, vm);
|
2011-07-19 00:27:32 +00:00
|
|
|
}
|
|
|
|
|
2018-12-06 17:33:04 +00:00
|
|
|
virErrorRestore(&orig_err);
|
2012-04-20 12:07:49 +00:00
|
|
|
|
2014-03-25 06:49:44 +00:00
|
|
|
cleanup:
|
2018-06-12 17:33:02 +00:00
|
|
|
virObjectEventStateQueue(driver->domainEventState, event);
|
2011-07-19 00:27:32 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This implements perform phase of v3 migration protocol.
|
|
|
|
*/
|
|
|
|
static int
|
2018-02-12 17:11:41 +00:00
|
|
|
qemuMigrationSrcPerformPhase(virQEMUDriverPtr driver,
|
|
|
|
virConnectPtr conn,
|
|
|
|
virDomainObjPtr vm,
|
|
|
|
const char *persist_xml,
|
|
|
|
const char *uri,
|
|
|
|
const char *graphicsuri,
|
|
|
|
size_t nmigrate_disks,
|
|
|
|
const char **migrate_disks,
|
2018-02-21 16:22:29 +00:00
|
|
|
qemuMigrationParamsPtr migParams,
|
2018-02-12 17:11:41 +00:00
|
|
|
const char *cookiein,
|
|
|
|
int cookieinlen,
|
|
|
|
char **cookieout,
|
|
|
|
int *cookieoutlen,
|
|
|
|
unsigned long flags,
|
2020-08-24 13:42:31 +00:00
|
|
|
unsigned long resource,
|
|
|
|
const char *nbdURI)
|
2011-07-19 00:27:32 +00:00
|
|
|
{
|
2018-02-27 16:09:17 +00:00
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
2020-07-16 11:48:34 +00:00
|
|
|
qemuDomainJobPrivatePtr jobPriv = priv->job.privateData;
|
2011-07-19 00:27:32 +00:00
|
|
|
int ret = -1;
|
|
|
|
|
|
|
|
/* If we didn't start the job in the begin phase, start it now. */
|
|
|
|
if (!(flags & VIR_MIGRATE_CHANGE_PROTECTION)) {
|
2018-03-21 12:01:59 +00:00
|
|
|
if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
|
|
|
|
flags) < 0)
|
2019-11-12 20:46:27 +00:00
|
|
|
return ret;
|
2011-07-19 00:27:32 +00:00
|
|
|
} else if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_OUT)) {
|
2019-11-12 20:46:27 +00:00
|
|
|
return ret;
|
2011-07-19 00:27:32 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
qemuMigrationJobStartPhase(driver, vm, QEMU_MIGRATION_PHASE_PERFORM3);
|
2013-07-15 14:53:13 +00:00
|
|
|
virCloseCallbacksUnset(driver->closeCallbacks, vm,
|
2018-02-12 17:11:41 +00:00
|
|
|
qemuMigrationSrcCleanup);
|
2011-07-19 00:27:32 +00:00
|
|
|
|
2018-02-12 17:11:41 +00:00
|
|
|
ret = qemuMigrationSrcPerformNative(driver, vm, persist_xml, uri, cookiein, cookieinlen,
|
|
|
|
cookieout, cookieoutlen,
|
|
|
|
flags, resource, NULL, graphicsuri,
|
2020-08-24 13:42:31 +00:00
|
|
|
nmigrate_disks, migrate_disks, migParams, nbdURI);
|
2011-07-19 00:27:32 +00:00
|
|
|
|
2014-02-06 13:30:59 +00:00
|
|
|
if (ret < 0) {
|
2018-09-12 12:34:33 +00:00
|
|
|
qemuMigrationSrcRestoreDomainState(driver, vm);
|
2011-07-19 00:27:32 +00:00
|
|
|
goto endjob;
|
2014-02-06 13:30:59 +00:00
|
|
|
}
|
2011-07-19 00:27:32 +00:00
|
|
|
|
|
|
|
qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_PERFORM3_DONE);
|
|
|
|
|
2013-07-15 14:53:13 +00:00
|
|
|
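/* Re-register the close callback that was unset before Perform so the
 * migration is cleaned up if the client connection driving it goes away
 * before Confirm. */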
if (virCloseCallbacksSet(driver->closeCallbacks, vm, conn,
|
2018-02-12 17:11:41 +00:00
|
|
|
qemuMigrationSrcCleanup) < 0)
|
2012-03-19 15:48:43 +00:00
|
|
|
goto endjob;
|
|
|
|
|
2014-03-25 06:49:44 +00:00
|
|
|
endjob:
|
2017-04-26 19:46:28 +00:00
|
|
|
if (ret < 0) {
|
2018-02-27 16:09:17 +00:00
|
|
|
qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
|
2020-07-16 11:48:34 +00:00
|
|
|
jobPriv->migParams, priv->job.apiFlags);
|
2014-12-04 13:41:36 +00:00
|
|
|
qemuMigrationJobFinish(driver, vm);
|
2017-04-26 19:46:28 +00:00
|
|
|
} else {
|
2014-12-04 13:41:36 +00:00
|
|
|
qemuMigrationJobContinue(vm);
|
2017-04-26 19:46:28 +00:00
|
|
|
}
|
|
|
|
|
2015-09-22 13:25:00 +00:00
|
|
|
if (!virDomainObjIsActive(vm))
|
2017-08-15 07:12:43 +00:00
|
|
|
qemuDomainRemoveInactiveJob(driver, vm);
|
2011-01-31 10:47:03 +00:00
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2011-07-19 00:27:32 +00:00
|
|
|
int
|
2018-02-12 17:11:41 +00:00
|
|
|
qemuMigrationSrcPerform(virQEMUDriverPtr driver,
|
|
|
|
virConnectPtr conn,
|
|
|
|
virDomainObjPtr vm,
|
|
|
|
const char *xmlin,
|
|
|
|
const char *persist_xml,
|
|
|
|
const char *dconnuri,
|
|
|
|
const char *uri,
|
|
|
|
const char *graphicsuri,
|
|
|
|
const char *listenAddress,
|
|
|
|
size_t nmigrate_disks,
|
|
|
|
const char **migrate_disks,
|
|
|
|
int nbdPort,
|
2020-08-24 13:42:31 +00:00
|
|
|
const char *nbdURI,
|
2018-02-21 16:22:29 +00:00
|
|
|
qemuMigrationParamsPtr migParams,
|
2018-02-12 17:11:41 +00:00
|
|
|
const char *cookiein,
|
|
|
|
int cookieinlen,
|
|
|
|
char **cookieout,
|
|
|
|
int *cookieoutlen,
|
|
|
|
unsigned long flags,
|
|
|
|
const char *dname,
|
|
|
|
unsigned long resource,
|
|
|
|
bool v3proto)
|
2011-07-19 00:27:32 +00:00
|
|
|
{
|
2020-11-24 13:08:04 +00:00
|
|
|
g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
|
|
|
|
|
2011-07-19 00:27:32 +00:00
|
|
|
VIR_DEBUG("driver=%p, conn=%p, vm=%p, xmlin=%s, dconnuri=%s, "
|
2015-06-15 22:42:10 +00:00
|
|
|
"uri=%s, graphicsuri=%s, listenAddress=%s, "
|
2016-03-17 14:58:48 +00:00
|
|
|
"nmigrate_disks=%zu, migrate_disks=%p, nbdPort=%d, "
|
2020-08-24 13:42:31 +00:00
|
|
|
"nbdURI=%s, "
|
2013-06-18 10:17:18 +00:00
|
|
|
"cookiein=%s, cookieinlen=%d, cookieout=%p, cookieoutlen=%p, "
|
2017-09-25 10:43:33 +00:00
|
|
|
"flags=0x%lx, dname=%s, resource=%lu, v3proto=%d",
|
2011-07-19 00:27:32 +00:00
|
|
|
driver, conn, vm, NULLSTR(xmlin), NULLSTR(dconnuri),
|
2013-10-11 12:15:32 +00:00
|
|
|
NULLSTR(uri), NULLSTR(graphicsuri), NULLSTR(listenAddress),
|
2020-08-24 13:42:31 +00:00
|
|
|
nmigrate_disks, migrate_disks, nbdPort, NULLSTR(nbdURI),
|
2016-03-17 14:58:48 +00:00
|
|
|
NULLSTR(cookiein), cookieinlen, cookieout, cookieoutlen,
|
|
|
|
flags, NULLSTR(dname), resource, v3proto);
|
2011-07-19 00:27:32 +00:00
|
|
|
|
2020-11-24 13:08:04 +00:00
|
|
|
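/* Honour the host-level migrateTLSForce configuration: refuse any
 * non-tunnelled migration that was not explicitly requested with
 * VIR_MIGRATE_TLS. */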
if (cfg->migrateTLSForce &&
|
|
|
|
!(flags & VIR_MIGRATE_TUNNELLED) &&
|
|
|
|
!(flags & VIR_MIGRATE_TLS)) {
|
|
|
|
virReportError(VIR_ERR_OPERATION_INVALID, "%s",
|
|
|
|
_("this libvirtd instance allows migration only with VIR_MIGRATE_TLS flag"));
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2011-07-19 00:27:32 +00:00
|
|
|
if ((flags & (VIR_MIGRATE_TUNNELLED | VIR_MIGRATE_PEER2PEER))) {
|
|
|
|
if (cookieinlen) {
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_OPERATION_INVALID,
|
|
|
|
"%s", _("received unexpected cookie with P2P migration"));
|
2011-07-19 00:27:32 +00:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2018-02-12 17:11:41 +00:00
|
|
|
return qemuMigrationSrcPerformJob(driver, conn, vm, xmlin, persist_xml, dconnuri, uri,
|
|
|
|
graphicsuri, listenAddress,
|
|
|
|
nmigrate_disks, migrate_disks, nbdPort,
|
2020-08-24 13:42:31 +00:00
|
|
|
nbdURI, migParams,
|
2018-02-12 17:11:41 +00:00
|
|
|
cookiein, cookieinlen,
|
|
|
|
cookieout, cookieoutlen,
|
|
|
|
flags, dname, resource, v3proto);
|
2011-07-19 00:27:32 +00:00
|
|
|
} else {
|
|
|
|
if (dconnuri) {
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
"%s", _("Unexpected dconnuri parameter with non-peer2peer migration"));
|
2011-07-19 00:27:32 +00:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (v3proto) {
|
2018-02-12 17:11:41 +00:00
|
|
|
return qemuMigrationSrcPerformPhase(driver, conn, vm, persist_xml, uri,
|
|
|
|
graphicsuri,
|
|
|
|
nmigrate_disks, migrate_disks,
|
2018-03-12 14:20:54 +00:00
|
|
|
migParams,
|
2018-02-12 17:11:41 +00:00
|
|
|
cookiein, cookieinlen,
|
|
|
|
cookieout, cookieoutlen,
|
2020-08-24 13:42:31 +00:00
|
|
|
flags, resource, nbdURI);
|
2011-07-19 00:27:32 +00:00
|
|
|
} else {
|
2018-02-12 17:11:41 +00:00
|
|
|
return qemuMigrationSrcPerformJob(driver, conn, vm, xmlin, persist_xml, NULL,
|
|
|
|
uri, graphicsuri, listenAddress,
|
|
|
|
nmigrate_disks, migrate_disks, nbdPort,
|
2020-08-24 13:42:31 +00:00
|
|
|
nbdURI, migParams,
|
2018-02-12 17:11:41 +00:00
|
|
|
cookiein, cookieinlen,
|
|
|
|
cookieout, cookieoutlen, flags,
|
|
|
|
dname, resource, v3proto);
|
2011-07-19 00:27:32 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2011-01-31 10:47:03 +00:00
|
|
|
|
2012-03-27 20:00:01 +00:00
|
|
|
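/* Associate port profiles with macvtap (type='direct') interfaces on the
 * incoming side of a migration; on failure, roll back the profiles that
 * were already associated. */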
static int
|
2018-02-12 17:11:41 +00:00
|
|
|
qemuMigrationDstVPAssociatePortProfiles(virDomainDefPtr def)
|
2014-03-18 08:15:21 +00:00
|
|
|
{
|
2013-07-08 14:09:33 +00:00
|
|
|
size_t i;
|
2011-01-31 10:47:03 +00:00
|
|
|
int last_good_net = -1;
|
|
|
|
virDomainNetDefPtr net;
|
|
|
|
|
|
|
|
for (i = 0; i < def->nnets; i++) {
|
|
|
|
net = def->nets[i];
|
2011-07-04 01:57:45 +00:00
|
|
|
if (virDomainNetGetActualType(net) == VIR_DOMAIN_NET_TYPE_DIRECT) {
|
2011-11-02 16:51:01 +00:00
|
|
|
if (virNetDevVPortProfileAssociate(net->ifname,
|
2012-02-15 19:19:32 +00:00
|
|
|
virDomainNetGetActualVirtPortProfile(net),
|
2012-07-17 12:07:59 +00:00
|
|
|
&net->mac,
|
2011-11-02 16:51:01 +00:00
|
|
|
virDomainNetGetActualDirectDev(net),
|
2012-03-06 01:12:39 +00:00
|
|
|
-1,
|
2011-11-02 16:51:01 +00:00
|
|
|
def->uuid,
|
2012-03-27 20:00:01 +00:00
|
|
|
VIR_NETDEV_VPORT_PROFILE_OP_MIGRATE_IN_FINISH,
|
|
|
|
false) < 0) {
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_OPERATION_FAILED,
|
|
|
|
_("Port profile Associate failed for %s"),
|
|
|
|
net->ifname);
|
2011-01-31 10:47:03 +00:00
|
|
|
goto err_exit;
|
2012-03-27 20:00:01 +00:00
|
|
|
}
|
2015-04-07 18:40:15 +00:00
|
|
|
last_good_net = i;
|
2012-03-27 20:00:01 +00:00
|
|
|
VIR_DEBUG("Port profile Associate succeeded for %s", net->ifname);
|
2012-03-29 11:15:00 +00:00
|
|
|
|
2012-07-17 12:07:59 +00:00
|
|
|
if (virNetDevMacVLanVPortProfileRegisterCallback(net->ifname, &net->mac,
|
2012-03-29 11:15:00 +00:00
|
|
|
virDomainNetGetActualDirectDev(net), def->uuid,
|
|
|
|
virDomainNetGetActualVirtPortProfile(net),
|
|
|
|
VIR_NETDEV_VPORT_PROFILE_OP_CREATE))
|
|
|
|
goto err_exit;
|
2011-01-31 10:47:03 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-03-27 20:00:01 +00:00
|
|
|
return 0;
|
2011-01-31 10:47:03 +00:00
|
|
|
|
2014-03-25 06:49:44 +00:00
|
|
|
err_exit:
|
2015-04-07 18:40:15 +00:00
|
|
|
for (i = 0; last_good_net != -1 && i <= last_good_net; i++) {
|
2011-01-31 10:47:03 +00:00
|
|
|
net = def->nets[i];
|
2011-07-04 01:57:45 +00:00
|
|
|
if (virDomainNetGetActualType(net) == VIR_DOMAIN_NET_TYPE_DIRECT) {
|
2011-11-02 16:51:01 +00:00
|
|
|
ignore_value(virNetDevVPortProfileDisassociate(net->ifname,
|
2012-02-15 19:19:32 +00:00
|
|
|
virDomainNetGetActualVirtPortProfile(net),
|
2012-07-17 12:07:59 +00:00
|
|
|
&net->mac,
|
2011-11-02 16:51:01 +00:00
|
|
|
virDomainNetGetActualDirectDev(net),
|
2012-03-06 01:12:39 +00:00
|
|
|
-1,
|
2011-11-02 16:51:01 +00:00
|
|
|
VIR_NETDEV_VPORT_PROFILE_OP_MIGRATE_IN_FINISH));
|
2011-01-31 10:47:03 +00:00
|
|
|
}
|
|
|
|
}
|
2012-03-27 20:00:01 +00:00
|
|
|
return -1;
|
2011-01-31 10:47:03 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2015-07-07 10:11:37 +00:00
|
|
|
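/* Make the incoming domain persistent using the persistent definition
 * carried in the migration cookie; emits a DEFINED lifecycle event on
 * success and restores the previous in-memory state on failure. */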
static int
|
2018-02-12 17:11:41 +00:00
|
|
|
qemuMigrationDstPersist(virQEMUDriverPtr driver,
|
|
|
|
virDomainObjPtr vm,
|
|
|
|
qemuMigrationCookiePtr mig,
|
|
|
|
bool ignoreSaveError)
|
2015-07-07 10:11:37 +00:00
|
|
|
{
|
2020-07-13 09:49:53 +00:00
|
|
|
g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
|
2019-08-06 11:41:42 +00:00
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
2015-07-07 10:11:37 +00:00
|
|
|
virDomainDefPtr vmdef;
|
2020-07-13 09:49:53 +00:00
|
|
|
g_autoptr(virDomainDef) oldDef = NULL;
|
2015-09-10 22:14:59 +00:00
|
|
|
unsigned int oldPersist = vm->persistent;
|
2015-07-07 10:11:37 +00:00
|
|
|
virObjectEventPtr event;
|
|
|
|
|
|
|
|
vm->persistent = 1;
|
2015-09-10 22:14:59 +00:00
|
|
|
oldDef = vm->newDef;
|
|
|
|
vm->newDef = qemuMigrationCookieGetPersistent(mig);
|
2015-07-07 10:11:37 +00:00
|
|
|
|
2019-11-27 12:41:59 +00:00
|
|
|
if (!(vmdef = virDomainObjGetPersistentDef(driver->xmlopt, vm,
|
2019-08-06 11:41:42 +00:00
|
|
|
priv->qemuCaps)))
|
2015-09-10 22:14:59 +00:00
|
|
|
goto error;
|
2015-07-07 10:11:37 +00:00
|
|
|
|
2020-11-03 07:16:21 +00:00
|
|
|
if (!oldPersist && qemuDomainNamePathsCleanup(cfg, vmdef->name, false) < 0)
|
|
|
|
goto error;
|
|
|
|
|
2019-11-27 12:53:10 +00:00
|
|
|
if (virDomainDefSave(vmdef, driver->xmlopt, cfg->configDir) < 0 &&
|
2016-02-03 21:40:36 +00:00
|
|
|
!ignoreSaveError)
|
2015-09-10 22:14:59 +00:00
|
|
|
goto error;
|
2015-07-07 10:11:37 +00:00
|
|
|
|
|
|
|
event = virDomainEventLifecycleNewFromObj(vm,
|
|
|
|
VIR_DOMAIN_EVENT_DEFINED,
|
2015-09-10 22:14:59 +00:00
|
|
|
oldPersist ?
|
|
|
|
VIR_DOMAIN_EVENT_DEFINED_UPDATED :
|
|
|
|
VIR_DOMAIN_EVENT_DEFINED_ADDED);
|
2018-06-12 17:33:02 +00:00
|
|
|
virObjectEventStateQueue(driver->domainEventState, event);
|
2015-07-07 10:11:37 +00:00
|
|
|
|
2020-07-13 09:49:53 +00:00
|
|
|
return 0;
|
2015-09-10 22:14:59 +00:00
|
|
|
|
|
|
|
error:
|
|
|
|
virDomainDefFree(vm->newDef);
|
|
|
|
vm->persistent = oldPersist;
|
|
|
|
vm->newDef = oldDef;
|
|
|
|
oldDef = NULL;
|
2020-07-13 09:49:53 +00:00
|
|
|
return -1;
|
2015-07-07 10:11:37 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-01-31 10:47:03 +00:00
|
|
|
virDomainPtr
|
2018-02-12 17:11:41 +00:00
|
|
|
qemuMigrationDstFinish(virQEMUDriverPtr driver,
|
|
|
|
virConnectPtr dconn,
|
|
|
|
virDomainObjPtr vm,
|
|
|
|
const char *cookiein,
|
|
|
|
int cookieinlen,
|
|
|
|
char **cookieout,
|
|
|
|
int *cookieoutlen,
|
|
|
|
unsigned long flags,
|
|
|
|
int retcode,
|
|
|
|
bool v3proto)
|
2011-01-31 10:47:03 +00:00
|
|
|
{
|
|
|
|
virDomainPtr dom = NULL;
|
2020-07-13 09:49:54 +00:00
|
|
|
g_autoptr(qemuMigrationCookie) mig = NULL;
|
2011-07-19 00:27:31 +00:00
|
|
|
virErrorPtr orig_err = NULL;
|
2011-09-15 13:13:11 +00:00
|
|
|
int cookie_flags = 0;
|
2011-10-04 07:11:35 +00:00
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
2020-07-16 11:48:34 +00:00
|
|
|
qemuDomainJobPrivatePtr jobPriv = priv->job.privateData;
|
2020-07-13 09:49:54 +00:00
|
|
|
g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
|
2013-11-06 03:36:57 +00:00
|
|
|
unsigned short port;
|
2015-12-15 12:19:08 +00:00
|
|
|
unsigned long long timeReceived = 0;
|
|
|
|
virObjectEventPtr event;
|
2016-02-18 13:02:15 +00:00
|
|
|
qemuDomainJobInfoPtr jobInfo = NULL;
|
2015-11-26 14:37:23 +00:00
|
|
|
bool inPostCopy = false;
|
2016-04-12 13:41:28 +00:00
|
|
|
bool doKill = true;
|
2011-07-19 00:27:31 +00:00
|
|
|
|
2011-05-20 10:03:04 +00:00
|
|
|
VIR_DEBUG("driver=%p, dconn=%p, vm=%p, cookiein=%s, cookieinlen=%d, "
|
2017-09-25 10:43:33 +00:00
|
|
|
"cookieout=%p, cookieoutlen=%p, flags=0x%lx, retcode=%d",
|
2011-05-20 10:03:04 +00:00
|
|
|
driver, dconn, vm, NULLSTR(cookiein), cookieinlen,
|
|
|
|
cookieout, cookieoutlen, flags, retcode);
|
2011-01-31 10:47:03 +00:00
|
|
|
|
2013-11-06 03:36:57 +00:00
|
|
|
port = priv->migrationPort;
|
|
|
|
priv->migrationPort = 0;
|
|
|
|
|
2015-07-02 06:26:48 +00:00
|
|
|
if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_IN)) {
|
2018-02-12 17:11:41 +00:00
|
|
|
qemuMigrationDstErrorReport(driver, vm->def->name);
|
2011-01-31 10:47:03 +00:00
|
|
|
goto cleanup;
|
2015-07-02 06:26:48 +00:00
|
|
|
}
|
2011-01-31 10:47:03 +00:00
|
|
|
|
2015-12-15 12:19:08 +00:00
|
|
|
ignore_value(virTimeMillisNow(&timeReceived));
|
|
|
|
|
2011-07-19 00:27:31 +00:00
|
|
|
qemuMigrationJobStartPhase(driver, vm,
|
|
|
|
v3proto ? QEMU_MIGRATION_PHASE_FINISH3
|
|
|
|
: QEMU_MIGRATION_PHASE_FINISH2);
|
2011-01-24 18:06:16 +00:00
|
|
|
|
2018-02-12 17:11:41 +00:00
|
|
|
qemuDomainCleanupRemove(vm, qemuMigrationDstPrepareCleanup);
|
2020-03-26 16:55:00 +00:00
|
|
|
g_clear_pointer(&priv->job.completed, qemuDomainJobInfoFree);
|
2012-03-16 06:56:19 +00:00
|
|
|
|
2014-08-28 12:06:10 +00:00
|
|
|
cookie_flags = QEMU_MIGRATION_COOKIE_NETWORK |
|
2014-11-04 09:52:10 +00:00
|
|
|
QEMU_MIGRATION_COOKIE_STATS |
|
|
|
|
QEMU_MIGRATION_COOKIE_NBD;
|
2016-07-18 11:17:28 +00:00
|
|
|
/* Some older versions of libvirt always send persistent XML in the cookie
|
|
|
|
* even though VIR_MIGRATE_PERSIST_DEST was not used. */
|
|
|
|
cookie_flags |= QEMU_MIGRATION_COOKIE_PERSISTENT;
|
2011-09-15 13:13:11 +00:00
|
|
|
|
2020-09-28 15:43:46 +00:00
|
|
|
if (!(mig = qemuMigrationCookieParse(driver, vm->def, priv->origname, priv,
|
|
|
|
cookiein, cookieinlen, cookie_flags)))
|
2011-07-19 00:27:31 +00:00
|
|
|
goto endjob;
|
2011-01-31 10:47:03 +00:00
|
|
|
|
2015-07-07 12:11:58 +00:00
|
|
|
if (flags & VIR_MIGRATE_OFFLINE) {
|
2015-12-15 12:19:08 +00:00
|
|
|
if (retcode == 0 &&
|
2018-02-12 17:11:41 +00:00
|
|
|
qemuMigrationDstPersist(driver, vm, mig, false) == 0)
|
2017-03-28 15:08:03 +00:00
|
|
|
dom = virGetDomain(dconn, vm->def->name, vm->def->uuid, -1);
|
2015-12-15 12:19:08 +00:00
|
|
|
goto endjob;
|
|
|
|
}
|
2015-10-01 19:54:38 +00:00
|
|
|
|
2015-12-15 12:19:08 +00:00
|
|
|
if (retcode != 0) {
|
|
|
|
/* Check for a possible error on the monitor in case Finish was called
|
|
|
|
* before the monitor EOF handler got a chance to process the error
|
|
|
|
*/
|
2016-09-12 08:24:21 +00:00
|
|
|
qemuDomainCheckMonitor(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN);
|
2015-12-15 12:19:08 +00:00
|
|
|
goto endjob;
|
|
|
|
}
|
2011-01-31 10:47:03 +00:00
|
|
|
|
2015-12-15 12:19:08 +00:00
|
|
|
if (!virDomainObjIsActive(vm)) {
|
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
|
|
|
|
_("guest unexpectedly quit"));
|
2018-02-12 17:11:41 +00:00
|
|
|
qemuMigrationDstErrorReport(driver, vm->def->name);
|
2015-12-15 12:19:08 +00:00
|
|
|
goto endjob;
|
|
|
|
}
|
2015-07-07 12:42:42 +00:00
|
|
|
|
2018-02-12 17:11:41 +00:00
|
|
|
if (qemuMigrationDstVPAssociatePortProfiles(vm->def) < 0)
|
2015-12-15 12:19:08 +00:00
|
|
|
goto endjob;
|
2011-01-31 10:47:03 +00:00
|
|
|
|
2018-02-12 17:11:41 +00:00
|
|
|
if (mig->network && qemuMigrationDstOPDRelocate(driver, vm, mig) < 0)
|
2015-12-15 12:19:08 +00:00
|
|
|
VIR_WARN("unable to provide network data for relocation");
|
2012-11-27 15:34:24 +00:00
|
|
|
|
2018-02-12 17:11:41 +00:00
|
|
|
if (qemuMigrationDstStopNBDServer(driver, vm, mig) < 0)
|
2015-12-15 12:19:08 +00:00
|
|
|
goto endjob;
|
2011-01-31 10:47:03 +00:00
|
|
|
|
2016-06-29 13:52:49 +00:00
|
|
|
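/* The virtio channel state may have changed while the domain was being
 * migrated in; refresh it before (re)connecting the guest agent. */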
if (qemuRefreshVirtioChannelState(driver, vm,
|
|
|
|
QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
|
2016-01-08 16:03:48 +00:00
|
|
|
goto endjob;
|
|
|
|
|
2016-11-16 13:43:01 +00:00
|
|
|
if (qemuConnectAgent(driver, vm) < 0)
|
|
|
|
goto endjob;
|
2016-01-08 16:03:48 +00:00
|
|
|
|
2015-12-15 12:19:08 +00:00
|
|
|
if (flags & VIR_MIGRATE_PERSIST_DEST) {
|
2018-02-12 17:11:41 +00:00
|
|
|
if (qemuMigrationDstPersist(driver, vm, mig, !v3proto) < 0) {
|
2015-12-15 12:19:08 +00:00
|
|
|
/* Hmpf. Migration was successful, but making it persistent
|
|
|
|
* was not. If we report successful, then when this domain
|
|
|
|
* shuts down, management tools are in for a surprise. On the
|
|
|
|
* other hand, if we report failure, then the management tools
|
|
|
|
* might try to restart the domain on the source side, even
|
|
|
|
* though the domain is actually running on the destination.
|
|
|
|
* Pretend success and hope that this is a rare situation and
|
|
|
|
* management tools are smart.
|
|
|
|
*
|
|
|
|
* However, in v3 protocol, the source VM is still available
|
|
|
|
* to restart during confirm() step, so we kill it off now.
|
2015-10-15 17:49:22 +00:00
|
|
|
*/
|
|
|
|
if (v3proto)
|
|
|
|
goto endjob;
|
2015-10-02 10:08:26 +00:00
|
|
|
}
|
2015-12-15 12:19:08 +00:00
|
|
|
}
|
2015-10-02 10:08:26 +00:00
|
|
|
|
2015-12-15 12:19:08 +00:00
|
|
|
/* We need to wait for QEMU to process all data sent by the source
|
|
|
|
* before starting guest CPUs.
|
|
|
|
*/
|
2018-02-12 17:11:41 +00:00
|
|
|
if (qemuMigrationDstWaitForCompletion(driver, vm,
|
|
|
|
QEMU_ASYNC_JOB_MIGRATION_IN,
|
|
|
|
!!(flags & VIR_MIGRATE_POSTCOPY)) < 0) {
|
2015-12-15 12:19:08 +00:00
|
|
|
/* There's not much we can do for v2 protocol since the
|
|
|
|
* original domain on the source host is already gone.
|
|
|
|
*/
|
|
|
|
if (v3proto)
|
|
|
|
goto endjob;
|
|
|
|
}
|
2015-10-01 19:39:35 +00:00
|
|
|
|
2018-02-01 14:02:17 +00:00
|
|
|
/* Now that the state data was transferred we can refresh the actual state
|
|
|
|
* of the devices */
|
|
|
|
if (qemuProcessRefreshState(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN) < 0) {
|
|
|
|
/* Similarly to the case above v2 protocol will not be able to recover
|
|
|
|
* from this. Let's ignore this and perhaps stuff will not break. */
|
|
|
|
if (v3proto)
|
|
|
|
goto endjob;
|
|
|
|
}
|
|
|
|
|
2017-09-01 06:49:21 +00:00
|
|
|
if (priv->job.current->status == QEMU_DOMAIN_JOB_STATUS_POSTCOPY)
|
2015-11-26 14:37:23 +00:00
|
|
|
inPostCopy = true;
|
|
|
|
|
2015-12-15 12:19:08 +00:00
|
|
|
if (!(flags & VIR_MIGRATE_PAUSED)) {
|
|
|
|
/* run 'cont' on the destination, which allows migration on qemu
|
|
|
|
* >= 0.10.6 to work properly. This isn't strictly necessary on
|
|
|
|
* older QEMU versions, but it also doesn't hurt anything there
|
|
|
|
*/
|
2018-02-09 15:40:51 +00:00
|
|
|
if (qemuProcessStartCPUs(driver, vm,
|
2015-11-26 14:37:23 +00:00
|
|
|
inPostCopy ? VIR_DOMAIN_RUNNING_POSTCOPY
|
|
|
|
: VIR_DOMAIN_RUNNING_MIGRATED,
|
2015-12-15 12:19:08 +00:00
|
|
|
QEMU_ASYNC_JOB_MIGRATION_IN) < 0) {
|
2018-05-05 12:04:21 +00:00
|
|
|
if (virGetLastErrorCode() == VIR_ERR_OK)
|
2015-12-15 12:19:08 +00:00
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
"%s", _("resume operation failed"));
|
|
|
|
/* Need to save the current error, in case shutting
|
|
|
|
* down the process overwrites it
|
|
|
|
*/
|
2018-12-06 17:33:04 +00:00
|
|
|
virErrorPreserveLast(&orig_err);
|
2015-10-01 19:54:38 +00:00
|
|
|
|
2015-12-15 12:19:08 +00:00
|
|
|
/*
|
|
|
|
* In v3 protocol, the source VM is still available to
|
|
|
|
* restart during confirm() step, so we kill it off
|
|
|
|
* now.
|
|
|
|
* In v2 protocol, the source is dead, so we leave
|
|
|
|
* target in paused state, in case admin can fix
|
|
|
|
* things up.
|
|
|
|
*/
|
|
|
|
if (v3proto)
|
|
|
|
goto endjob;
|
2011-01-31 10:47:03 +00:00
|
|
|
}
|
2015-11-26 14:37:23 +00:00
|
|
|
|
2018-09-12 12:34:33 +00:00
|
|
|
if (inPostCopy)
|
2016-04-12 13:41:28 +00:00
|
|
|
doKill = false;
|
2015-12-15 12:19:08 +00:00
|
|
|
}
|
2011-01-31 10:47:03 +00:00
|
|
|
|
2015-12-15 12:19:08 +00:00
|
|
|
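/* Take over the migration statistics sent by the source and record the
 * clock difference between the two hosts so the timings can be reported
 * consistently on the destination. */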
if (mig->jobInfo) {
|
2016-02-18 13:02:15 +00:00
|
|
|
jobInfo = mig->jobInfo;
|
2015-12-15 12:19:08 +00:00
|
|
|
mig->jobInfo = NULL;
|
|
|
|
|
|
|
|
if (jobInfo->sent && timeReceived) {
|
|
|
|
jobInfo->timeDelta = timeReceived - jobInfo->sent;
|
|
|
|
jobInfo->received = timeReceived;
|
|
|
|
jobInfo->timeDeltaSet = true;
|
2011-01-31 10:47:03 +00:00
|
|
|
}
|
2016-02-18 13:02:15 +00:00
|
|
|
qemuDomainJobInfoUpdateTime(jobInfo);
|
|
|
|
qemuDomainJobInfoUpdateDowntime(jobInfo);
|
2015-12-15 12:19:08 +00:00
|
|
|
}
|
2012-11-21 08:28:49 +00:00
|
|
|
|
2015-11-26 14:37:23 +00:00
|
|
|
if (inPostCopy) {
|
2018-02-12 17:11:41 +00:00
|
|
|
if (qemuMigrationDstWaitForCompletion(driver, vm,
|
|
|
|
QEMU_ASYNC_JOB_MIGRATION_IN,
|
|
|
|
false) < 0) {
|
2015-11-26 14:37:23 +00:00
|
|
|
goto endjob;
|
|
|
|
}
|
|
|
|
if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
|
|
|
|
virDomainObjSetState(vm,
|
|
|
|
VIR_DOMAIN_RUNNING,
|
|
|
|
VIR_DOMAIN_RUNNING_MIGRATED);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-03-28 15:08:03 +00:00
|
|
|
dom = virGetDomain(dconn, vm->def->name, vm->def->uuid, vm->def->id);
|
2011-06-23 10:03:57 +00:00
|
|
|
|
2018-09-12 12:34:33 +00:00
|
|
|
if (inPostCopy) {
|
|
|
|
/* The only RESUME event during post-copy migration is triggered by
|
|
|
|
* QEMU when the running domain moves from the source to the
|
|
|
|
* destination host, but then the migration keeps running until all
|
|
|
|
* modified memory is transferred from the source host. This will
|
|
|
|
* result in VIR_DOMAIN_EVENT_RESUMED with RESUMED_POSTCOPY detail.
|
|
|
|
* However, our API documentation says we need to fire another RESUMED
|
|
|
|
* event at the very end of migration with RESUMED_MIGRATED detail.
|
|
|
|
*/
|
|
|
|
event = virDomainEventLifecycleNewFromObj(vm,
|
|
|
|
VIR_DOMAIN_EVENT_RESUMED,
|
|
|
|
VIR_DOMAIN_EVENT_RESUMED_MIGRATED);
|
|
|
|
virObjectEventStateQueue(driver->domainEventState, event);
|
|
|
|
}
|
2015-07-02 19:46:56 +00:00
|
|
|
|
2015-12-15 12:19:08 +00:00
|
|
|
if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED) {
|
|
|
|
virDomainObjSetState(vm, VIR_DOMAIN_PAUSED, VIR_DOMAIN_PAUSED_USER);
|
|
|
|
event = virDomainEventLifecycleNewFromObj(vm,
|
|
|
|
VIR_DOMAIN_EVENT_SUSPENDED,
|
|
|
|
VIR_DOMAIN_EVENT_SUSPENDED_PAUSED);
|
2018-06-12 17:33:02 +00:00
|
|
|
virObjectEventStateQueue(driver->domainEventState, event);
|
2015-07-07 12:42:42 +00:00
|
|
|
}
|
2015-07-02 19:46:56 +00:00
|
|
|
|
2015-12-15 12:19:08 +00:00
|
|
|
if (virDomainObjIsActive(vm) &&
|
2019-11-27 12:53:10 +00:00
|
|
|
virDomainObjSave(vm, driver->xmlopt, cfg->stateDir) < 0)
|
2015-12-15 12:19:08 +00:00
|
|
|
VIR_WARN("Failed to save status on vm %s", vm->def->name);
|
|
|
|
|
|
|
|
/* Guest is successfully running, so cancel previous auto destroy */
|
|
|
|
qemuProcessAutoDestroyRemove(driver, vm);
|
|
|
|
|
2015-07-07 12:42:42 +00:00
|
|
|
endjob:
|
2015-07-07 13:29:10 +00:00
|
|
|
if (!dom &&
|
2015-07-07 12:42:42 +00:00
|
|
|
!(flags & VIR_MIGRATE_OFFLINE) &&
|
|
|
|
virDomainObjIsActive(vm)) {
|
2016-04-12 13:41:28 +00:00
|
|
|
if (doKill) {
|
2016-01-13 15:29:58 +00:00
|
|
|
qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
|
|
|
|
QEMU_ASYNC_JOB_MIGRATION_IN,
|
|
|
|
VIR_QEMU_PROCESS_STOP_MIGRATED);
|
|
|
|
virDomainAuditStop(vm, "failed");
|
|
|
|
event = virDomainEventLifecycleNewFromObj(vm,
|
|
|
|
VIR_DOMAIN_EVENT_STOPPED,
|
|
|
|
VIR_DOMAIN_EVENT_STOPPED_FAILED);
|
2018-06-12 17:33:02 +00:00
|
|
|
virObjectEventStateQueue(driver->domainEventState, event);
|
2016-01-13 15:29:58 +00:00
|
|
|
} else {
|
2018-02-12 17:11:41 +00:00
|
|
|
qemuMigrationAnyPostcopyFailed(driver, vm);
|
2016-01-13 15:29:58 +00:00
|
|
|
}
|
2011-01-31 10:47:03 +00:00
|
|
|
}
|
|
|
|
|
2016-02-18 13:02:15 +00:00
|
|
|
if (dom) {
|
2018-01-19 09:32:44 +00:00
|
|
|
if (jobInfo) {
|
2019-10-16 11:43:18 +00:00
|
|
|
priv->job.completed = g_steal_pointer(&jobInfo);
|
2018-01-19 09:32:44 +00:00
|
|
|
priv->job.completed->status = QEMU_DOMAIN_JOB_STATUS_COMPLETED;
|
2018-06-01 08:32:49 +00:00
|
|
|
priv->job.completed->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION;
|
2018-01-19 09:32:44 +00:00
|
|
|
}
|
2018-01-11 19:47:50 +00:00
|
|
|
|
2020-09-28 14:49:28 +00:00
|
|
|
if (qemuMigrationCookieFormat(mig, driver, vm,
|
|
|
|
QEMU_MIGRATION_DESTINATION,
|
|
|
|
cookieout, cookieoutlen,
|
|
|
|
QEMU_MIGRATION_COOKIE_STATS) < 0)
|
2016-02-18 13:02:15 +00:00
|
|
|
VIR_WARN("Unable to encode migration cookie");
|
2015-11-26 14:37:23 +00:00
|
|
|
|
|
|
|
/* Remove completed stats for post-copy; everything but timing fields
|
|
|
|
* is obsolete anyway.
|
|
|
|
*/
|
|
|
|
if (inPostCopy)
|
2020-03-26 16:55:00 +00:00
|
|
|
g_clear_pointer(&priv->job.completed, qemuDomainJobInfoFree);
|
2016-02-18 13:02:15 +00:00
|
|
|
}
|
2011-01-24 18:06:16 +00:00
|
|
|
|
2018-02-27 16:09:17 +00:00
|
|
|
qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
|
2020-07-16 11:48:34 +00:00
|
|
|
jobPriv->migParams, priv->job.apiFlags);
|
2017-03-03 12:22:16 +00:00
|
|
|
|
qemu: completely rework reference counting
There is one problem that causes various errors in the daemon. When
domain is waiting for a job, it is unlocked while waiting on the
condition. However, if that domain is for example transient and being
removed in another API (e.g. cancelling incoming migration), it get's
unref'd. If the first call, that was waiting, fails to get the job, it
unref's the domain object, and because it was the last reference, it
causes clearing of the whole domain object. However, when finishing the
call, the domain must be unlocked, but there is no way for the API to
know whether it was cleaned or not (unless there is some ugly temporary
variable, but let's scratch that).
The root cause is that our APIs don't ref the objects they are using and
all use the implicit reference that the object has when it is in the
domain list. That reference can be removed when the API is waiting for
a job. And because each domain doesn't do its ref'ing, it results in
the ugly checking of the return value of virObjectUnref() that we have
everywhere.
This patch changes qemuDomObjFromDomain() to ref the domain (using
virDomainObjListFindByUUIDRef()) and adds qemuDomObjEndAPI() which
should be the only function in which the return value of
virObjectUnref() is checked. This makes all reference counting
deterministic and makes the code a bit clearer.
Signed-off-by: Martin Kletzander <mkletzan@redhat.com>
2014-12-04 13:41:36 +00:00
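A generic sketch of the pattern the message above describes, assuming a simplified non-atomic refcount (the names here are illustrative, not libvirt's): every lookup hands the caller its own reference, and a single end-API helper is the only place where dropping the last reference, and thus freeing the object, is handled.

#include <stdlib.h>

typedef struct {
    int refs;              /* simplified; the real code uses atomic refcounting */
    /* ... domain payload ... */
} DomainObj;

/* every lookup hands out a new reference */
static DomainObj *
domainObjGetRef(DomainObj *obj)
{
    obj->refs++;
    return obj;
}

/* the only place where "was that the last reference?" is answered */
static void
domainObjEndAPI(DomainObj **objp)
{
    DomainObj *obj = *objp;

    if (!obj)
        return;
    if (--obj->refs == 0)
        free(obj);
    *objp = NULL;          /* caller must not touch the object afterwards */
}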
|
|
|
qemuMigrationJobFinish(driver, vm);
|
2015-09-22 13:25:00 +00:00
|
|
|
if (!virDomainObjIsActive(vm))
|
2017-08-15 07:12:43 +00:00
|
|
|
qemuDomainRemoveInactiveJob(driver, vm);
|
2011-01-31 10:47:03 +00:00
|
|
|
|
2014-03-25 06:49:44 +00:00
|
|
|
cleanup:
|
2020-03-26 16:55:00 +00:00
|
|
|
g_clear_pointer(&jobInfo, qemuDomainJobInfoFree);
|
2018-02-06 09:09:08 +00:00
|
|
|
virPortAllocatorRelease(port);
|
2014-12-04 13:41:36 +00:00
|
|
|
if (priv->mon)
|
2015-11-12 13:54:04 +00:00
|
|
|
qemuMonitorSetDomainLog(priv->mon, NULL, NULL, NULL);
|
2014-12-04 13:41:36 +00:00
|
|
|
VIR_FREE(priv->origname);
|
2015-04-23 15:27:58 +00:00
|
|
|
virDomainObjEndAPI(&vm);
|
2018-12-06 17:33:04 +00:00
|
|
|
virErrorRestore(&orig_err);
|
2015-07-02 20:32:54 +00:00
|
|
|
|
|
|
|
/* Set a special error if Finish is expected to return NULL as a result of
|
|
|
|
* a successful call with retcode != 0
|
|
|
|
*/
|
2018-05-05 12:04:21 +00:00
|
|
|
if (retcode != 0 && !dom && virGetLastErrorCode() == VIR_ERR_OK)
|
2015-07-02 20:32:54 +00:00
|
|
|
virReportError(VIR_ERR_MIGRATE_FINISH_OK, NULL);
|
2011-01-31 10:47:03 +00:00
|
|
|
return dom;
|
|
|
|
}
|
2011-03-10 00:35:13 +00:00
|
|
|
|
2011-02-03 11:09:28 +00:00
|
|
|
|
2013-02-06 18:17:20 +00:00
|
|
|
/* Helper function called while vm is active. */
|
2011-03-10 00:35:13 +00:00
|
|
|
int
|
2018-02-12 17:11:41 +00:00
|
|
|
qemuMigrationSrcToFile(virQEMUDriverPtr driver, virDomainObjPtr vm,
|
|
|
|
int fd,
|
2019-11-28 12:07:04 +00:00
|
|
|
virCommandPtr compressor,
|
2018-02-12 17:11:41 +00:00
|
|
|
qemuDomainAsyncJob asyncJob)
|
2011-03-10 00:35:13 +00:00
|
|
|
{
|
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
2020-06-10 14:13:15 +00:00
|
|
|
bool bwParam = virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATION_PARAM_BANDWIDTH);
|
2011-03-10 00:35:13 +00:00
|
|
|
int rc;
|
2013-07-08 10:08:46 +00:00
|
|
|
int ret = -1;
|
2011-03-25 17:02:27 +00:00
|
|
|
int pipeFD[2] = { -1, -1 };
|
2012-03-20 15:56:29 +00:00
|
|
|
unsigned long saveMigBandwidth = priv->migMaxBandwidth;
|
2013-01-17 10:59:23 +00:00
|
|
|
char *errbuf = NULL;
|
2014-05-22 10:38:47 +00:00
|
|
|
virErrorPtr orig_err = NULL;
|
2020-06-10 14:13:15 +00:00
|
|
|
g_autoptr(qemuMigrationParams) migParams = NULL;
|
2012-03-20 15:56:29 +00:00
|
|
|
|
2020-02-25 09:55:11 +00:00
|
|
|
if (qemuMigrationSetDBusVMState(driver, vm) < 0)
|
|
|
|
return -1;
|
|
|
|
|
2012-03-20 15:56:29 +00:00
|
|
|
/* Increase migration bandwidth to unlimited since the target is a file.
|
|
|
|
* Failure to change migration speed is not fatal. */
|
2020-06-10 14:13:15 +00:00
|
|
|
if (bwParam) {
|
|
|
|
if (!(migParams = qemuMigrationParamsNew()))
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
if (qemuMigrationParamsSetULL(migParams,
|
|
|
|
QEMU_MIGRATION_PARAM_MAX_BANDWIDTH,
|
|
|
|
QEMU_DOMAIN_MIG_BANDWIDTH_MAX * 1024 * 1024) < 0)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
if (qemuMigrationParamsApply(driver, vm, asyncJob, migParams) < 0)
|
2014-12-16 09:40:58 +00:00
|
|
|
return -1;
|
2020-06-10 14:13:15 +00:00
|
|
|
|
|
|
|
priv->migMaxBandwidth = QEMU_DOMAIN_MIG_BANDWIDTH_MAX;
|
|
|
|
} else {
|
|
|
|
if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) == 0) {
|
|
|
|
qemuMonitorSetMigrationSpeed(priv->mon,
|
|
|
|
QEMU_DOMAIN_MIG_BANDWIDTH_MAX);
|
|
|
|
priv->migMaxBandwidth = QEMU_DOMAIN_MIG_BANDWIDTH_MAX;
|
|
|
|
if (qemuDomainObjExitMonitor(driver, vm) < 0)
|
|
|
|
return -1;
|
|
|
|
}
|
2012-03-20 15:56:29 +00:00
|
|
|
}
|
2011-03-10 00:35:13 +00:00
|
|
|
|
2014-08-12 13:21:56 +00:00
|
|
|
if (!virDomainObjIsActive(vm)) {
|
|
|
|
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
|
|
|
|
_("guest unexpectedly quit"));
|
|
|
|
/* nothing to tear down */
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2020-01-24 15:22:12 +00:00
|
|
|
if (compressor && virPipe(pipeFD) < 0)
|
2016-02-15 16:17:02 +00:00
|
|
|
return -1;
|
2011-03-10 00:35:13 +00:00
|
|
|
|
2016-02-15 16:17:02 +00:00
|
|
|
/* All right! We can use fd migration, which means that qemu
|
|
|
|
* doesn't have to open() the file, so while we still have to
|
|
|
|
* grant SELinux access, we can do it on fd and avoid cleanup
|
|
|
|
* later, as well as skip futzing with cgroup. */
|
2017-02-13 13:36:53 +00:00
|
|
|
if (qemuSecuritySetImageFDLabel(driver->securityManager, vm->def,
|
|
|
|
compressor ? pipeFD[1] : fd) < 0)
|
2016-02-15 16:17:02 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
qemu: fix crash when mixing sync and async monitor jobs
Currently, we attempt to run sync job and async job at the same time. It
means that the monitor commands for two jobs can be run in any order.
In the function qemuDomainObjEnterMonitorInternal():
if (priv->job.active == QEMU_JOB_NONE && priv->job.asyncJob) {
if (qemuDomainObjBeginNestedJob(driver, obj) < 0)
We check whether the caller is an async job by priv->job.active and
priv->job.asyncJob. But when an async job is running, and a sync job is
also running at the time of the check, then priv->job.active is not
QEMU_JOB_NONE. So we cannot check whether the caller is an async job
in the function qemuDomainObjEnterMonitorInternal(), and must instead
put the burden on the caller to tell us when an async command wants
to do a nested job.
Once the burden is on the caller, then only async monitor enters need
to worry about whether the VM is still running; for sync monitor enter,
the internal return is always 0, so lots of ignore_value can be dropped.
* src/qemu/THREADS.txt: Reflect new rules.
* src/qemu/qemu_domain.h (qemuDomainObjEnterMonitorAsync): New
prototype.
* src/qemu/qemu_process.h (qemuProcessStartCPUs)
(qemuProcessStopCPUs): Add parameter.
* src/qemu/qemu_migration.h (qemuMigrationToFile): Likewise.
(qemuMigrationWaitForCompletion): Make static.
* src/qemu/qemu_domain.c (qemuDomainObjEnterMonitorInternal): Add
parameter.
(qemuDomainObjEnterMonitorAsync): New function.
(qemuDomainObjEnterMonitor, qemuDomainObjEnterMonitorWithDriver):
Update callers.
* src/qemu/qemu_driver.c (qemuDomainSaveInternal)
(qemudDomainCoreDump, doCoreDump, processWatchdogEvent)
(qemudDomainSuspend, qemudDomainResume, qemuDomainSaveImageStartVM)
(qemuDomainSnapshotCreateActive, qemuDomainRevertToSnapshot):
Likewise.
* src/qemu/qemu_process.c (qemuProcessStopCPUs)
(qemuProcessFakeReboot, qemuProcessRecoverMigration)
(qemuProcessRecoverJob, qemuProcessStart): Likewise.
* src/qemu/qemu_migration.c (qemuMigrationToFile)
(qemuMigrationWaitForCompletion, qemuMigrationUpdateJobStatus)
(qemuMigrationJobStart, qemuDomainMigrateGraphicsRelocate)
(doNativeMigrate, doTunnelMigrate, qemuMigrationPerformJob)
(qemuMigrationPerformPhase, qemuMigrationFinish)
(qemuMigrationConfirm): Likewise.
* src/qemu/qemu_hotplug.c: Drop unneeded ignore_value.
2011-07-28 23:18:24 +00:00
|
|
|
if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
|
2011-06-30 09:23:50 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
2011-03-10 00:35:13 +00:00
|
|
|
if (!compressor) {
|
2016-02-15 16:17:02 +00:00
|
|
|
rc = qemuMonitorMigrateToFd(priv->mon,
|
|
|
|
QEMU_MONITOR_MIGRATE_BACKGROUND,
|
|
|
|
fd);
|
2011-03-10 00:35:13 +00:00
|
|
|
} else {
|
2019-11-28 12:07:04 +00:00
|
|
|
virCommandSetInputFD(compressor, pipeFD[0]);
|
|
|
|
virCommandSetOutputFD(compressor, &fd);
|
|
|
|
virCommandSetErrorBuffer(compressor, &errbuf);
|
|
|
|
virCommandDoAsyncIO(compressor);
|
2016-02-15 16:17:02 +00:00
|
|
|
if (virSetCloseExec(pipeFD[1]) < 0) {
|
|
|
|
virReportSystemError(errno, "%s",
|
|
|
|
_("Unable to set cloexec flag"));
|
|
|
|
ignore_value(qemuDomainObjExitMonitor(driver, vm));
|
|
|
|
goto cleanup;
|
2011-03-25 17:02:27 +00:00
|
|
|
}
|
2019-11-28 12:07:04 +00:00
|
|
|
if (virCommandRunAsync(compressor, NULL) < 0) {
|
2016-02-15 16:17:02 +00:00
|
|
|
ignore_value(qemuDomainObjExitMonitor(driver, vm));
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
rc = qemuMonitorMigrateToFd(priv->mon,
|
|
|
|
QEMU_MONITOR_MIGRATE_BACKGROUND,
|
|
|
|
pipeFD[1]);
|
|
|
|
if (VIR_CLOSE(pipeFD[0]) < 0 ||
|
|
|
|
VIR_CLOSE(pipeFD[1]) < 0)
|
|
|
|
VIR_WARN("failed to close intermediate pipe");
|
2011-03-10 00:35:13 +00:00
|
|
|
}
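For illustration, a self-contained POSIX sketch of the same pipe wiring used above, with the compressor replaced by a plain fork()/exec() of gzip (the command name and the sample payload are assumptions; the real code drives this through virCommand and lets QEMU write the stream):

#include <unistd.h>
#include <sys/wait.h>

static int
streamThroughCompressor(int destfd)
{
    int p[2];
    pid_t child;

    if (pipe(p) < 0)
        return -1;

    if ((child = fork()) < 0)
        return -1;

    if (child == 0) {
        /* child: read the raw stream from the pipe, write compressed data
         * into the already-open destination fd */
        dup2(p[0], STDIN_FILENO);
        dup2(destfd, STDOUT_FILENO);
        close(p[0]);
        close(p[1]);
        execlp("gzip", "gzip", "-c", (char *) NULL);
        _exit(127);
    }

    close(p[0]);
    /* parent plays the role of QEMU here and writes the stream into the pipe */
    {
        const char msg[] = "raw migration stream would go here\n";
        if (write(p[1], msg, sizeof(msg) - 1) < 0) {
            /* ignored in this sketch */
        }
    }
    close(p[1]);

    return waitpid(child, NULL, 0) == child ? 0 : -1;
}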
|
2014-12-16 09:40:58 +00:00
|
|
|
if (qemuDomainObjExitMonitor(driver, vm) < 0)
|
2014-08-12 13:21:56 +00:00
|
|
|
goto cleanup;
|
2011-03-10 00:35:13 +00:00
|
|
|
if (rc < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
2018-02-12 17:11:41 +00:00
|
|
|
rc = qemuMigrationSrcWaitForCompletion(driver, vm, asyncJob, NULL, 0);
|
2011-03-10 00:35:13 +00:00
|
|
|
|
2014-05-22 10:38:47 +00:00
|
|
|
if (rc < 0) {
|
|
|
|
if (rc == -2) {
|
2018-12-06 17:33:04 +00:00
|
|
|
virErrorPreserveLast(&orig_err);
|
2019-11-28 12:07:04 +00:00
|
|
|
virCommandAbort(compressor);
|
2014-08-12 13:21:56 +00:00
|
|
|
if (virDomainObjIsActive(vm) &&
|
|
|
|
qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) == 0) {
|
2014-05-22 10:38:47 +00:00
|
|
|
qemuMonitorMigrateCancel(priv->mon);
|
2014-12-16 09:40:58 +00:00
|
|
|
ignore_value(qemuDomainObjExitMonitor(driver, vm));
|
2014-05-22 10:38:47 +00:00
|
|
|
}
|
|
|
|
}
|
2011-03-10 00:35:13 +00:00
|
|
|
goto cleanup;
|
2014-05-22 10:38:47 +00:00
|
|
|
}
|
2011-03-10 00:35:13 +00:00
|
|
|
|
2019-11-28 12:07:04 +00:00
|
|
|
if (compressor && virCommandWait(compressor, NULL) < 0)
|
2011-03-25 17:02:27 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
2016-02-17 23:12:33 +00:00
|
|
|
qemuDomainEventEmitJobCompleted(driver, vm);
|
2011-03-10 00:35:13 +00:00
|
|
|
ret = 0;
|
|
|
|
|
2014-03-25 06:49:44 +00:00
|
|
|
cleanup:
|
2014-05-22 10:38:47 +00:00
|
|
|
if (ret < 0 && !orig_err)
|
2018-12-06 17:33:04 +00:00
|
|
|
virErrorPreserveLast(&orig_err);
|
2014-05-22 10:38:47 +00:00
|
|
|
|
2012-03-20 15:56:29 +00:00
|
|
|
/* Restore max migration bandwidth */
|
2020-06-10 14:13:15 +00:00
|
|
|
if (virDomainObjIsActive(vm)) {
|
|
|
|
if (bwParam) {
|
|
|
|
if (qemuMigrationParamsSetULL(migParams,
|
|
|
|
QEMU_MIGRATION_PARAM_MAX_BANDWIDTH,
|
|
|
|
saveMigBandwidth * 1024 * 1024) == 0)
|
|
|
|
ignore_value(qemuMigrationParamsApply(driver, vm, asyncJob,
|
|
|
|
migParams));
|
|
|
|
} else {
|
|
|
|
if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) == 0) {
|
|
|
|
qemuMonitorSetMigrationSpeed(priv->mon, saveMigBandwidth);
|
|
|
|
ignore_value(qemuDomainObjExitMonitor(driver, vm));
|
|
|
|
}
|
|
|
|
}
|
2012-03-20 15:56:29 +00:00
|
|
|
priv->migMaxBandwidth = saveMigBandwidth;
|
|
|
|
}
|
|
|
|
|
2011-03-25 17:02:27 +00:00
|
|
|
VIR_FORCE_CLOSE(pipeFD[0]);
|
|
|
|
VIR_FORCE_CLOSE(pipeFD[1]);
|
2019-11-28 12:07:04 +00:00
|
|
|
if (errbuf) {
|
2013-01-17 10:59:23 +00:00
|
|
|
VIR_DEBUG("Compression binary stderr: %s", NULLSTR(errbuf));
|
|
|
|
VIR_FREE(errbuf);
|
|
|
|
}
|
2014-05-22 10:38:47 +00:00
|
|
|
|
2018-12-06 17:33:04 +00:00
|
|
|
virErrorRestore(&orig_err);
|
2014-05-22 10:38:47 +00:00
|
|
|
|
2011-03-10 00:35:13 +00:00
|
|
|
return ret;
|
|
|
|
}
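From the public API side, the save and core-dump paths mentioned in the caller list above are what end up in this migrate-to-fd helper; a minimal client sketch, assuming a local qemu:///system connection and an illustrative target path:

#include <libvirt/libvirt.h>

int
saveDomainToFile(const char *name)
{
    virConnectPtr conn = virConnectOpen("qemu:///system");
    virDomainPtr dom = NULL;
    int ret = -1;

    if (!conn)
        return -1;

    if ((dom = virDomainLookupByName(conn, name)) &&
        virDomainSave(dom, "/var/lib/libvirt/qemu/save/demo.save") == 0)
        ret = 0;

    if (dom)
        virDomainFree(dom);
    virConnectClose(conn);
    return ret;
}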
|
2011-07-19 00:27:30 +00:00
|
|
|
|
2015-05-19 15:28:25 +00:00
|
|
|
|
|
|
|
int
|
2018-02-12 17:11:41 +00:00
|
|
|
qemuMigrationSrcCancel(virQEMUDriverPtr driver,
|
|
|
|
virDomainObjPtr vm)
|
2015-05-19 15:28:25 +00:00
|
|
|
{
|
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
|
|
|
bool storage = false;
|
|
|
|
size_t i;
|
|
|
|
|
|
|
|
VIR_DEBUG("Canceling unfinished outgoing migration of domain %s",
|
|
|
|
vm->def->name);
|
|
|
|
|
|
|
|
qemuDomainObjEnterMonitor(driver, vm);
|
|
|
|
ignore_value(qemuMonitorMigrateCancel(priv->mon));
|
2018-10-18 11:13:21 +00:00
|
|
|
if (qemuDomainObjExitMonitor(driver, vm) < 0)
|
2019-11-12 20:46:27 +00:00
|
|
|
return -1;
|
2015-05-19 15:28:25 +00:00
|
|
|
|
|
|
|
for (i = 0; i < vm->def->ndisks; i++) {
|
|
|
|
virDomainDiskDefPtr disk = vm->def->disks[i];
|
|
|
|
qemuDomainDiskPrivatePtr diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk);
|
2018-10-19 07:14:54 +00:00
|
|
|
qemuBlockJobDataPtr job;
|
2015-05-19 15:28:25 +00:00
|
|
|
|
2018-10-19 07:14:54 +00:00
|
|
|
if (!(job = qemuBlockJobDiskGetJob(disk)) ||
|
2019-01-17 15:34:11 +00:00
|
|
|
!qemuBlockJobIsRunning(job))
|
2015-05-19 15:28:25 +00:00
|
|
|
diskPriv->migrating = false;
|
2018-10-18 11:13:21 +00:00
|
|
|
|
|
|
|
if (diskPriv->migrating) {
|
2018-10-19 07:14:54 +00:00
|
|
|
qemuBlockJobSyncBegin(job);
|
2018-10-18 11:13:21 +00:00
|
|
|
storage = true;
|
2015-05-19 15:28:25 +00:00
|
|
|
}
|
|
|
|
|
2018-10-19 07:14:54 +00:00
|
|
|
virObjectUnref(job);
|
|
|
|
}
|
2018-10-18 11:13:21 +00:00
|
|
|
|
|
|
|
if (storage &&
|
|
|
|
qemuMigrationSrcNBDCopyCancel(driver, vm, false,
|
2018-02-22 15:11:59 +00:00
|
|
|
QEMU_ASYNC_JOB_NONE, NULL) < 0)
|
2019-11-12 20:46:27 +00:00
|
|
|
return -1;
|
2015-05-19 15:28:25 +00:00
|
|
|
|
2019-11-12 20:46:27 +00:00
|
|
|
return 0;
|
2015-05-19 15:28:25 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2018-02-12 16:54:23 +00:00
|
|
|
static int
|
2012-11-28 16:43:10 +00:00
|
|
|
qemuMigrationJobStart(virQEMUDriverPtr driver,
|
2011-07-19 00:27:30 +00:00
|
|
|
virDomainObjPtr vm,
|
2018-03-21 12:01:59 +00:00
|
|
|
qemuDomainAsyncJob job,
|
|
|
|
unsigned long apiFlags)
|
2011-07-19 00:27:30 +00:00
|
|
|
{
|
2018-01-26 17:30:50 +00:00
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
2017-04-26 10:00:09 +00:00
|
|
|
virDomainJobOperation op;
|
|
|
|
unsigned long long mask;
|
2011-07-19 00:27:30 +00:00
|
|
|
|
2011-07-19 00:27:36 +00:00
|
|
|
if (job == QEMU_ASYNC_JOB_MIGRATION_IN) {
|
2017-04-26 10:00:09 +00:00
|
|
|
op = VIR_DOMAIN_JOB_OPERATION_MIGRATION_IN;
|
|
|
|
mask = QEMU_JOB_NONE;
|
2011-07-19 00:27:36 +00:00
|
|
|
} else {
|
2017-04-26 10:00:09 +00:00
|
|
|
op = VIR_DOMAIN_JOB_OPERATION_MIGRATION_OUT;
|
|
|
|
mask = QEMU_JOB_DEFAULT_MASK |
|
|
|
|
JOB_MASK(QEMU_JOB_SUSPEND) |
|
|
|
|
JOB_MASK(QEMU_JOB_MIGRATION_OP);
|
2011-07-19 00:27:36 +00:00
|
|
|
}
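The mask chosen above is just a bitmap of sync jobs allowed to run concurrently with the async migration job; a generic illustration of the idea (the enum values and BIT macro here are made up, not the actual QEMU_JOB_*/JOB_MASK definitions):

enum { JOB_QUERY = 1, JOB_SUSPEND, JOB_MIGRATION_OP };

#define BIT(job) (1U << ((job) - 1))

static int
jobAllowedDuringAsync(unsigned long long mask, int job)
{
    return (mask & BIT(job)) != 0;
}

/* e.g. mask = BIT(JOB_QUERY) | BIT(JOB_SUSPEND) lets queries and suspend
 * proceed while the async job holds the domain, and leaves everything
 * else waiting until the job ends. */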
|
2011-07-19 00:27:30 +00:00
|
|
|
|
2018-03-21 12:01:59 +00:00
|
|
|
if (qemuDomainObjBeginAsyncJob(driver, vm, job, op, apiFlags) < 0)
|
2017-04-26 10:00:09 +00:00
|
|
|
return -1;
|
|
|
|
|
2018-01-26 17:30:50 +00:00
|
|
|
priv->job.current->statsType = QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION;
|
|
|
|
|
2017-04-26 10:00:09 +00:00
|
|
|
qemuDomainObjSetAsyncJobMask(vm, mask);
|
2011-07-19 00:27:30 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-02-12 16:54:23 +00:00
|
|
|
static void
|
2012-11-28 16:43:10 +00:00
|
|
|
qemuMigrationJobSetPhase(virQEMUDriverPtr driver,
|
2011-07-19 00:27:30 +00:00
|
|
|
virDomainObjPtr vm,
|
2014-06-15 16:32:56 +00:00
|
|
|
qemuMigrationJobPhase phase)
|
2011-07-19 00:27:30 +00:00
|
|
|
{
|
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
|
|
|
|
|
|
|
if (phase < priv->job.phase) {
|
|
|
|
VIR_ERROR(_("migration protocol going backwards %s => %s"),
|
|
|
|
qemuMigrationJobPhaseTypeToString(priv->job.phase),
|
|
|
|
qemuMigrationJobPhaseTypeToString(phase));
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
qemuDomainObjSetJobPhase(driver, vm, phase);
|
|
|
|
}
|
|
|
|
|
2018-02-12 16:54:23 +00:00
|
|
|
static void
|
2012-11-28 16:43:10 +00:00
|
|
|
qemuMigrationJobStartPhase(virQEMUDriverPtr driver,
|
2011-07-19 00:27:30 +00:00
|
|
|
virDomainObjPtr vm,
|
2014-06-15 16:32:56 +00:00
|
|
|
qemuMigrationJobPhase phase)
|
2011-07-19 00:27:30 +00:00
|
|
|
{
|
|
|
|
qemuMigrationJobSetPhase(driver, vm, phase);
|
|
|
|
}
|
|
|
|
|
2018-02-12 16:54:23 +00:00
|
|
|
static void
|
2011-07-19 00:27:30 +00:00
|
|
|
qemuMigrationJobContinue(virDomainObjPtr vm)
|
|
|
|
{
|
2012-04-06 16:55:46 +00:00
|
|
|
qemuDomainObjReleaseAsyncJob(vm);
|
2011-07-19 00:27:30 +00:00
|
|
|
}
|
|
|
|
|
2018-02-12 16:54:23 +00:00
|
|
|
static bool
|
2011-07-19 00:27:30 +00:00
|
|
|
qemuMigrationJobIsActive(virDomainObjPtr vm,
|
2014-06-15 16:32:56 +00:00
|
|
|
qemuDomainAsyncJob job)
|
2011-07-19 00:27:30 +00:00
|
|
|
{
|
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
|
|
|
|
|
|
|
if (priv->job.asyncJob != job) {
|
|
|
|
const char *msg;
|
|
|
|
|
|
|
|
if (job == QEMU_ASYNC_JOB_MIGRATION_IN)
|
|
|
|
msg = _("domain '%s' is not processing incoming migration");
|
|
|
|
else
|
|
|
|
msg = _("domain '%s' is not being migrated");
|
|
|
|
|
2012-07-18 15:22:03 +00:00
|
|
|
virReportError(VIR_ERR_OPERATION_INVALID, msg, vm->def->name);
|
2011-07-19 00:27:30 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2018-02-12 16:54:23 +00:00
|
|
|
static void
|
2012-11-28 16:43:10 +00:00
|
|
|
qemuMigrationJobFinish(virQEMUDriverPtr driver, virDomainObjPtr vm)
|
2011-07-19 00:27:30 +00:00
|
|
|
{
|
2014-12-04 13:41:36 +00:00
|
|
|
qemuDomainObjEndAsyncJob(driver, vm);
|
2011-07-19 00:27:30 +00:00
|
|
|
}
|
2015-07-02 06:26:48 +00:00
|
|
|
|
|
|
|
|
|
|
|
static void
|
2019-11-21 19:27:58 +00:00
|
|
|
qemuMigrationDstErrorFree(void *data)
|
2015-07-02 06:26:48 +00:00
|
|
|
{
|
|
|
|
virErrorPtr err = data;
|
|
|
|
virFreeError(err);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2018-02-12 17:11:41 +00:00
|
|
|
qemuMigrationDstErrorInit(virQEMUDriverPtr driver)
|
2015-07-02 06:26:48 +00:00
|
|
|
{
|
2020-10-20 16:28:21 +00:00
|
|
|
driver->migrationErrors = virHashAtomicNew(qemuMigrationDstErrorFree);
|
2015-07-02 06:26:48 +00:00
|
|
|
if (driver->migrationErrors)
|
|
|
|
return 0;
|
|
|
|
else
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* This function consumes @err; the caller should consider the @err pointer
|
|
|
|
* invalid after calling this function.
|
|
|
|
*/
|
|
|
|
void
|
2018-02-12 17:11:41 +00:00
|
|
|
qemuMigrationDstErrorSave(virQEMUDriverPtr driver,
|
|
|
|
const char *name,
|
|
|
|
virErrorPtr err)
|
2015-07-02 06:26:48 +00:00
|
|
|
{
|
|
|
|
if (!err)
|
|
|
|
return;
|
|
|
|
|
|
|
|
VIR_DEBUG("Saving incoming migration error for domain %s: %s",
|
|
|
|
name, err->message);
|
|
|
|
if (virHashAtomicUpdate(driver->migrationErrors, name, err) < 0) {
|
|
|
|
VIR_WARN("Failed to save migration error for domain '%s'", name);
|
|
|
|
virFreeError(err);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2018-02-12 17:11:41 +00:00
|
|
|
qemuMigrationDstErrorReport(virQEMUDriverPtr driver,
|
|
|
|
const char *name)
|
2015-07-02 06:26:48 +00:00
|
|
|
{
|
|
|
|
virErrorPtr err;
|
|
|
|
|
|
|
|
if (!(err = virHashAtomicSteal(driver->migrationErrors, name)))
|
|
|
|
return;
|
|
|
|
|
|
|
|
VIR_DEBUG("Restoring saved incoming migration error for domain %s: %s",
|
|
|
|
name, err->message);
|
2018-12-06 17:33:04 +00:00
|
|
|
virErrorRestore(&err);
|
2015-07-02 06:26:48 +00:00
|
|
|
}
|
2016-04-14 10:33:48 +00:00
|
|
|
|
|
|
|
|
2017-09-01 06:49:30 +00:00
|
|
|
int
|
2018-02-12 17:11:41 +00:00
|
|
|
qemuMigrationSrcFetchMirrorStats(virQEMUDriverPtr driver,
|
|
|
|
virDomainObjPtr vm,
|
|
|
|
qemuDomainAsyncJob asyncJob,
|
|
|
|
qemuDomainJobInfoPtr jobInfo)
|
2017-09-01 06:49:30 +00:00
|
|
|
{
|
|
|
|
size_t i;
|
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
|
|
|
bool nbd = false;
|
2020-10-22 17:04:18 +00:00
|
|
|
GHashTable *blockinfo = NULL;
|
2017-09-01 06:49:30 +00:00
|
|
|
qemuDomainMirrorStatsPtr stats = &jobInfo->mirrorStats;
|
|
|
|
|
|
|
|
for (i = 0; i < vm->def->ndisks; i++) {
|
|
|
|
virDomainDiskDefPtr disk = vm->def->disks[i];
|
|
|
|
if (QEMU_DOMAIN_DISK_PRIVATE(disk)->migrating) {
|
|
|
|
nbd = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!nbd)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
|
|
|
|
return -1;
|
|
|
|
|
2019-06-11 14:42:53 +00:00
|
|
|
blockinfo = qemuMonitorGetAllBlockJobInfo(priv->mon, false);
|
2017-09-01 06:49:30 +00:00
|
|
|
|
|
|
|
if (qemuDomainObjExitMonitor(driver, vm) < 0 || !blockinfo)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
memset(stats, 0, sizeof(*stats));
|
|
|
|
|
|
|
|
for (i = 0; i < vm->def->ndisks; i++) {
|
|
|
|
virDomainDiskDefPtr disk = vm->def->disks[i];
|
|
|
|
qemuDomainDiskPrivatePtr diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk);
|
|
|
|
qemuMonitorBlockJobInfoPtr data;
|
|
|
|
|
|
|
|
if (!diskPriv->migrating ||
|
|
|
|
!(data = virHashLookup(blockinfo, disk->info.alias)))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
stats->transferred += data->cur;
|
|
|
|
stats->total += data->end;
|
|
|
|
}
|
|
|
|
|
|
|
|
virHashFree(blockinfo);
|
|
|
|
return 0;
|
|
|
|
}
|