/*
 * qemu_blockjob.c: helper functions for QEMU block jobs
 *
 * Copyright (C) 2006-2015 Red Hat, Inc.
 * Copyright (C) 2006 Daniel P. Berrange
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library.  If not, see
 * <http://www.gnu.org/licenses/>.
 */

#include <config.h>

#include "internal.h"

#include "qemu_blockjob.h"
#include "qemu_block.h"
#include "qemu_domain.h"
#include "qemu_alias.h"
#include "qemu_backup.h"

#include "conf/domain_conf.h"
#include "conf/domain_event.h"

#include "storage_source_conf.h"
#include "virlog.h"
#include "virthread.h"
|
|
|
|
#include "virtime.h"
|
2016-12-16 15:06:57 +00:00
|
|
|
#include "locking/domain_lock.h"
|
2017-10-27 12:37:22 +00:00
|
|
|
#include "viralloc.h"
|
2019-01-17 16:01:55 +00:00
|
|
|
#include "virstring.h"
|
2019-03-25 16:02:44 +00:00
|
|
|
#include "qemu_security.h"
|
2015-04-16 09:24:19 +00:00
|
|
|
|
|
|
|
#define VIR_FROM_THIS VIR_FROM_QEMU
|
|
|
|
|
|
|
|
VIR_LOG_INIT("qemu.qemu_blockjob");
|
|
|
|
|
2018-11-30 15:55:08 +00:00
|
|
|
/* Note that qemuBlockjobState and qemuBlockjobType values are formatted into
|
|
|
|
* the status XML */
|
|
|
|
VIR_ENUM_IMPL(qemuBlockjobState,
|
|
|
|
QEMU_BLOCKJOB_STATE_LAST,
|
|
|
|
"completed",
|
|
|
|
"failed",
|
|
|
|
"cancelled",
|
|
|
|
"ready",
|
|
|
|
"new",
|
2018-12-07 16:40:30 +00:00
|
|
|
"running",
|
2019-02-11 13:36:24 +00:00
|
|
|
"concluded",
|
|
|
|
"aborting",
|
|
|
|
"pivoting");
|
2018-11-30 15:55:08 +00:00
|
|
|
|
|
|
|
VIR_ENUM_IMPL(qemuBlockjob,
|
|
|
|
QEMU_BLOCKJOB_TYPE_LAST,
|
|
|
|
"",
|
|
|
|
"pull",
|
|
|
|
"copy",
|
|
|
|
"commit",
|
|
|
|
"active-commit",
|
2019-10-18 13:10:33 +00:00
|
|
|
"backup",
|
2019-06-10 16:13:09 +00:00
|
|
|
"",
|
2019-11-26 13:55:05 +00:00
|
|
|
"create",
|
|
|
|
"broken");
static virClassPtr qemuBlockJobDataClass;


static void
qemuBlockJobDataDisposeJobdata(qemuBlockJobDataPtr job)
{
    if (job->type == QEMU_BLOCKJOB_TYPE_CREATE)
        virObjectUnref(job->data.create.src);

    if (job->type == QEMU_BLOCKJOB_TYPE_BACKUP) {
        virObjectUnref(job->data.backup.store);
        g_free(job->data.backup.bitmap);
    }
}


static void
qemuBlockJobDataDispose(void *obj)
{
    qemuBlockJobDataPtr job = obj;

    virObjectUnref(job->chain);
    virObjectUnref(job->mirrorChain);

    qemuBlockJobDataDisposeJobdata(job);

    g_free(job->name);
    g_free(job->errmsg);
}


static int
qemuBlockJobDataOnceInit(void)
{
    if (!VIR_CLASS_NEW(qemuBlockJobData, virClassForObject()))
        return -1;

    return 0;
}


VIR_ONCE_GLOBAL_INIT(qemuBlockJobData);
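

/**
 * qemuBlockJobDataNew:
 * @type: block job type
 * @name: job name
 *
 * Allocates and returns a new block job data object of @type named @name.
 * The job starts in the 'new' state and is not yet registered with any
 * domain or disk. Returns NULL on failure.
 */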
qemuBlockJobDataPtr
qemuBlockJobDataNew(qemuBlockJobType type,
                    const char *name)
{
    g_autoptr(qemuBlockJobData) job = NULL;

    if (qemuBlockJobDataInitialize() < 0)
        return NULL;

    if (!(job = virObjectNew(qemuBlockJobDataClass)))
        return NULL;

    job->name = g_strdup(name);

    job->state = QEMU_BLOCKJOB_STATE_NEW;
    job->newstate = -1;
    job->type = type;

    return g_steal_pointer(&job);
}


/**
 * qemuBlockJobMarkBroken:
 * @job: job to mark as broken
 *
 * In case we are unable to parse the block job data from the status XML
 * we need to mark the job as broken and then attempt to abort it. This
 * function marks the job as broken.
 */
static void
qemuBlockJobMarkBroken(qemuBlockJobDataPtr job)
{
    qemuBlockJobDataDisposeJobdata(job);
    job->brokentype = job->type;
    job->type = QEMU_BLOCKJOB_TYPE_BROKEN;
}


/**
 * qemuBlockJobRegister:
 * @job: job to register
 * @vm: domain to register @job with
 * @disk: disk to register @job with
 * @savestatus: save the status XML after registering
 *
 * This function registers @job with @disk and @vm and records it into the
 * status XML (if @savestatus is true).
 *
 * Note that if @job also references a separate chain, e.g. for disk mirroring,
 * then job->mirrorChain needs to be set manually.
 */
int
qemuBlockJobRegister(qemuBlockJobDataPtr job,
                     virDomainObjPtr vm,
                     virDomainDiskDefPtr disk,
                     bool savestatus)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;

    if (disk && QEMU_DOMAIN_DISK_PRIVATE(disk)->blockjob) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       _("disk '%s' has a blockjob assigned"), disk->dst);
        return -1;
    }

    if (virHashAddEntry(priv->blockjobs, job->name, virObjectRef(job)) < 0) {
        virObjectUnref(job);
        return -1;
    }

    if (disk) {
        job->disk = disk;
        job->chain = virObjectRef(disk->src);
        QEMU_DOMAIN_DISK_PRIVATE(disk)->blockjob = virObjectRef(job);
    }

    if (savestatus)
        qemuDomainSaveStatus(vm);

    return 0;
}
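

/**
 * qemuBlockJobUnregister:
 * @job: job to unregister
 * @vm: domain to unregister @job from
 *
 * Drops the association of @job with its disk (if any) and removes it from
 * the domain's table of block jobs, which may drop the last reference to
 * @job. The status XML is updated afterwards.
 */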
static void
qemuBlockJobUnregister(qemuBlockJobDataPtr job,
                       virDomainObjPtr vm)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    qemuDomainDiskPrivatePtr diskPriv;

    if (job->disk) {
        diskPriv = QEMU_DOMAIN_DISK_PRIVATE(job->disk);

        if (job == diskPriv->blockjob) {
            virObjectUnref(diskPriv->blockjob);
            diskPriv->blockjob = NULL;
        }

        job->disk = NULL;
    }

    /* this may remove the last reference of 'job' */
    virHashRemoveEntry(priv->blockjobs, job->name);

    qemuDomainSaveStatus(vm);
}


/**
 * qemuBlockJobDiskNew:
 * @disk: disk definition
 *
 * Start/associate a new blockjob with @disk.
 *
 * Returns the new job data object on success and NULL on failure.
 */
qemuBlockJobDataPtr
qemuBlockJobDiskNew(virDomainObjPtr vm,
                    virDomainDiskDefPtr disk,
                    qemuBlockJobType type,
                    const char *jobname)
{
    g_autoptr(qemuBlockJobData) job = NULL;

    if (!(job = qemuBlockJobDataNew(type, jobname)))
        return NULL;

    if (qemuBlockJobRegister(job, vm, disk, true) < 0)
        return NULL;

    return g_steal_pointer(&job);
}
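

/**
 * qemuBlockJobDiskNewPull:
 * @vm: domain object
 * @disk: disk to associate the pull job with
 * @base: image in the backing chain to pull data up to (NULL pulls everything)
 * @jobflags: block job flags requested by the caller
 *
 * Allocates and registers a block-pull job for @disk. With -blockdev support
 * the job is named after the disk target and format node name, otherwise the
 * legacy drive alias is used.
 */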
qemuBlockJobDataPtr
qemuBlockJobDiskNewPull(virDomainObjPtr vm,
                        virDomainDiskDefPtr disk,
                        virStorageSourcePtr base,
                        unsigned int jobflags)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    g_autoptr(qemuBlockJobData) job = NULL;
    g_autofree char *jobname = NULL;

    if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV)) {
        jobname = g_strdup_printf("pull-%s-%s", disk->dst, disk->src->nodeformat);
    } else {
        if (!(jobname = qemuAliasDiskDriveFromDisk(disk)))
            return NULL;
    }

    if (!(job = qemuBlockJobDataNew(QEMU_BLOCKJOB_TYPE_PULL, jobname)))
        return NULL;

    job->data.pull.base = base;
    job->jobflags = jobflags;

    if (qemuBlockJobRegister(job, vm, disk, true) < 0)
        return NULL;

    return g_steal_pointer(&job);
}
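

/**
 * qemuBlockJobDiskNewCommit:
 * @vm: domain object
 * @disk: disk to associate the commit job with
 * @topparent: overlay of @top (NULL when committing the active layer)
 * @top: topmost image of the region being committed
 * @base: image the data is committed into
 * @delete_imgs: delete the committed images once the job finishes
 * @jobflags: block job flags requested by the caller
 *
 * Allocates and registers a block-commit (or active-layer commit) job
 * for @disk.
 */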
qemuBlockJobDataPtr
qemuBlockJobDiskNewCommit(virDomainObjPtr vm,
                          virDomainDiskDefPtr disk,
                          virStorageSourcePtr topparent,
                          virStorageSourcePtr top,
                          virStorageSourcePtr base,
                          bool delete_imgs,
                          unsigned int jobflags)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    g_autoptr(qemuBlockJobData) job = NULL;
    g_autofree char *jobname = NULL;
    qemuBlockJobType jobtype = QEMU_BLOCKJOB_TYPE_COMMIT;

    if (topparent == NULL)
        jobtype = QEMU_BLOCKJOB_TYPE_ACTIVE_COMMIT;

    if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV)) {
        jobname = g_strdup_printf("commit-%s-%s", disk->dst, top->nodeformat);
    } else {
        if (!(jobname = qemuAliasDiskDriveFromDisk(disk)))
            return NULL;
    }

    if (!(job = qemuBlockJobDataNew(jobtype, jobname)))
        return NULL;

    job->data.commit.topparent = topparent;
    job->data.commit.top = top;
    job->data.commit.base = base;
    job->data.commit.deleteCommittedImages = delete_imgs;
    job->jobflags = jobflags;

    if (qemuBlockJobRegister(job, vm, disk, true) < 0)
        return NULL;

    return g_steal_pointer(&job);
}
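

/**
 * qemuBlockJobNewCreate:
 * @vm: domain object
 * @src: storage source being created
 * @chain: backing chain to keep a reference to while the job runs
 * @storage: use the storage node name rather than the format node name
 *
 * Allocates and registers a 'blockdev-create' job for @src. The job is not
 * associated with any disk.
 */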
qemuBlockJobDataPtr
qemuBlockJobNewCreate(virDomainObjPtr vm,
                      virStorageSourcePtr src,
                      virStorageSourcePtr chain,
                      bool storage)
{
    g_autoptr(qemuBlockJobData) job = NULL;
    g_autofree char *jobname = NULL;
    const char *nodename = src->nodeformat;

    if (storage)
        nodename = src->nodestorage;

    jobname = g_strdup_printf("create-%s", nodename);

    if (!(job = qemuBlockJobDataNew(QEMU_BLOCKJOB_TYPE_CREATE, jobname)))
        return NULL;

    if (virStorageSourceIsBacking(chain))
        job->chain = virObjectRef(chain);

    job->data.create.src = virObjectRef(src);

    if (qemuBlockJobRegister(job, vm, NULL, true) < 0)
        return NULL;

    return g_steal_pointer(&job);
}
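

/**
 * qemuBlockJobDiskNewCopy:
 * @vm: domain object
 * @disk: disk to associate the copy job with
 * @mirror: destination (mirror) image chain of the copy
 * @shallow: true for a shallow (top image only) copy
 * @reuse: true when an existing external image is reused as the destination
 * @jobflags: block job flags requested by the caller
 *
 * Allocates and registers a block-copy job for @disk mirroring into @mirror.
 */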
qemuBlockJobDataPtr
qemuBlockJobDiskNewCopy(virDomainObjPtr vm,
                        virDomainDiskDefPtr disk,
                        virStorageSourcePtr mirror,
                        bool shallow,
                        bool reuse,
                        unsigned int jobflags)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    g_autoptr(qemuBlockJobData) job = NULL;
    g_autofree char *jobname = NULL;

    if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV)) {
        jobname = g_strdup_printf("copy-%s-%s", disk->dst, disk->src->nodeformat);
    } else {
        if (!(jobname = qemuAliasDiskDriveFromDisk(disk)))
            return NULL;
    }

    if (!(job = qemuBlockJobDataNew(QEMU_BLOCKJOB_TYPE_COPY, jobname)))
        return NULL;

    job->mirrorChain = virObjectRef(mirror);

    if (shallow && !reuse)
        job->data.copy.shallownew = true;

    job->jobflags = jobflags;

    if (qemuBlockJobRegister(job, vm, disk, true) < 0)
        return NULL;

    return g_steal_pointer(&job);
}
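

/**
 * qemuBlockJobDiskNewBackup:
 * @vm: domain object
 * @disk: disk to associate the backup job with
 * @store: storage source the backup is written to
 * @bitmap: name of the bitmap to use for an incremental backup (or NULL)
 *
 * Allocates and registers a backup blockjob for @disk. The caller is expected
 * to save the status XML, since backup jobs are usually started in bulk via
 * a transaction.
 */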
qemuBlockJobDataPtr
qemuBlockJobDiskNewBackup(virDomainObjPtr vm,
                          virDomainDiskDefPtr disk,
                          virStorageSourcePtr store,
                          const char *bitmap)
{
    g_autoptr(qemuBlockJobData) job = NULL;
    g_autofree char *jobname = NULL;

    jobname = g_strdup_printf("backup-%s-%s", disk->dst, disk->src->nodeformat);

    if (!(job = qemuBlockJobDataNew(QEMU_BLOCKJOB_TYPE_BACKUP, jobname)))
        return NULL;

    job->data.backup.bitmap = g_strdup(bitmap);
    job->data.backup.store = virObjectRef(store);

    /* backup jobs are usually started in bulk by transaction so the caller
     * shall save the status XML */
    if (qemuBlockJobRegister(job, vm, disk, false) < 0)
        return NULL;

    return g_steal_pointer(&job);
}


/**
 * qemuBlockJobDiskGetJob:
 * @disk: disk definition
 *
 * Get a reference to the block job data object associated with @disk.
 */
qemuBlockJobDataPtr
qemuBlockJobDiskGetJob(virDomainDiskDefPtr disk)
{
    qemuBlockJobDataPtr job = QEMU_DOMAIN_DISK_PRIVATE(disk)->blockjob;

    if (!job)
        return NULL;

    return virObjectRef(job);
}


/**
 * qemuBlockJobStarted:
 * @job: job data
 *
 * Mark @job as started in qemu.
 */
void
qemuBlockJobStarted(qemuBlockJobDataPtr job,
                    virDomainObjPtr vm)
{
    if (job->state == QEMU_BLOCKJOB_STATE_NEW)
        job->state = QEMU_BLOCKJOB_STATE_RUNNING;

    qemuDomainSaveStatus(vm);
}


/**
 * qemuBlockJobStartupFinalize:
 * @job: job being started
 *
 * Cancels and clears the job private data if the job was not started with
 * qemu (see qemuBlockJobStarted) or just clears up the local reference
 * to @job if it was started.
 */
void
qemuBlockJobStartupFinalize(virDomainObjPtr vm,
                            qemuBlockJobDataPtr job)
{
    if (!job)
        return;

    if (job->state == QEMU_BLOCKJOB_STATE_NEW)
        qemuBlockJobUnregister(job, vm);

    virObjectUnref(job);
}


bool
qemuBlockJobIsRunning(qemuBlockJobDataPtr job)
{
    return job->state == QEMU_BLOCKJOB_STATE_RUNNING ||
           job->state == QEMU_BLOCKJOB_STATE_READY ||
           job->state == QEMU_BLOCKJOB_STATE_ABORTING ||
           job->state == QEMU_BLOCKJOB_STATE_PIVOTING;
}


/* returns 1 for a job we didn't reconnect to */
static int
qemuBlockJobRefreshJobsFindInactive(const void *payload,
                                    const char *name G_GNUC_UNUSED,
                                    const void *data G_GNUC_UNUSED)
{
    const qemuBlockJobData *job = payload;

    return !job->reconnected;
}
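

/**
 * qemuBlockJobRefreshJobs:
 * @driver: qemu driver object
 * @vm: domain object
 *
 * Reconciles the tracked block jobs with the list of jobs reported by qemu,
 * e.g. after reconnecting to a running domain. Jobs with invalid data are
 * marked broken and cancelled, state changes missed while disconnected are
 * replayed via qemuBlockJobUpdate, and jobs qemu no longer reports are
 * dropped. Returns 0 on success, -1 on monitor failure.
 */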
int
qemuBlockJobRefreshJobs(virQEMUDriverPtr driver,
                        virDomainObjPtr vm)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    qemuMonitorJobInfoPtr *jobinfo = NULL;
    size_t njobinfo = 0;
    qemuBlockJobDataPtr job = NULL;
    int newstate;
    size_t i;
    int ret = -1;
    int rc;

    qemuDomainObjEnterMonitor(driver, vm);

    rc = qemuMonitorGetJobInfo(priv->mon, &jobinfo, &njobinfo);

    if (qemuDomainObjExitMonitor(driver, vm) < 0 || rc < 0)
        goto cleanup;

    for (i = 0; i < njobinfo; i++) {
        if (!(job = virHashLookup(priv->blockjobs, jobinfo[i]->id))) {
            VIR_DEBUG("ignoring untracked job '%s'", jobinfo[i]->id);
            continue;
        }

        /* try cancelling invalid jobs - this works only if the job is not
         * concluded, in which case it will fail. We'll leave such a job linger
         * in qemu and just forget about it in libvirt because there's not much
         * we could do besides killing the VM */
        if (job->invalidData) {

            qemuBlockJobMarkBroken(job);

            qemuDomainObjEnterMonitor(driver, vm);

            rc = qemuMonitorJobCancel(priv->mon, job->name, true);
            if (rc == -1 && jobinfo[i]->status == QEMU_MONITOR_JOB_STATUS_CONCLUDED)
                VIR_WARN("can't cancel job '%s' with invalid data", job->name);

            if (qemuDomainObjExitMonitor(driver, vm) < 0)
                goto cleanup;

            if (rc < 0)
                qemuBlockJobUnregister(job, vm);
            else
                job->reconnected = true;
            continue;
        }

        if ((newstate = qemuBlockjobConvertMonitorStatus(jobinfo[i]->status)) < 0)
            continue;

        if (newstate != job->state) {
            if ((job->state == QEMU_BLOCKJOB_STATE_FAILED ||
                 job->state == QEMU_BLOCKJOB_STATE_COMPLETED)) {
                /* preserve the old state but allow the job to be bumped to
                 * execute the finishing steps */
                job->newstate = job->state;
            } else if (newstate == QEMU_BLOCKJOB_STATE_CONCLUDED) {
                job->errmsg = g_strdup(jobinfo[i]->error);

                if (job->errmsg)
                    job->newstate = QEMU_BLOCKJOB_STATE_FAILED;
                else
                    job->newstate = QEMU_BLOCKJOB_STATE_COMPLETED;
            } else if (newstate == QEMU_BLOCKJOB_STATE_READY) {
                /* Apply _READY state only if it was not applied before */
                if (job->state == QEMU_BLOCKJOB_STATE_NEW ||
                    job->state == QEMU_BLOCKJOB_STATE_RUNNING)
                    job->newstate = newstate;
            }
            /* don't update the job otherwise */
        }

        job->reconnected = true;

        if (job->newstate != -1)
            qemuBlockJobUpdate(vm, job, QEMU_ASYNC_JOB_NONE);
        /* 'job' may be invalid after this update */
    }

    /* remove data for jobs which qemu didn't report (the algorithm is
     * inefficient, but the possibility of such jobs is very low) */
    while ((job = virHashSearch(priv->blockjobs, qemuBlockJobRefreshJobsFindInactive, NULL, NULL))) {
        VIR_WARN("dropping blockjob '%s' untracked by qemu", job->name);
        qemuBlockJobUnregister(job, vm);
    }

    ret = 0;

 cleanup:
    for (i = 0; i < njobinfo; i++)
        qemuMonitorJobInfoFree(jobinfo[i]);
    VIR_FREE(jobinfo);

    return ret;
}


/**
 * qemuBlockJobEmitEvents:
 *
 * Emits the VIR_DOMAIN_EVENT_ID_BLOCK_JOB and VIR_DOMAIN_EVENT_ID_BLOCK_JOB_2
 * events for a block job. The former event is emitted only for local disks.
 */
static void
qemuBlockJobEmitEvents(virQEMUDriverPtr driver,
                       virDomainObjPtr vm,
                       virDomainDiskDefPtr disk,
                       virDomainBlockJobType type,
                       virConnectDomainEventBlockJobStatus status)
{
    virObjectEventPtr event = NULL;
    virObjectEventPtr event2 = NULL;

    /* don't emit events for jobs without disk */
    if (!disk)
        return;

    /* don't emit events for internal jobs and states */
    if (type >= VIR_DOMAIN_BLOCK_JOB_TYPE_LAST ||
        status >= VIR_DOMAIN_BLOCK_JOB_LAST)
        return;

    if (virStorageSourceIsLocalStorage(disk->src) &&
        !virStorageSourceIsEmpty(disk->src)) {
        event = virDomainEventBlockJobNewFromObj(vm, virDomainDiskGetSource(disk),
                                                 type, status);
        virObjectEventStateQueue(driver->domainEventState, event);
    }

    event2 = virDomainEventBlockJob2NewFromObj(vm, disk->dst, type, status);
    virObjectEventStateQueue(driver->domainEventState, event2);
}


/**
 * qemuBlockJobCleanStorageSourceRuntime:
 * @src: storage source to clean from runtime data
 *
 * Remove all runtime related data from the storage source.
 */
static void
qemuBlockJobCleanStorageSourceRuntime(virStorageSourcePtr src)
{
    src->id = 0;
    src->detected = false;
    VIR_FREE(src->relPath);
    VIR_FREE(src->backingStoreRaw);
    VIR_FREE(src->nodestorage);
    VIR_FREE(src->nodeformat);
    VIR_FREE(src->tlsAlias);
    VIR_FREE(src->tlsCertdir);
}


/**
 * qemuBlockJobRewriteConfigDiskSource:
 * @vm: domain object
 * @disk: live definition disk
 * @newsrc: new source which should be also considered for the new disk
 *
 * For block jobs which modify the running disk source it is required that we
 * try our best to update the config XML's disk source as well in most cases.
 *
 * This helper finds the disk from the persistent definition corresponding to
 * @disk and updates its source to @newsrc.
 */
static void
qemuBlockJobRewriteConfigDiskSource(virDomainObjPtr vm,
                                    virDomainDiskDefPtr disk,
                                    virStorageSourcePtr newsrc)
{
    virDomainDiskDefPtr persistDisk = NULL;
    g_autoptr(virStorageSource) copy = NULL;
    virStorageSourcePtr n;

    if (!vm->newDef)
        return;

    if (!(persistDisk = virDomainDiskByTarget(vm->newDef, disk->dst)))
        return;

    if (!virStorageSourceIsSameLocation(disk->src, persistDisk->src))
        return;

    if (!(copy = virStorageSourceCopy(newsrc, true)) ||
        virStorageSourceInitChainElement(copy, persistDisk->src, true) < 0) {
        VIR_WARN("Unable to update persistent definition on vm %s after block job",
                 vm->def->name);
        return;
    }

    for (n = copy; virStorageSourceIsBacking(n); n = n->backingStore) {
        qemuBlockJobCleanStorageSourceRuntime(n);

        /* discard any detected backing store */
        if (virStorageSourceIsBacking(n->backingStore) &&
            n->backingStore->detected) {
            virObjectUnref(n->backingStore);
            n->backingStore = NULL;
            break;
        }
    }

    virObjectUnref(persistDisk->src);
    persistDisk->src = g_steal_pointer(&copy);
}


static void
qemuBlockJobEventProcessLegacyCompleted(virQEMUDriverPtr driver,
                                        virDomainObjPtr vm,
                                        qemuBlockJobDataPtr job,
                                        int asyncJob)
{
    virDomainDiskDefPtr disk = job->disk;

    if (!disk)
        return;

    if (disk->mirrorState == VIR_DOMAIN_DISK_MIRROR_STATE_PIVOT) {
        qemuBlockJobRewriteConfigDiskSource(vm, disk, disk->mirror);
        /* XXX We want to revoke security labels as well as audit that
         * revocation, before dropping the original source.  But it gets
         * tricky if both source and mirror share common backing files (we
         * want to only revoke the non-shared portion of the chain); so for
         * now, we leak the access to the original. */
        virDomainLockImageDetach(driver->lockManager, vm, disk->src);

        /* Move secret driver metadata */
        if (qemuSecurityMoveImageMetadata(driver, vm, disk->src, disk->mirror) < 0) {
            VIR_WARN("Unable to move disk metadata on "
                     "vm %s from %s to %s (disk target %s)",
                     vm->def->name,
                     NULLSTR(disk->src->path),
                     NULLSTR(disk->mirror->path),
                     disk->dst);
        }

        virObjectUnref(disk->src);
        disk->src = disk->mirror;
    } else {
        if (disk->mirror) {
            virDomainLockImageDetach(driver->lockManager, vm, disk->mirror);

            /* Ideally, we would restore seclabels on the backing chain here
             * but we don't know if somebody else is not using parts of it.
             * Remove security driver metadata so that they are not leaked. */
            qemuBlockRemoveImageMetadata(driver, vm, disk->dst, disk->mirror);

            virObjectUnref(disk->mirror);
        }

        qemuBlockRemoveImageMetadata(driver, vm, disk->dst, disk->src);
    }

    /* Recompute the cached backing chain to match our
     * updates.  Better would be storing the chain ourselves
     * rather than reprobing, but we haven't quite completed
     * that conversion to use our XML tracking. */
    disk->mirror = NULL;
    disk->mirrorState = VIR_DOMAIN_DISK_MIRROR_STATE_NONE;
    disk->mirrorJob = VIR_DOMAIN_BLOCK_JOB_TYPE_UNKNOWN;
    disk->src->id = 0;
    virStorageSourceBackingStoreClear(disk->src);
    ignore_value(qemuDomainDetermineDiskChain(driver, vm, disk, NULL, true));
    ignore_value(qemuBlockNodeNamesDetect(driver, vm, asyncJob));
    qemuBlockJobUnregister(job, vm);
    qemuDomainSaveConfig(vm);
}


/**
 * qemuBlockJobEventProcessLegacy:
 * @driver: qemu driver
 * @vm: domain
 * @job: job to process events for
 *
 * Update disk's mirror state in response to a block job event
 * from QEMU. For mirror states that must survive libvirt
 * restart, also update the domain's status XML.
 */
static void
qemuBlockJobEventProcessLegacy(virQEMUDriverPtr driver,
                               virDomainObjPtr vm,
                               qemuBlockJobDataPtr job,
                               int asyncJob)
{
    virDomainDiskDefPtr disk = job->disk;

    VIR_DEBUG("disk=%s, mirrorState=%s, type=%d, state=%d, newstate=%d",
              disk->dst,
              NULLSTR(virDomainDiskMirrorStateTypeToString(disk->mirrorState)),
              job->type,
              job->state,
              job->newstate);

    if (job->newstate == -1)
        return;

    qemuBlockJobEmitEvents(driver, vm, disk, job->type, job->newstate);

    job->state = job->newstate;
    job->newstate = -1;

    /* If we completed a block pull or commit, then update the XML
     * to match.  */
    switch ((virConnectDomainEventBlockJobStatus) job->state) {
    case VIR_DOMAIN_BLOCK_JOB_COMPLETED:
        qemuBlockJobEventProcessLegacyCompleted(driver, vm, job, asyncJob);
        break;

    case VIR_DOMAIN_BLOCK_JOB_READY:
        disk->mirrorState = VIR_DOMAIN_DISK_MIRROR_STATE_READY;
        qemuDomainSaveStatus(vm);
        break;

    case VIR_DOMAIN_BLOCK_JOB_FAILED:
    case VIR_DOMAIN_BLOCK_JOB_CANCELED:
        if (disk->mirror) {
            virDomainLockImageDetach(driver->lockManager, vm, disk->mirror);

            /* Ideally, we would restore seclabels on the backing chain here
             * but we don't know if somebody else is not using parts of it.
             * Remove security driver metadata so that they are not leaked. */
            qemuBlockRemoveImageMetadata(driver, vm, disk->dst, disk->mirror);

            virObjectUnref(disk->mirror);
            disk->mirror = NULL;
        }
        disk->mirrorState = VIR_DOMAIN_DISK_MIRROR_STATE_NONE;
        disk->mirrorJob = VIR_DOMAIN_BLOCK_JOB_TYPE_UNKNOWN;
        qemuBlockJobUnregister(job, vm);
        break;

    case VIR_DOMAIN_BLOCK_JOB_LAST:
        break;
    }
}
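

/**
 * qemuBlockJobEventProcessConcludedRemoveChain:
 * @driver: qemu driver object
 * @vm: domain object
 * @asyncJob: qemu asynchronous job type (for monitor interaction)
 * @chain: backing chain to unplug
 *
 * Detaches the blockdev backends of @chain from qemu and revokes access to
 * the images via qemuDomainStorageSourceChainAccessRevoke().
 */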
static void
qemuBlockJobEventProcessConcludedRemoveChain(virQEMUDriverPtr driver,
                                             virDomainObjPtr vm,
                                             qemuDomainAsyncJob asyncJob,
                                             virStorageSourcePtr chain)
{
    g_autoptr(qemuBlockStorageSourceChainData) data = NULL;

    if (!(data = qemuBlockStorageSourceChainDetachPrepareBlockdev(chain)))
        return;

    if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
        return;

    qemuBlockStorageSourceChainDetach(qemuDomainGetMonitor(vm), data);
    if (qemuDomainObjExitMonitor(driver, vm) < 0)
        return;

    qemuDomainStorageSourceChainAccessRevoke(driver, vm, chain);
}


/**
 * qemuBlockJobGetConfigDisk:
 * @vm: domain object
 * @disk: disk from the running definition
 * @diskChainBottom: the last element of backing chain of @disk which is relevant
 *
 * Finds and returns the disk corresponding to @disk in the inactive definition.
 * The inactive disk must have the backing chain starting from the source until
 * @diskChainBottom identical. If @diskChainBottom is NULL the whole backing
 * chains of both @disk and the persistent config definition equivalent must
 * be identical.
 */
static virDomainDiskDefPtr
qemuBlockJobGetConfigDisk(virDomainObjPtr vm,
                          virDomainDiskDefPtr disk,
                          virStorageSourcePtr diskChainBottom)
{
    virStorageSourcePtr disksrc = NULL;
    virStorageSourcePtr cfgsrc = NULL;
    virDomainDiskDefPtr ret = NULL;

    if (!vm->newDef || !disk)
        return NULL;

    disksrc = disk->src;

    if (!(ret = virDomainDiskByTarget(vm->newDef, disk->dst)))
        return NULL;

    cfgsrc = ret->src;

    while (disksrc && cfgsrc) {
        if (!virStorageSourceIsSameLocation(disksrc, cfgsrc))
            return NULL;

        if (diskChainBottom && diskChainBottom == disksrc)
            return ret;

        disksrc = disksrc->backingStore;
        cfgsrc = cfgsrc->backingStore;
    }

    if (disksrc || cfgsrc)
        return NULL;

    return ret;
}


/**
 * qemuBlockJobClearConfigChain:
 * @vm: domain object
 * @disk: disk object from running definition of @vm
 *
 * In cases when the backing chain definition of the live disk differs from
 * the definition for the next start config and the block job would touch the
 * backing chain, we'd not be able to restore the chain in the next start
 * config properly.
 *
 * This function checks that the source of the running disk definition and the
 * config disk definition are the same and, if so, clears the backing chain
 * data.
 */
static void
qemuBlockJobClearConfigChain(virDomainObjPtr vm,
                             virDomainDiskDefPtr disk)
{
    virDomainDiskDefPtr cfgdisk = NULL;

    if (!vm->newDef || !disk)
        return;

    if (!(cfgdisk = virDomainDiskByTarget(vm->newDef, disk->dst)))
        return;

    if (!virStorageSourceIsSameLocation(disk->src, cfgdisk->src))
        return;

    virObjectUnref(cfgdisk->src->backingStore);
    cfgdisk->src->backingStore = NULL;
}
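

/**
 * qemuBlockJobProcessEventCompletedPullBitmaps:
 *
 * Handles dirty bitmap adjustments after a completed block pull by computing
 * the required bitmap merge actions and executing them via a 'transaction'
 * monitor command. Returns -1 on failure, 0 otherwise.
 */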
static int
qemuBlockJobProcessEventCompletedPullBitmaps(virDomainObjPtr vm,
                                             qemuBlockJobDataPtr job,
                                             qemuDomainAsyncJob asyncJob)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    g_autoptr(GHashTable) blockNamedNodeData = NULL;
    g_autoptr(virJSONValue) actions = NULL;

    if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, asyncJob)))
        return -1;

    if (qemuBlockGetBitmapMergeActions(job->disk->src,
                                       job->data.pull.base,
                                       job->disk->src,
                                       NULL, NULL, NULL,
                                       &actions,
                                       blockNamedNodeData) < 0)
        return -1;

    if (!actions)
        return 0;

    if (qemuDomainObjEnterMonitorAsync(priv->driver, vm, asyncJob) < 0)
        return -1;

    qemuMonitorTransaction(priv->mon, &actions);

    if (qemuDomainObjExitMonitor(priv->driver, vm) < 0)
        return -1;

    return 0;
}


/**
 * qemuBlockJobProcessEventCompletedPull:
 * @driver: qemu driver object
 * @vm: domain object
 * @job: job data
 * @asyncJob: qemu asynchronous job type (for monitor interaction)
 *
 * This function executes the finalizing steps after a successful block pull job
 * (block-stream in qemu terminology). The pull job copies all the data from the
 * images in the backing chain up to the 'base' image. The 'base' image becomes
 * the backing store of the active top level image. If 'base' was not used
 * everything is pulled into the top level image and the top level image will
 * cease to have backing store. All intermediate images between the active image
 * and base image are no longer required and can be unplugged.
 */
static void
qemuBlockJobProcessEventCompletedPull(virQEMUDriverPtr driver,
                                      virDomainObjPtr vm,
                                      qemuBlockJobDataPtr job,
                                      qemuDomainAsyncJob asyncJob)
{
    virStorageSourcePtr baseparent = NULL;
    virDomainDiskDefPtr cfgdisk = NULL;
    virStorageSourcePtr cfgbase = NULL;
    virStorageSourcePtr cfgbaseparent = NULL;
    virStorageSourcePtr n;
    virStorageSourcePtr tmp;

    VIR_DEBUG("pull job '%s' on VM '%s' completed", job->name, vm->def->name);

    /* if the job isn't associated with a disk there's nothing to do */
    if (!job->disk)
        return;

    if ((cfgdisk = qemuBlockJobGetConfigDisk(vm, job->disk, job->data.pull.base)))
        cfgbase = cfgdisk->src->backingStore;

    if (!cfgdisk)
        qemuBlockJobClearConfigChain(vm, job->disk);

    qemuBlockJobProcessEventCompletedPullBitmaps(vm, job, asyncJob);

    /* when pulling if 'base' is right below the top image we don't have to modify it */
    if (job->disk->src->backingStore == job->data.pull.base)
        return;

    if (job->data.pull.base) {
        for (n = job->disk->src->backingStore; n && n != job->data.pull.base; n = n->backingStore) {
            /* find the image on top of 'base' */

            if (cfgbase) {
                cfgbaseparent = cfgbase;
                cfgbase = cfgbase->backingStore;
            }

            baseparent = n;
        }
    }

    tmp = job->disk->src->backingStore;
    job->disk->src->backingStore = job->data.pull.base;
    if (baseparent)
        baseparent->backingStore = NULL;
    qemuBlockJobEventProcessConcludedRemoveChain(driver, vm, asyncJob, tmp);
    virObjectUnref(tmp);

    if (cfgdisk) {
        tmp = cfgdisk->src->backingStore;
        cfgdisk->src->backingStore = cfgbase;
        if (cfgbaseparent)
            cfgbaseparent->backingStore = NULL;
        virObjectUnref(tmp);
    }
}


/**
 * qemuBlockJobDeleteImages:
 * @driver: qemu driver object
 * @vm: domain object
 * @disk: disk object that the chain to be deleted is associated with
 * @top: top snapshot of the chain to be deleted
 *
 * Helper for removing snapshot images.  Intended for callers like
 * qemuBlockJobProcessEventCompletedCommit() and
 * qemuBlockJobProcessEventCompletedActiveCommit() as it relies on adjustments
 * these functions perform on the 'backingStore' chain to function correctly.
 *
 * TODO look into removing backing store for non-local snapshots too
 */
static void
qemuBlockJobDeleteImages(virQEMUDriverPtr driver,
                         virDomainObjPtr vm,
                         virDomainDiskDefPtr disk,
                         virStorageSourcePtr top)
{
    virStorageSourcePtr p = top;
    g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
    uid_t uid;
    gid_t gid;

    for (; p != NULL; p = p->backingStore) {
        if (virStorageSourceGetActualType(p) == VIR_STORAGE_TYPE_FILE) {

            qemuDomainGetImageIds(cfg, vm, p, disk->src, &uid, &gid);

            if (virFileRemove(p->path, uid, gid) < 0) {
                VIR_WARN("Unable to remove snapshot image file '%s' (%s)",
                         p->path, g_strerror(errno));
            }
        }
    }
}


/**
 * qemuBlockJobProcessEventCompletedCommitBitmaps:
 *
 * Handles the bitmap changes after commit. This returns -1 on monitor failures.
 */
static int
qemuBlockJobProcessEventCompletedCommitBitmaps(virDomainObjPtr vm,
                                               qemuBlockJobDataPtr job,
                                               qemuDomainAsyncJob asyncJob)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    g_autoptr(GHashTable) blockNamedNodeData = NULL;
    g_autoptr(virJSONValue) actions = NULL;
    bool active = job->type == QEMU_BLOCKJOB_TYPE_ACTIVE_COMMIT;

    if (!active &&
        !virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV_REOPEN))
        return 0;

    if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, asyncJob)))
        return -1;

    if (qemuBlockBitmapsHandleCommitFinish(job->data.commit.top,
                                           job->data.commit.base,
                                           active,
                                           blockNamedNodeData,
                                           &actions) < 0)
        return 0;

    if (!actions)
        return 0;

    if (!active) {
        if (qemuBlockReopenReadWrite(vm, job->data.commit.base, asyncJob) < 0)
            return -1;
    }

    if (qemuDomainObjEnterMonitorAsync(priv->driver, vm, asyncJob) < 0)
        return -1;

    qemuMonitorTransaction(priv->mon, &actions);

    if (qemuDomainObjExitMonitor(priv->driver, vm) < 0)
        return -1;

    if (!active) {
        if (qemuBlockReopenReadOnly(vm, job->data.commit.base, asyncJob) < 0)
            return -1;
    }

    return 0;
}


/**
 * qemuBlockJobProcessEventCompletedCommit:
 * @driver: qemu driver object
 * @vm: domain object
 * @job: job data
 * @asyncJob: qemu asynchronous job type (for monitor interaction)
 *
 * This function executes the finalizing steps after a successful block commit
 * job. The commit job moves the blocks from backing chain images starting from
 * 'top' into the 'base' image. The overlay of the 'top' image ('topparent')
 * then directly references the 'base' image. All intermediate images can be
 * removed/deleted.
 */
static void
qemuBlockJobProcessEventCompletedCommit(virQEMUDriverPtr driver,
                                        virDomainObjPtr vm,
                                        qemuBlockJobDataPtr job,
                                        qemuDomainAsyncJob asyncJob)
{
    virStorageSourcePtr baseparent = NULL;
    virDomainDiskDefPtr cfgdisk = NULL;
    virStorageSourcePtr cfgnext = NULL;
    virStorageSourcePtr cfgtopparent = NULL;
    virStorageSourcePtr cfgtop = NULL;
    virStorageSourcePtr cfgbase = NULL;
    virStorageSourcePtr cfgbaseparent = NULL;
    virStorageSourcePtr n;

    VIR_DEBUG("commit job '%s' on VM '%s' completed", job->name, vm->def->name);

    /* if the job isn't associated with a disk there's nothing to do */
    if (!job->disk)
        return;

    if ((cfgdisk = qemuBlockJobGetConfigDisk(vm, job->disk, job->data.commit.base)))
        cfgnext = cfgdisk->src;

    if (!cfgdisk)
        qemuBlockJobClearConfigChain(vm, job->disk);

    for (n = job->disk->src; n && n != job->data.commit.base; n = n->backingStore) {
        if (cfgnext) {
            if (n == job->data.commit.topparent)
                cfgtopparent = cfgnext;

            if (n == job->data.commit.top)
                cfgtop = cfgnext;

            cfgbaseparent = cfgnext;
            cfgnext = cfgnext->backingStore;
        }
        baseparent = n;
    }

    if (!n)
        return;

    if (qemuBlockJobProcessEventCompletedCommitBitmaps(vm, job, asyncJob) < 0)
        return;

    /* revert access to images */
    qemuDomainStorageSourceAccessAllow(driver, vm, job->data.commit.base,
                                       true, false, false);
    if (job->data.commit.topparent != job->disk->src)
        qemuDomainStorageSourceAccessAllow(driver, vm, job->data.commit.topparent,
                                           true, false, true);

    baseparent->backingStore = NULL;
    job->data.commit.topparent->backingStore = job->data.commit.base;

    qemuBlockJobEventProcessConcludedRemoveChain(driver, vm, asyncJob, job->data.commit.top);

    if (job->data.commit.deleteCommittedImages)
        qemuBlockJobDeleteImages(driver, vm, job->disk, job->data.commit.top);

    virObjectUnref(job->data.commit.top);
    job->data.commit.top = NULL;

    if (cfgbaseparent) {
        cfgbase = cfgbaseparent->backingStore;
        cfgbaseparent->backingStore = NULL;

        if (cfgtopparent)
            cfgtopparent->backingStore = cfgbase;
        else
            cfgdisk->src = cfgbase;

        virObjectUnref(cfgtop);
    }
}


/**
 * qemuBlockJobProcessEventCompletedActiveCommit:
 * @driver: qemu driver object
 * @vm: domain object
 * @job: job data
 * @asyncJob: qemu asynchronous job type (for monitor interaction)
 *
 * This function executes the finalizing steps after a successful active layer
 * block commit job. The commit job moves the blocks from backing chain images
 * starting from the active disk source image into the 'base' image. The disk
 * source then changes to the 'base' image. All intermediate images can be
 * removed/deleted.
 */
static void
qemuBlockJobProcessEventCompletedActiveCommit(virQEMUDriverPtr driver,
                                              virDomainObjPtr vm,
                                              qemuBlockJobDataPtr job,
                                              qemuDomainAsyncJob asyncJob)
{
    virStorageSourcePtr baseparent = NULL;
    virDomainDiskDefPtr cfgdisk = NULL;
    virStorageSourcePtr cfgnext = NULL;
    virStorageSourcePtr cfgtop = NULL;
    virStorageSourcePtr cfgbase = NULL;
    virStorageSourcePtr cfgbaseparent = NULL;
    virStorageSourcePtr n;

    VIR_DEBUG("active commit job '%s' on VM '%s' completed", job->name, vm->def->name);

    /* if the job isn't associated with a disk there's nothing to do */
    if (!job->disk)
        return;

    if ((cfgdisk = qemuBlockJobGetConfigDisk(vm, job->disk, job->data.commit.base)))
        cfgnext = cfgdisk->src;

    for (n = job->disk->src; n && n != job->data.commit.base; n = n->backingStore) {
        if (cfgnext) {
            if (n == job->data.commit.top)
                cfgtop = cfgnext;

            cfgbaseparent = cfgnext;
            cfgnext = cfgnext->backingStore;
        }
        baseparent = n;
    }

    if (!n)
        return;

    if (!cfgdisk) {
        /* in case when the config disk chain didn't match but the disk top seems
         * to be identical we need to modify the disk source since the active
         * commit makes the top level image invalid.
         */
        qemuBlockJobRewriteConfigDiskSource(vm, job->disk, job->data.commit.base);
    } else {
        cfgbase = cfgbaseparent->backingStore;
        cfgbaseparent->backingStore = NULL;
        cfgdisk->src = cfgbase;
        cfgdisk->src->readonly = cfgtop->readonly;
        virObjectUnref(cfgtop);
    }

    /* Move security driver metadata */
    if (qemuSecurityMoveImageMetadata(driver, vm, job->disk->src, job->data.commit.base) < 0)
        VIR_WARN("Unable to move disk metadata on vm %s", vm->def->name);

    baseparent->backingStore = NULL;
    job->disk->src = job->data.commit.base;
    job->disk->src->readonly = job->data.commit.top->readonly;

    if (qemuBlockJobProcessEventCompletedCommitBitmaps(vm, job, asyncJob) < 0)
        return;

    qemuBlockJobEventProcessConcludedRemoveChain(driver, vm, asyncJob, job->data.commit.top);

    if (job->data.commit.deleteCommittedImages)
        qemuBlockJobDeleteImages(driver, vm, job->disk, job->data.commit.top);

    virObjectUnref(job->data.commit.top);
    job->data.commit.top = NULL;
    /* the mirror element does not serve functional purpose for the commit job */
    virObjectUnref(job->disk->mirror);
    job->disk->mirror = NULL;
}
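

/**
 * qemuBlockJobProcessEventCompletedCopyBitmaps:
 *
 * Handles the bitmap changes after a block copy by executing the computed
 * bitmap actions via a 'transaction' monitor command. Returns -1 on monitor
 * failures, 0 otherwise.
 */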
static int
qemuBlockJobProcessEventCompletedCopyBitmaps(virDomainObjPtr vm,
                                             qemuBlockJobDataPtr job,
                                             qemuDomainAsyncJob asyncJob)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    g_autoptr(GHashTable) blockNamedNodeData = NULL;
    g_autoptr(virJSONValue) actions = NULL;
    bool shallow = job->jobflags & VIR_DOMAIN_BLOCK_COPY_SHALLOW;

    if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV_REOPEN))
        return 0;

    if (!(blockNamedNodeData = qemuBlockGetNamedNodeData(vm, asyncJob)))
        return -1;

    if (qemuBlockBitmapsHandleBlockcopy(job->disk->src,
                                        job->disk->mirror,
                                        blockNamedNodeData,
                                        shallow,
                                        &actions) < 0)
        return 0;

    if (!actions)
        return 0;

    if (qemuDomainObjEnterMonitorAsync(priv->driver, vm, asyncJob) < 0)
        return -1;

    qemuMonitorTransaction(priv->mon, &actions);

    if (qemuDomainObjExitMonitor(priv->driver, vm) < 0)
        return -1;

    return 0;
}
|
|
|
|
|
2019-07-22 11:59:01 +00:00
|
|
|
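/**
 * qemuBlockJobProcessEventConcludedCopyPivot:
 *
 * Finalize a 'copy' blockjob which concluded after pivoting to the mirror:
 * handle bitmaps, fix up the backing chain of the mirror for shallow copies,
 * reopen the mirror read-only when the original source was read-only, update
 * the persistent config and replace the disk source with the mirror while
 * unplugging the now-unused original chain.
 */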
static void
qemuBlockJobProcessEventConcludedCopyPivot(virQEMUDriverPtr driver,
                                           virDomainObjPtr vm,
                                           qemuBlockJobDataPtr job,
                                           qemuDomainAsyncJob asyncJob)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;

    VIR_DEBUG("copy job '%s' on VM '%s' pivoted", job->name, vm->def->name);

    /* mirror may be NULL for copy job corresponding to migration */
    if (!job->disk ||
        !job->disk->mirror)
        return;

    qemuBlockJobProcessEventCompletedCopyBitmaps(vm, job, asyncJob);

    /* For a shallow copy that doesn't reuse an external image the user either
     * provided a backing chain for the destination, which libvirt opened and
     * used, or did not provide one, in which case the destination inherits
     * the rest of the original disk's chain here. */
    if (job->data.copy.shallownew &&
        !virStorageSourceIsBacking(job->disk->mirror->backingStore))
        job->disk->mirror->backingStore = g_steal_pointer(&job->disk->src->backingStore);

    if (job->disk->src->readonly &&
        virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV_REOPEN))
        ignore_value(qemuBlockReopenReadOnly(vm, job->disk->mirror, asyncJob));

    qemuBlockJobRewriteConfigDiskSource(vm, job->disk, job->disk->mirror);

    qemuBlockJobEventProcessConcludedRemoveChain(driver, vm, asyncJob, job->disk->src);
    virObjectUnref(job->disk->src);
    job->disk->src = g_steal_pointer(&job->disk->mirror);
}


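/**
 * qemuBlockJobProcessEventConcludedCopyAbort:
 *
 * Clean up after a 'copy' blockjob that concluded without pivoting: the
 * mirror destination and its backing chain are unplugged and released while
 * the original disk source stays in place.
 */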
static void
qemuBlockJobProcessEventConcludedCopyAbort(virQEMUDriverPtr driver,
                                           virDomainObjPtr vm,
                                           qemuBlockJobDataPtr job,
                                           qemuDomainAsyncJob asyncJob)
{
    VIR_DEBUG("copy job '%s' on VM '%s' aborted", job->name, vm->def->name);

    /* mirror may be NULL for copy job corresponding to migration */
    if (!job->disk ||
        !job->disk->mirror)
        return;

    /* activeWrite bitmap is removed automatically here */
    qemuBlockJobEventProcessConcludedRemoveChain(driver, vm, asyncJob, job->disk->mirror);
    virObjectUnref(job->disk->mirror);
    job->disk->mirror = NULL;
}


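/**
 * qemuBlockJobProcessEventFailedActiveCommit:
 *
 * Clean up a failed active-layer block commit: remove the temporary
 * 'libvirt-tmp-activewrite' bitmap via a monitor transaction, then drop the
 * security driver image metadata for the mirror and clear the mirror
 * reference.
 */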
static void
qemuBlockJobProcessEventFailedActiveCommit(virQEMUDriverPtr driver,
                                           virDomainObjPtr vm,
                                           qemuBlockJobDataPtr job,
                                           qemuDomainAsyncJob asyncJob)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    g_autoptr(virJSONValue) actions = virJSONValueNewArray();
    virDomainDiskDefPtr disk = job->disk;

    VIR_DEBUG("active commit job '%s' on VM '%s' failed", job->name, vm->def->name);

    if (!disk)
        return;

    ignore_value(qemuMonitorTransactionBitmapRemove(actions, disk->mirror->nodeformat,
                                                    "libvirt-tmp-activewrite"));

    if (qemuDomainObjEnterMonitorAsync(priv->driver, vm, asyncJob) < 0)
        return;

    qemuMonitorTransaction(priv->mon, &actions);

    if (qemuDomainObjExitMonitor(priv->driver, vm) < 0)
        return;

    /* Ideally, we would make the backing chain read only again (yes, SELinux
     * can do that using different labels). But that is not implemented yet and
     * not leaking security driver metadata is more important. */
    qemuBlockRemoveImageMetadata(driver, vm, disk->dst, disk->mirror);

    virObjectUnref(disk->mirror);
    disk->mirror = NULL;
}


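/**
 * qemuBlockJobProcessEventConcludedCreate:
 *
 * Handle a concluded 'create' blockjob. When no synchronous caller is waiting
 * for the job, roll back the storage backend attachment in qemu and revoke
 * access to the image that was being created; a synchronous caller keeps
 * control of the registered 'chain' and handles further hotplug itself.
 */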
static void
qemuBlockJobProcessEventConcludedCreate(virQEMUDriverPtr driver,
                                        virDomainObjPtr vm,
                                        qemuBlockJobDataPtr job,
                                        qemuDomainAsyncJob asyncJob)
{
    g_autoptr(qemuBlockStorageSourceAttachData) backend = NULL;

    /* If there is a synchronous client waiting for this job, it will handle
     * further hotplug of the created volume and the registered 'chain' is
     * under its control. */
    if (job->synchronous) {
        virObjectUnref(job->chain);
        job->chain = NULL;
        return;
    }

    if (!job->data.create.src)
        return;

    if (!(backend = qemuBlockStorageSourceDetachPrepare(job->data.create.src, NULL)))
        return;

    /* the format node part was not attached yet, so we don't need to detach it */
    backend->formatAttached = false;
    if (job->data.create.storage) {
        backend->storageAttached = false;
        backend->storageSliceAttached = false;
        VIR_FREE(backend->encryptsecretAlias);
    }

    if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
        return;

    qemuBlockStorageSourceAttachRollback(qemuDomainGetMonitor(vm), backend);

    if (qemuDomainObjExitMonitor(driver, vm) < 0)
        return;

    qemuDomainStorageSourceAccessRevoke(driver, vm, job->data.create.src);
}


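/**
 * qemuBlockJobProcessEventConcludedBackup:
 *
 * Finish a per-disk 'backup' blockjob: notify the backup machinery of the job
 * outcome and progress, detach the backup store from qemu if one was attached,
 * remove the temporary bitmap if one was used, and revoke access to the backup
 * store.
 */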
static void
qemuBlockJobProcessEventConcludedBackup(virQEMUDriverPtr driver,
                                        virDomainObjPtr vm,
                                        qemuBlockJobDataPtr job,
                                        qemuDomainAsyncJob asyncJob,
                                        qemuBlockjobState newstate,
                                        unsigned long long progressCurrent,
                                        unsigned long long progressTotal)
{
    g_autoptr(qemuBlockStorageSourceAttachData) backend = NULL;
    g_autoptr(virJSONValue) actions = NULL;

    qemuBackupNotifyBlockjobEnd(vm, job->disk, newstate, job->errmsg,
                                progressCurrent, progressTotal, asyncJob);

    if (job->data.backup.store &&
        !(backend = qemuBlockStorageSourceDetachPrepare(job->data.backup.store, NULL)))
        return;

    if (job->data.backup.bitmap) {
        actions = virJSONValueNewArray();

        if (qemuMonitorTransactionBitmapRemove(actions,
                                               job->disk->src->nodeformat,
                                               job->data.backup.bitmap) < 0)
            return;
    }

    if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
        return;

    if (backend)
        qemuBlockStorageSourceAttachRollback(qemuDomainGetMonitor(vm), backend);

    if (actions)
        qemuMonitorTransaction(qemuDomainGetMonitor(vm), &actions);

    if (qemuDomainObjExitMonitor(driver, vm) < 0)
        return;

    if (job->data.backup.store)
        qemuDomainStorageSourceAccessRevoke(driver, vm, job->data.backup.store);
}


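/**
 * qemuBlockJobEventProcessConcludedTransition:
 *
 * Invoke the per-job-type completion or failure handler for a blockjob that
 * reached a concluded state, emit the corresponding block job events and
 * commit job->newstate into job->state.
 */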
static void
qemuBlockJobEventProcessConcludedTransition(qemuBlockJobDataPtr job,
                                            virQEMUDriverPtr driver,
                                            virDomainObjPtr vm,
                                            qemuDomainAsyncJob asyncJob,
                                            unsigned long long progressCurrent,
                                            unsigned long long progressTotal)
{
    bool success = job->newstate == QEMU_BLOCKJOB_STATE_COMPLETED;

    switch ((qemuBlockJobType) job->type) {
    case QEMU_BLOCKJOB_TYPE_PULL:
        if (success)
            qemuBlockJobProcessEventCompletedPull(driver, vm, job, asyncJob);
        break;

    case QEMU_BLOCKJOB_TYPE_COMMIT:
        if (success)
            qemuBlockJobProcessEventCompletedCommit(driver, vm, job, asyncJob);
        break;

    case QEMU_BLOCKJOB_TYPE_ACTIVE_COMMIT:
        if (success) {
            qemuBlockJobProcessEventCompletedActiveCommit(driver, vm, job, asyncJob);
        } else {
            qemuBlockJobProcessEventFailedActiveCommit(driver, vm, job, asyncJob);
        }
        break;

    case QEMU_BLOCKJOB_TYPE_CREATE:
        qemuBlockJobProcessEventConcludedCreate(driver, vm, job, asyncJob);
        break;

    case QEMU_BLOCKJOB_TYPE_COPY:
        if (job->state == QEMU_BLOCKJOB_STATE_PIVOTING && success)
            qemuBlockJobProcessEventConcludedCopyPivot(driver, vm, job, asyncJob);
        else
            qemuBlockJobProcessEventConcludedCopyAbort(driver, vm, job, asyncJob);
        break;

    case QEMU_BLOCKJOB_TYPE_BACKUP:
        qemuBlockJobProcessEventConcludedBackup(driver, vm, job, asyncJob,
                                                job->newstate, progressCurrent,
                                                progressTotal);
        break;

    case QEMU_BLOCKJOB_TYPE_BROKEN:
    case QEMU_BLOCKJOB_TYPE_NONE:
    case QEMU_BLOCKJOB_TYPE_INTERNAL:
    case QEMU_BLOCKJOB_TYPE_LAST:
    default:
        break;
    }

    qemuBlockJobEmitEvents(driver, vm, job->disk, job->type, job->newstate);
    job->state = job->newstate;
    job->newstate = -1;
}


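/**
 * qemuBlockJobEventProcessConcluded:
 *
 * Process a blockjob that reached a final state: refresh the error message and
 * progress from qemu (the event does not carry them), dismiss the job, run the
 * per-type transition handler and unregister the job. Backing chains inherited
 * by a job without an associated disk are unplugged as well.
 */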
static void
qemuBlockJobEventProcessConcluded(qemuBlockJobDataPtr job,
                                  virQEMUDriverPtr driver,
                                  virDomainObjPtr vm,
                                  qemuDomainAsyncJob asyncJob)
{
    qemuMonitorJobInfoPtr *jobinfo = NULL;
    size_t njobinfo = 0;
    size_t i;
    bool refreshed = false;
    unsigned long long progressCurrent = 0;
    unsigned long long progressTotal = 0;

    if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
        goto cleanup;

    /* we need to fetch the error state as the event does not propagate it */
    if (job->newstate == QEMU_BLOCKJOB_STATE_CONCLUDED &&
        qemuMonitorGetJobInfo(qemuDomainGetMonitor(vm), &jobinfo, &njobinfo) == 0) {

        for (i = 0; i < njobinfo; i++) {
            if (STRNEQ_NULLABLE(job->name, jobinfo[i]->id))
                continue;

            progressCurrent = jobinfo[i]->progressCurrent;
            progressTotal = jobinfo[i]->progressTotal;

            job->errmsg = g_strdup(jobinfo[i]->error);

            if (job->errmsg)
                job->newstate = QEMU_BLOCKJOB_STATE_FAILED;
            else
                job->newstate = QEMU_BLOCKJOB_STATE_COMPLETED;

            refreshed = true;

            break;
        }

        if (i == njobinfo)
            VIR_WARN("failed to refresh job '%s'", job->name);
    }

    /* dismiss job in qemu */
    ignore_value(qemuMonitorJobDismiss(qemuDomainGetMonitor(vm), job->name));

    if (qemuDomainObjExitMonitor(driver, vm) < 0)
        goto cleanup;

    if ((job->newstate == QEMU_BLOCKJOB_STATE_COMPLETED ||
         job->newstate == QEMU_BLOCKJOB_STATE_FAILED) &&
        job->state == QEMU_BLOCKJOB_STATE_ABORTING)
        job->newstate = QEMU_BLOCKJOB_STATE_CANCELLED;

    if (refreshed)
        qemuDomainSaveStatus(vm);

    VIR_DEBUG("handling job '%s' state '%d' newstate '%d'", job->name, job->state, job->newstate);

    qemuBlockJobEventProcessConcludedTransition(job, driver, vm, asyncJob,
                                                progressCurrent, progressTotal);

    /* unplug the backing chains in case the job inherited them */
    if (!job->disk) {
        if (job->chain)
            qemuBlockJobEventProcessConcludedRemoveChain(driver, vm, asyncJob,
                                                         job->chain);
        if (job->mirrorChain)
            qemuBlockJobEventProcessConcludedRemoveChain(driver, vm, asyncJob,
                                                         job->mirrorChain);
    }

 cleanup:
    qemuBlockJobUnregister(job, vm);
    qemuDomainSaveConfig(vm);

    for (i = 0; i < njobinfo; i++)
        qemuMonitorJobInfoFree(jobinfo[i]);
    VIR_FREE(jobinfo);
}


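/**
 * qemuBlockJobEventProcess:
 *
 * Dispatch a pending blockjob state change when qemu supports -blockdev (see
 * qemuBlockJobUpdate): final states are handled by
 * qemuBlockJobEventProcessConcluded, the READY state updates the disk mirror
 * state, and all other states just clear the pending 'newstate'.
 */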
static void
qemuBlockJobEventProcess(virQEMUDriverPtr driver,
                         virDomainObjPtr vm,
                         qemuBlockJobDataPtr job,
                         qemuDomainAsyncJob asyncJob)
{
    switch ((qemuBlockjobState) job->newstate) {
    case QEMU_BLOCKJOB_STATE_COMPLETED:
    case QEMU_BLOCKJOB_STATE_FAILED:
    case QEMU_BLOCKJOB_STATE_CANCELLED:
    case QEMU_BLOCKJOB_STATE_CONCLUDED:
        if (job->disk) {
            job->disk->mirrorState = VIR_DOMAIN_DISK_MIRROR_STATE_NONE;
            job->disk->mirrorJob = VIR_DOMAIN_BLOCK_JOB_TYPE_UNKNOWN;
        }
        qemuBlockJobEventProcessConcluded(job, driver, vm, asyncJob);
        break;

    case QEMU_BLOCKJOB_STATE_READY:
        /* mirror may be NULL for copy job corresponding to migration */
        if (job->disk) {
            job->disk->mirrorState = VIR_DOMAIN_DISK_MIRROR_STATE_READY;
            qemuBlockJobEmitEvents(driver, vm, job->disk, job->type, job->newstate);
        }
        job->state = job->newstate;
        job->newstate = -1;
        qemuDomainSaveStatus(vm);
        break;

    case QEMU_BLOCKJOB_STATE_NEW:
    case QEMU_BLOCKJOB_STATE_RUNNING:
    case QEMU_BLOCKJOB_STATE_LAST:
    /* these are never processed as 'newstate' */
    case QEMU_BLOCKJOB_STATE_ABORTING:
    case QEMU_BLOCKJOB_STATE_PIVOTING:
    default:
        job->newstate = -1;
    }
}


/**
 * qemuBlockJobUpdate:
 * @vm: domain
 * @job: job data
 * @asyncJob: current qemu asynchronous job type
 *
 * Update the disk's mirror state in response to a block job event stored in
 * blockJobStatus by the qemuProcessHandleBlockJob event handler.
 */
void
qemuBlockJobUpdate(virDomainObjPtr vm,
                   qemuBlockJobDataPtr job,
                   int asyncJob)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;

    if (job->newstate == -1)
        return;

    if (virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_BLOCKDEV))
        qemuBlockJobEventProcess(priv->driver, vm, job, asyncJob);
    else
        qemuBlockJobEventProcessLegacy(priv->driver, vm, job, asyncJob);
}


/**
 * qemuBlockJobSyncBegin:
 * @job: block job data
 *
 * Begin a new synchronous block job for the disk associated with @job. The
 * synchronous block job is ended by a call to qemuBlockJobSyncEnd, or by
 * the guest quitting.
 *
 * During a synchronous block job, a block job event for the disk
 * will not be processed asynchronously. Instead, it will be
 * processed only when qemuBlockJobUpdate or qemuBlockJobSyncEnd
 * is called.
 */
void
qemuBlockJobSyncBegin(qemuBlockJobDataPtr job)
{
    const char *diskdst = NULL;

    if (job->disk)
        diskdst = job->disk->dst;

    VIR_DEBUG("disk=%s", NULLSTR(diskdst));
    job->synchronous = true;
}


/**
 * qemuBlockJobSyncEnd:
 * @vm: domain
 * @job: block job data
 * @asyncJob: current qemu asynchronous job type
 *
 * End a synchronous block job for the disk associated with @job. Any pending
 * block job event for the disk is processed. Note that it's not necessary to
 * call this function if the block job was not started successfully and
 * qemuBlockJobStartupFinalize will be called.
 */
void
qemuBlockJobSyncEnd(virDomainObjPtr vm,
                    qemuBlockJobDataPtr job,
                    int asyncJob)
{
    const char *diskdst = NULL;

    if (job->disk)
        diskdst = job->disk->dst;

    VIR_DEBUG("disk=%s", NULLSTR(diskdst));
    job->synchronous = false;
    qemuBlockJobUpdate(vm, job, asyncJob);
}


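/**
 * qemuBlockJobGetByDisk:
 * @disk: domain disk
 *
 * Get a reference to the block job running on @disk. Returns NULL if there is
 * no job; otherwise the caller owns a new reference which must be released
 * with virObjectUnref.
 */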
qemuBlockJobDataPtr
qemuBlockJobGetByDisk(virDomainDiskDefPtr disk)
{
    qemuBlockJobDataPtr job = QEMU_DOMAIN_DISK_PRIVATE(disk)->blockjob;

    if (!job)
        return NULL;

    return virObjectRef(job);
}


/**
 * qemuBlockjobConvertMonitorStatus:
 * @monitorstatus: status of the blockjob from the qemu monitor (qemuMonitorJobStatus)
 *
 * Converts the block job status from the monitor to the one used by
 * qemuBlockJobData. If the status is unknown or does not require any handling
 * QEMU_BLOCKJOB_STATE_LAST is returned.
 */
qemuBlockjobState
qemuBlockjobConvertMonitorStatus(int monitorstatus)
{
    qemuBlockjobState ret = QEMU_BLOCKJOB_STATE_LAST;

    switch ((qemuMonitorJobStatus) monitorstatus) {
    case QEMU_MONITOR_JOB_STATUS_READY:
        ret = QEMU_BLOCKJOB_STATE_READY;
        break;

    case QEMU_MONITOR_JOB_STATUS_CONCLUDED:
        ret = QEMU_BLOCKJOB_STATE_CONCLUDED;
        break;

    case QEMU_MONITOR_JOB_STATUS_UNKNOWN:
    case QEMU_MONITOR_JOB_STATUS_CREATED:
    case QEMU_MONITOR_JOB_STATUS_RUNNING:
    case QEMU_MONITOR_JOB_STATUS_PAUSED:
    case QEMU_MONITOR_JOB_STATUS_STANDBY:
    case QEMU_MONITOR_JOB_STATUS_WAITING:
    case QEMU_MONITOR_JOB_STATUS_PENDING:
    case QEMU_MONITOR_JOB_STATUS_ABORTING:
    case QEMU_MONITOR_JOB_STATUS_UNDEFINED:
    case QEMU_MONITOR_JOB_STATUS_NULL:
    case QEMU_MONITOR_JOB_STATUS_LAST:
    default:
        break;
    }

    return ret;
}