qemu: blockjob: Separate qemuDomainBlockJobAbort from qemuDomainBlockJobImpl
Sacrifice a few lines of code in favor of the code being more readable.
parent 1344a74ef2
commit cfc0a3d4ce
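
For orientation, a sketch of the call chain this commit introduces, reconstructed
from the hunks below (the per-file headers were lost in this rendering; judging by
the function names, the changes touch qemu_driver.c, qemu_migration.c,
qemu_monitor.c/.h and qemu_monitor_json.c/.h):

    /* Call chain after this commit (sketch, not part of the diff):
     *
     *   qemuDomainBlockJobAbort()               driver entry point: flag,
     *     -> qemuMonitorBlockJobCancel()        ACL and domain-state checks
     *       -> qemuMonitorJSONBlockJobCancel()  builds the monitor command
     *         -> "block-job-cancel" (or legacy "block_job_cancel")
     *
     * qemuDomainBlockJobImpl() keeps only the remaining job modes and
     * still goes through qemuMonitorBlockJob(). */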
@@ -16249,16 +16249,12 @@ qemuDomainBlockJobImpl(virDomainObjPtr vm,
     char *device = NULL;
     int ret = -1;
-    bool async = false;
-    virObjectEventPtr event = NULL;
-    virObjectEventPtr event2 = NULL;
     int idx;
     virDomainDiskDefPtr disk;
     virStorageSourcePtr baseSource = NULL;
     unsigned int baseIndex = 0;
     char *basePath = NULL;
     char *backingPath = NULL;
     virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
-    bool save = false;
     unsigned long long speed = bandwidth;

     if (!virDomainObjIsActive(vm)) {
@@ -16310,40 +16306,6 @@ qemuDomainBlockJobImpl(virDomainObjPtr vm,
     if (mode == BLOCK_JOB_PULL && qemuDomainDiskBlockJobIsActive(disk))
         goto endjob;

-    if (mode == BLOCK_JOB_ABORT) {
-        if (async && !(flags & VIR_DOMAIN_BLOCK_JOB_ABORT_ASYNC)) {
-            /* prepare state for event delivery */
-            disk->blockJobStatus = -1;
-            disk->blockJobSync = true;
-        }
-
-        if ((flags & VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT) &&
-            !(async && disk->mirror)) {
-            virReportError(VIR_ERR_OPERATION_INVALID,
-                           _("pivot of disk '%s' requires an active copy job"),
-                           disk->dst);
-            goto endjob;
-        }
-        if (disk->mirrorState != VIR_DOMAIN_DISK_MIRROR_STATE_NONE &&
-            disk->mirrorState != VIR_DOMAIN_DISK_MIRROR_STATE_READY) {
-            virReportError(VIR_ERR_OPERATION_INVALID,
-                           _("another job on disk '%s' is still being ended"),
-                           disk->dst);
-            goto endjob;
-        }
-
-        if (disk->mirror && (flags & VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT)) {
-            ret = qemuDomainBlockPivot(driver, vm, device, disk);
-            if (ret < 0 && async)
-                goto endjob;
-            goto waitjob;
-        }
-        if (disk->mirror) {
-            disk->mirrorState = VIR_DOMAIN_DISK_MIRROR_STATE_ABORT;
-            save = true;
-        }
-    }
-
     if (base &&
         (virStorageFileParseChainIndex(disk->dst, base, &baseIndex) < 0 ||
          !(baseSource = virStorageFileChainLookup(disk->src, disk->src,
@@ -16395,13 +16357,107 @@ qemuDomainBlockJobImpl(virDomainObjPtr vm,
     if (qemuDomainObjExitMonitor(driver, vm) < 0)
         ret = -1;
     if (ret < 0) {
-        if (mode == BLOCK_JOB_ABORT && disk->mirror)
-            disk->mirrorState = VIR_DOMAIN_DISK_MIRROR_STATE_NONE;
         goto endjob;
     } else if (mode == BLOCK_JOB_PULL) {
         disk->blockjob = true;
     }

+ endjob:
+    qemuDomainObjEndJob(driver, vm);
+
+ cleanup:
+    VIR_FREE(basePath);
+    VIR_FREE(backingPath);
+    VIR_FREE(device);
+    qemuDomObjEndAPI(&vm);
+    return ret;
+}
+
+
+static int
+qemuDomainBlockJobAbort(virDomainPtr dom,
+                        const char *path,
+                        unsigned int flags)
+{
+    virQEMUDriverPtr driver = dom->conn->privateData;
+    char *device = NULL;
+    virObjectEventPtr event = NULL;
+    virObjectEventPtr event2 = NULL;
+    virDomainDiskDefPtr disk;
+    virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
+    bool save = false;
+    int idx;
+    bool async;
+    virDomainObjPtr vm;
+    int ret = -1;
+
+    virCheckFlags(VIR_DOMAIN_BLOCK_JOB_ABORT_ASYNC |
+                  VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT, -1);
+
+    if (!(vm = qemuDomObjFromDomain(dom)))
+        return -1;
+
+    if (virDomainBlockJobAbortEnsureACL(dom->conn, vm->def) < 0)
+        goto cleanup;
+
+    if (qemuDomainSupportsBlockJobs(vm, &async) < 0)
+        goto cleanup;
+
+    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+        goto cleanup;
+
+    if (!virDomainObjIsActive(vm)) {
+        virReportError(VIR_ERR_OPERATION_INVALID, "%s",
+                       _("domain is not running"));
+        goto endjob;
+    }
+
+    if (!(device = qemuDiskPathToAlias(vm, path, &idx)))
+        goto endjob;
+    disk = vm->def->disks[idx];
+
+    if (async && !(flags & VIR_DOMAIN_BLOCK_JOB_ABORT_ASYNC)) {
+        /* prepare state for event delivery */
+        disk->blockJobStatus = -1;
+        disk->blockJobSync = true;
+    }
+
+    if ((flags & VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT) &&
+        !(async && disk->mirror)) {
+        virReportError(VIR_ERR_OPERATION_INVALID,
+                       _("pivot of disk '%s' requires an active copy job"),
+                       disk->dst);
+        goto endjob;
+    }
+    if (disk->mirrorState != VIR_DOMAIN_DISK_MIRROR_STATE_NONE &&
+        disk->mirrorState != VIR_DOMAIN_DISK_MIRROR_STATE_READY) {
+        virReportError(VIR_ERR_OPERATION_INVALID,
+                       _("another job on disk '%s' is still being ended"),
+                       disk->dst);
+        goto endjob;
+    }
+
+    if (disk->mirror && (flags & VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT)) {
+        ret = qemuDomainBlockPivot(driver, vm, device, disk);
+        if (ret < 0 && async)
+            goto endjob;
+        goto waitjob;
+    }
+    if (disk->mirror) {
+        disk->mirrorState = VIR_DOMAIN_DISK_MIRROR_STATE_ABORT;
+        save = true;
+    }
+
+    qemuDomainObjEnterMonitor(driver, vm);
+    ret = qemuMonitorBlockJobCancel(qemuDomainGetMonitor(vm), device, async);
+    if (qemuDomainObjExitMonitor(driver, vm) < 0)
+        ret = -1;
+
+    if (ret < 0) {
+        if (disk->mirror)
+            disk->mirrorState = VIR_DOMAIN_DISK_MIRROR_STATE_NONE;
+        goto endjob;
+    }
+
  waitjob:
     /* If we have made changes to XML due to a copy job, make a best
      * effort to save it now.  But we can ignore failure, since there
@@ -16416,37 +16472,35 @@ qemuDomainBlockJobImpl(virDomainObjPtr vm,
      * block to guarantee synchronous operation.  We do the waiting
      * while still holding the VM job, to prevent newly scheduled
      * block jobs from confusing us. */
-    if (mode == BLOCK_JOB_ABORT) {
-        if (!async) {
-            /* Older qemu that lacked async reporting also lacked
-             * blockcopy and active commit, so we can hardcode the
-             * event to pull, and we know the XML doesn't need
-             * updating.  We have to generate two event variants. */
-            int type = VIR_DOMAIN_BLOCK_JOB_TYPE_PULL;
-            int status = VIR_DOMAIN_BLOCK_JOB_CANCELED;
-            event = virDomainEventBlockJobNewFromObj(vm, disk->src->path, type,
-                                                     status);
-            event2 = virDomainEventBlockJob2NewFromObj(vm, disk->dst, type,
-                                                       status);
-        } else if (disk->blockJobSync) {
-            /* XXX If the event reports failure, we should reflect
-             * that back into the return status of this API call. */
+    if (!async) {
+        /* Older qemu that lacked async reporting also lacked
+         * blockcopy and active commit, so we can hardcode the
+         * event to pull, and we know the XML doesn't need
+         * updating.  We have to generate two event variants. */
+        int type = VIR_DOMAIN_BLOCK_JOB_TYPE_PULL;
+        int status = VIR_DOMAIN_BLOCK_JOB_CANCELED;
+        event = virDomainEventBlockJobNewFromObj(vm, disk->src->path, type,
+                                                 status);
+        event2 = virDomainEventBlockJob2NewFromObj(vm, disk->dst, type,
+                                                   status);
+    } else if (disk->blockJobSync) {
+        /* XXX If the event reports failure, we should reflect
+         * that back into the return status of this API call. */

-            while (disk->blockJobStatus == -1 && disk->blockJobSync) {
-                if (virCondWait(&disk->blockJobSyncCond, &vm->parent.lock) < 0) {
-                    virReportSystemError(errno, "%s",
-                                         _("Unable to wait on block job sync "
-                                           "condition"));
-                    disk->blockJobSync = false;
-                    goto endjob;
-                }
+        while (disk->blockJobStatus == -1 && disk->blockJobSync) {
+            if (virCondWait(&disk->blockJobSyncCond, &vm->parent.lock) < 0) {
+                virReportSystemError(errno, "%s",
+                                     _("Unable to wait on block job sync "
+                                       "condition"));
+                disk->blockJobSync = false;
+                goto endjob;
+            }

-                qemuBlockJobEventProcess(driver, vm, disk,
-                                         disk->blockJobType,
-                                         disk->blockJobStatus);
-                disk->blockJobSync = false;
-            }
-        }
-    }
+            qemuBlockJobEventProcess(driver, vm, disk,
+                                     disk->blockJobType,
+                                     disk->blockJobStatus);
+            disk->blockJobSync = false;
+        }
+    }

  endjob:
@@ -16454,8 +16508,6 @@ qemuDomainBlockJobImpl(virDomainObjPtr vm,

  cleanup:
     virObjectUnref(cfg);
-    VIR_FREE(basePath);
-    VIR_FREE(backingPath);
     VIR_FREE(device);
     qemuDomObjEndAPI(&vm);
     if (event)
@@ -16465,25 +16517,6 @@ qemuDomainBlockJobImpl(virDomainObjPtr vm,
     return ret;
 }

-static int
-qemuDomainBlockJobAbort(virDomainPtr dom, const char *path, unsigned int flags)
-{
-    virDomainObjPtr vm;
-
-    virCheckFlags(VIR_DOMAIN_BLOCK_JOB_ABORT_ASYNC |
-                  VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT, -1);
-
-    if (!(vm = qemuDomObjFromDomain(dom)))
-        return -1;
-
-    if (virDomainBlockJobAbortEnsureACL(dom->conn, vm->def) < 0) {
-        qemuDomObjEndAPI(&vm);
-        return -1;
-    }
-
-    return qemuDomainBlockJobImpl(vm, dom->conn, path, NULL, 0,
-                                  BLOCK_JOB_ABORT, flags);
-}
-
 static int
 qemuDomainGetBlockJobInfo(virDomainPtr dom, const char *path,
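
The public entry point is unchanged by this split. For context, a minimal
client-side sketch of exercising this path through the public libvirt API
(the disk target "vda" and the surrounding setup are hypothetical):

    #include <libvirt/libvirt.h>

    /* Sketch: request an asynchronous cancel of a block job on "vda";
     * completion is then signalled via a block-job event. */
    static int
    cancelBlockJob(virDomainPtr dom)
    {
        return virDomainBlockJobAbort(dom, "vda",
                                      VIR_DOMAIN_BLOCK_JOB_ABORT_ASYNC);
    }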
@@ -1846,10 +1846,9 @@ qemuMigrationDriveMirror(virQEMUDriverPtr driver,
             continue;
         if (qemuDomainObjEnterMonitorAsync(driver, vm,
                                            QEMU_ASYNC_JOB_MIGRATION_OUT) == 0) {
-            if (qemuMonitorBlockJob(priv->mon, diskAlias, NULL, NULL, 0,
-                                    BLOCK_JOB_ABORT, true) < 0) {
+            if (qemuMonitorBlockJobCancel(priv->mon, diskAlias, true) < 0)
                 VIR_WARN("Unable to cancel block-job on '%s'", diskAlias);
-            }

             if (qemuDomainObjExitMonitor(driver, vm) < 0)
                 break;
         } else {
@@ -1920,8 +1919,7 @@ qemuMigrationCancelDriveMirror(qemuMigrationCookiePtr mig,
                                            QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
             goto cleanup;

-        if (qemuMonitorBlockJob(priv->mon, diskAlias, NULL, NULL, 0,
-                                BLOCK_JOB_ABORT, true) < 0)
+        if (qemuMonitorBlockJobCancel(priv->mon, diskAlias, true) < 0)
             VIR_WARN("Unable to stop block job on %s", diskAlias);
         if (qemuDomainObjExitMonitor(driver, vm) < 0) {
             ret = -1;
@@ -3602,6 +3602,24 @@ qemuMonitorBlockJob(qemuMonitorPtr mon,
 }


+int
+qemuMonitorBlockJobCancel(qemuMonitorPtr mon,
+                          const char *device,
+                          bool modern)
+{
+    VIR_DEBUG("mon=%p, device=%s, modern=%d",
+              mon, device, modern);
+
+    if (!mon->json) {
+        virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
+                       _("block jobs require JSON monitor"));
+        return -1;
+    }
+
+    return qemuMonitorJSONBlockJobCancel(mon, device, modern);
+}
+
+
 int
 qemuMonitorBlockJobSetSpeed(qemuMonitorPtr mon,
                             const char *device,
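
As a usage note, the new wrapper is intended to be called between the usual
monitor enter/exit pair, exactly as the rewritten qemuDomainBlockJobAbort()
above does; a condensed sketch (the device alias is hypothetical):

    qemuDomainObjEnterMonitor(driver, vm);
    ret = qemuMonitorBlockJobCancel(qemuDomainGetMonitor(vm),
                                    "drive-virtio-disk0", async);
    if (qemuDomainObjExitMonitor(driver, vm) < 0)
        ret = -1;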
@@ -756,7 +756,6 @@ int qemuMonitorSendKey(qemuMonitorPtr mon,
                        unsigned int nkeycodes);

 typedef enum {
-    BLOCK_JOB_ABORT,
     BLOCK_JOB_PULL,
 } qemuMonitorBlockJobCmd;

@@ -769,6 +768,11 @@ int qemuMonitorBlockJob(qemuMonitorPtr mon,
                         bool modern)
     ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2);

+int qemuMonitorBlockJobCancel(qemuMonitorPtr mon,
+                              const char *device,
+                              bool modern)
+    ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2);
+
 int qemuMonitorBlockJobSetSpeed(qemuMonitorPtr mon,
                                 const char *device,
                                 unsigned long long bandwidth,
@@ -4321,13 +4321,6 @@ qemuMonitorJSONBlockJob(qemuMonitorPtr mon,
     }

     switch (mode) {
-    case BLOCK_JOB_ABORT:
-        cmd_name = modern ? "block-job-cancel" : "block_job_cancel";
-        cmd = qemuMonitorJSONMakeCommand(cmd_name,
-                                         "s:device", device,
-                                         NULL);
-        break;
-
     case BLOCK_JOB_PULL:
         cmd_name = modern ? "block-stream" : "block_stream";
         cmd = qemuMonitorJSONMakeCommand(cmd_name,
@@ -4357,6 +4350,36 @@ qemuMonitorJSONBlockJob(qemuMonitorPtr mon,
 }


+int
+qemuMonitorJSONBlockJobCancel(qemuMonitorPtr mon,
+                              const char *device,
+                              bool modern)
+{
+    int ret = -1;
+    virJSONValuePtr cmd = NULL;
+    virJSONValuePtr reply = NULL;
+    const char *cmd_name = modern ? "block-job-cancel" : "block_job_cancel";
+
+    if (!(cmd = qemuMonitorJSONMakeCommand(cmd_name,
+                                           "s:device", device,
+                                           NULL)))
+        return -1;
+
+    if (qemuMonitorJSONCommand(mon, cmd, &reply) < 0)
+        goto cleanup;
+
+    if (qemuMonitorJSONBlockJobError(reply, cmd_name, device) < 0)
+        goto cleanup;
+
+    ret = 0;
+
+ cleanup:
+    virJSONValueFree(cmd);
+    virJSONValueFree(reply);
+    return ret;
+}
+
+
 int
 qemuMonitorJSONBlockJobSetSpeed(qemuMonitorPtr mon,
                                 const char *device,
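
For reference, the two command names selected by the modern flag yield monitor
messages of this shape (a sketch; the device alias is hypothetical):

    {"execute": "block-job-cancel", "arguments": {"device": "drive-virtio-disk0"}}
    {"execute": "block_job_cancel", "arguments": {"device": "drive-virtio-disk0"}}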
@@ -306,6 +306,11 @@ int qemuMonitorJSONBlockJob(qemuMonitorPtr mon,
                             bool modern)
     ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2);

+int qemuMonitorJSONBlockJobCancel(qemuMonitorPtr mon,
+                                  const char *device,
+                                  bool modern)
+    ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2);
+
 int qemuMonitorJSONBlockJobSetSpeed(qemuMonitorPtr mon,
                                     const char *device,
                                     unsigned long long speed,