qemu: Introduce code for blockdev-create
QEMU finally exposes an interface which allows us to instruct it to format or create arbitrary images. This is required for blockdev integration of block copy and snapshots, as we need to pre-format images prior to use with blockdev-add.

This patch introduces job handling and also helpers for formatting and attaching a whole image described by a virStorageSource.

Signed-off-by: Peter Krempa <pkrempa@redhat.com>
Reviewed-by: Ján Tomko <jtomko@redhat.com>
commit 545edb2502
parent 7b8db52f5b
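For orientation, the QMP exchange the new helper drives for each image layer looks roughly like the transcript below. This is only an illustrative sketch based on QEMU's documented blockdev-create job interface; the node name, job id and size are placeholders, and the exact 'options' members depend on the props built for the given driver:

-> { "execute": "blockdev-create",
     "arguments": { "job-id": "create-libvirt-1-format",
                    "options": { "driver": "qcow2",
                                 "file": "libvirt-1-storage",
                                 "size": 1073741824 } } }
<- { "return": {} }
<- { "event": "JOB_STATUS_CHANGE",
     "data": { "id": "create-libvirt-1-format", "status": "concluded" } }
-> { "execute": "job-dismiss", "arguments": { "id": "create-libvirt-1-format" } }
<- { "return": {} }

libvirt tracks this via the usual block job machinery (qemuBlockJobNewCreate below names the job "create-" plus the node name) and waits for the job to conclude before plugging the freshly formatted node with blockdev-add.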
@@ -2379,3 +2379,258 @@ qemuBlockStorageSourceCreateGetStorageProps(virStorageSourcePtr src,
    return 0;
}


static int
qemuBlockStorageSourceCreateGeneric(virDomainObjPtr vm,
                                    virJSONValuePtr createProps,
                                    virStorageSourcePtr src,
                                    virStorageSourcePtr chain,
                                    bool storageCreate,
                                    qemuDomainAsyncJob asyncJob)
{
    VIR_AUTOPTR(virJSONValue) props = createProps;
    qemuDomainObjPrivatePtr priv = vm->privateData;
    qemuBlockJobDataPtr job = NULL;
    int ret = -1;
    int rc;

    if (!(job = qemuBlockJobNewCreate(vm, src, chain, storageCreate)))
        return -1;

    qemuBlockJobSyncBegin(job);

    if (qemuDomainObjEnterMonitorAsync(priv->driver, vm, asyncJob) < 0)
        goto cleanup;

    rc = qemuMonitorBlockdevCreate(priv->mon, job->name, props);
    props = NULL;

    if (qemuDomainObjExitMonitor(priv->driver, vm) < 0 || rc < 0)
        goto cleanup;

    qemuBlockJobStarted(job, vm);

    qemuBlockJobUpdate(vm, job, QEMU_ASYNC_JOB_NONE);
    while (qemuBlockJobIsRunning(job)) {
        if (virDomainObjWait(vm) < 0)
            goto cleanup;
        qemuBlockJobUpdate(vm, job, QEMU_ASYNC_JOB_NONE);
    }

    if (job->state == QEMU_BLOCKJOB_STATE_FAILED ||
        job->state == QEMU_BLOCKJOB_STATE_CANCELLED) {
        if (job->state == QEMU_BLOCKJOB_STATE_CANCELLED && !job->errmsg) {
            virReportError(VIR_ERR_OPERATION_FAILED, "%s",
                           _("blockdev-create job was cancelled"));
        } else {
            virReportError(VIR_ERR_OPERATION_FAILED,
                           _("failed to format image: '%s'"), NULLSTR(job->errmsg));
        }
        goto cleanup;
    }

    ret = 0;

 cleanup:
    qemuBlockJobStartupFinalize(vm, job);
    return ret;
}


static int
qemuBlockStorageSourceCreateStorage(virDomainObjPtr vm,
                                    virStorageSourcePtr src,
                                    virStorageSourcePtr chain,
                                    qemuDomainAsyncJob asyncJob)
{
    int actualType = virStorageSourceGetActualType(src);
    VIR_AUTOPTR(virJSONValue) createstorageprops = NULL;
    int ret;

    /* We create local files directly to be able to apply security labels
     * properly. This is enough for formats which store the capacity of the image
     * in the metadata as they will grow. We must create a correctly sized
     * image for 'raw' and 'luks' though as the image size influences the
     * capacity.
     */
    if (actualType != VIR_STORAGE_TYPE_NETWORK &&
        !(actualType == VIR_STORAGE_TYPE_FILE && src->format == VIR_STORAGE_FILE_RAW))
        return 0;

    if (qemuBlockStorageSourceCreateGetStorageProps(src, &createstorageprops) < 0)
        return -1;

    if (!createstorageprops) {
        /* we can always try opening it to see whether it was existing */
        return 0;
    }

    ret = qemuBlockStorageSourceCreateGeneric(vm, createstorageprops, src, chain,
                                              true, asyncJob);
    createstorageprops = NULL;

    return ret;
}


static int
qemuBlockStorageSourceCreateFormat(virDomainObjPtr vm,
                                   virStorageSourcePtr src,
                                   virStorageSourcePtr backingStore,
                                   virStorageSourcePtr chain,
                                   qemuDomainAsyncJob asyncJob)
{
    VIR_AUTOPTR(virJSONValue) createformatprops = NULL;
    int ret;

    if (src->format == VIR_STORAGE_FILE_RAW)
        return 0;

    if (qemuBlockStorageSourceCreateGetFormatProps(src, backingStore,
                                                   &createformatprops) < 0)
        return -1;

    if (!createformatprops) {
        virReportError(VIR_ERR_OPERATION_UNSUPPORTED,
                       _("can't create storage format '%s'"),
                       virStorageFileFormatTypeToString(src->format));
        return -1;
    }

    ret = qemuBlockStorageSourceCreateGeneric(vm, createformatprops, src, chain,
                                              false, asyncJob);
    createformatprops = NULL;

    return ret;
}


/**
 * qemuBlockStorageSourceCreate:
 * @vm: domain object
 * @src: storage source definition to create
 * @backingStore: backingStore of the new image (used only in image metadata)
 * @chain: backing chain to unplug in case of a long-running job failure
 * @data: qemuBlockStorageSourceAttachData for @src so that it can be attached
 * @asyncJob: qemu asynchronous job type
 *
 * Creates and formats a storage volume according to @src and attaches it to @vm.
 * @data must provide attachment data as if @src was existing. @src is attached
 * after successful return of this function. If libvirtd is restarted during
 * the create job @chain is unplugged, otherwise it's left for the caller.
 * If @backingStore is provided, the new image will refer to it as its backing
 * store.
 */
int
qemuBlockStorageSourceCreate(virDomainObjPtr vm,
                             virStorageSourcePtr src,
                             virStorageSourcePtr backingStore,
                             virStorageSourcePtr chain,
                             qemuBlockStorageSourceAttachDataPtr data,
                             qemuDomainAsyncJob asyncJob)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    int ret = -1;
    int rc;

    if (qemuDomainObjEnterMonitorAsync(priv->driver, vm, asyncJob) < 0)
        goto cleanup;

    rc = qemuBlockStorageSourceAttachApplyStorageDeps(priv->mon, data);

    if (qemuDomainObjExitMonitor(priv->driver, vm) < 0 || rc < 0)
        goto cleanup;

    if (qemuBlockStorageSourceCreateStorage(vm, src, chain, asyncJob) < 0)
        goto cleanup;

    if (qemuDomainObjEnterMonitorAsync(priv->driver, vm, asyncJob) < 0)
        goto cleanup;

    rc = qemuBlockStorageSourceAttachApplyStorage(priv->mon, data);

    if (rc == 0)
        rc = qemuBlockStorageSourceAttachApplyFormatDeps(priv->mon, data);

    if (qemuDomainObjExitMonitor(priv->driver, vm) < 0 || rc < 0)
        goto cleanup;

    if (qemuBlockStorageSourceCreateFormat(vm, src, backingStore, chain,
                                           asyncJob) < 0)
        goto cleanup;

    if (qemuDomainObjEnterMonitorAsync(priv->driver, vm, asyncJob) < 0)
        goto cleanup;

    rc = qemuBlockStorageSourceAttachApplyFormat(priv->mon, data);

    if (qemuDomainObjExitMonitor(priv->driver, vm) < 0 || rc < 0)
        goto cleanup;

    ret = 0;

 cleanup:
    if (ret < 0 &&
        virDomainObjIsActive(vm) &&
        qemuDomainObjEnterMonitorAsync(priv->driver, vm, asyncJob) == 0) {

        qemuBlockStorageSourceAttachRollback(priv->mon, data);
        ignore_value(qemuDomainObjExitMonitor(priv->driver, vm));
    }

    return ret;
}


/**
 * qemuBlockStorageSourceCreateDetectSize:
 * @vm: domain object
 * @src: storage source to update size/capacity on
 * @templ: storage source template
 * @asyncJob: qemu asynchronous job type
 *
 * When creating a storage source via blockdev-create we need to know the size
 * and capacity of the original volume (e.g. when creating a snapshot or copy).
 * This function updates @src's 'capacity' and 'physical' attributes according
 * to the detected sizes from @templ.
 */
int
qemuBlockStorageSourceCreateDetectSize(virDomainObjPtr vm,
                                       virStorageSourcePtr src,
                                       virStorageSourcePtr templ,
                                       qemuDomainAsyncJob asyncJob)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    VIR_AUTOPTR(virHashTable) stats = NULL;
    qemuBlockStatsPtr entry;
    int rc;

    if (!(stats = virHashCreate(10, virHashValueFree)))
        return -1;

    if (qemuDomainObjEnterMonitorAsync(priv->driver, vm, asyncJob) < 0)
        return -1;

    rc = qemuMonitorBlockStatsUpdateCapacityBlockdev(priv->mon, stats);

    if (qemuDomainObjExitMonitor(priv->driver, vm) < 0 || rc < 0)
        return -1;

    if (!(entry = virHashLookup(stats, templ->nodeformat))) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       _("failed to update capacity data for block node '%s'"),
                       templ->nodeformat);
        return -1;
    }

    if (src->format == VIR_STORAGE_FILE_RAW) {
        src->physical = entry->capacity;
    } else {
        src->physical = entry->physical;
    }

    src->capacity = entry->capacity;

    return 0;
}
@@ -182,3 +182,17 @@ int
qemuBlockStorageSourceCreateGetStorageProps(virStorageSourcePtr src,
                                            virJSONValuePtr *props)
    ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2) ATTRIBUTE_RETURN_CHECK;

int
qemuBlockStorageSourceCreate(virDomainObjPtr vm,
                             virStorageSourcePtr src,
                             virStorageSourcePtr backingStore,
                             virStorageSourcePtr chain,
                             qemuBlockStorageSourceAttachDataPtr data,
                             qemuDomainAsyncJob asyncJob);

int
qemuBlockStorageSourceCreateDetectSize(virDomainObjPtr vm,
                                       virStorageSourcePtr src,
                                       virStorageSourcePtr templ,
                                       qemuDomainAsyncJob asyncJob);
@@ -65,7 +65,8 @@ VIR_ENUM_IMPL(qemuBlockjob,
              "copy",
              "commit",
              "active-commit",
-             "");
              "",
              "create");

static virClassPtr qemuBlockJobDataClass;

@@ -78,6 +79,9 @@ qemuBlockJobDataDispose(void *obj)
    virObjectUnref(job->chain);
    virObjectUnref(job->mirrorChain);

    if (job->type == QEMU_BLOCKJOB_TYPE_CREATE)
        virObjectUnref(job->data.create.src);

    VIR_FREE(job->name);
    VIR_FREE(job->errmsg);
}
@@ -274,6 +278,37 @@ qemuBlockJobDiskNewCommit(virDomainObjPtr vm,
}


qemuBlockJobDataPtr
qemuBlockJobNewCreate(virDomainObjPtr vm,
                      virStorageSourcePtr src,
                      virStorageSourcePtr chain,
                      bool storage)
{
    VIR_AUTOUNREF(qemuBlockJobDataPtr) job = NULL;
    VIR_AUTOFREE(char *) jobname = NULL;
    const char *nodename = src->nodeformat;

    if (storage)
        nodename = src->nodestorage;

    if (virAsprintf(&jobname, "create-%s", nodename) < 0)
        return NULL;

    if (!(job = qemuBlockJobDataNew(QEMU_BLOCKJOB_TYPE_CREATE, jobname)))
        return NULL;

    if (virStorageSourceIsBacking(chain))
        job->chain = virObjectRef(chain);

    job->data.create.src = virObjectRef(src);

    if (qemuBlockJobRegister(job, vm, NULL, true) < 0)
        return NULL;

    VIR_RETURN_PTR(job);
}


/**
 * qemuBlockJobDiskGetJob:
 * @disk: disk definition
@@ -1007,6 +1042,49 @@ qemuBlockJobProcessEventCompletedActiveCommit(virQEMUDriverPtr driver,
    job->disk->mirror = NULL;
}


static void
qemuBlockJobProcessEventConcludedCreate(virQEMUDriverPtr driver,
                                        virDomainObjPtr vm,
                                        qemuBlockJobDataPtr job,
                                        qemuDomainAsyncJob asyncJob)
{
    VIR_AUTOPTR(qemuBlockStorageSourceAttachData) backend = NULL;

    /* if there is a synchronous client waiting for this job that means that
     * it will handle further hotplug of the created volume and also that
     * the 'chain' which was registered is under their control */
    if (job->synchronous) {
        virObjectUnref(job->chain);
        job->chain = NULL;
        return;
    }

    if (!job->data.create.src)
        return;

    if (!(backend = qemuBlockStorageSourceDetachPrepare(job->data.create.src, NULL)))
        return;

    /* the format node part was not attached yet, so we don't need to detach it */
    backend->formatAttached = false;
    if (job->data.create.storage) {
        backend->storageAttached = false;
        VIR_FREE(backend->encryptsecretAlias);
    }

    if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
        return;

    qemuBlockStorageSourceAttachRollback(qemuDomainGetMonitor(vm), backend);

    if (qemuDomainObjExitMonitor(driver, vm) < 0)
        return;

    qemuDomainStorageSourceAccessRevoke(driver, vm, job->data.create.src);
}


static void
qemuBlockJobEventProcessConcludedTransition(qemuBlockJobDataPtr job,
                                            virQEMUDriverPtr driver,
@@ -1028,6 +1106,10 @@ qemuBlockJobEventProcessConcludedTransition(qemuBlockJobDataPtr job,
        qemuBlockJobProcessEventCompletedActiveCommit(driver, vm, job, asyncJob);
        break;

    case QEMU_BLOCKJOB_TYPE_CREATE:
        qemuBlockJobProcessEventConcludedCreate(driver, vm, job, asyncJob);
        break;

    case QEMU_BLOCKJOB_TYPE_COPY:
    case QEMU_BLOCKJOB_TYPE_NONE:
    case QEMU_BLOCKJOB_TYPE_INTERNAL:
@@ -1051,6 +1133,10 @@ qemuBlockJobEventProcessConcludedTransition(qemuBlockJobDataPtr job,
        }
        break;

    case QEMU_BLOCKJOB_TYPE_CREATE:
        qemuBlockJobProcessEventConcludedCreate(driver, vm, job, asyncJob);
        break;

    case QEMU_BLOCKJOB_TYPE_COPY:
    case QEMU_BLOCKJOB_TYPE_NONE:
    case QEMU_BLOCKJOB_TYPE_INTERNAL:
@@ -62,6 +62,7 @@ typedef enum {
    QEMU_BLOCKJOB_TYPE_ACTIVE_COMMIT = VIR_DOMAIN_BLOCK_JOB_TYPE_ACTIVE_COMMIT,
    /* Additional enum values local to qemu */
    QEMU_BLOCKJOB_TYPE_INTERNAL,
    QEMU_BLOCKJOB_TYPE_CREATE,
    QEMU_BLOCKJOB_TYPE_LAST
} qemuBlockJobType;
verify((int)QEMU_BLOCKJOB_TYPE_INTERNAL == VIR_DOMAIN_BLOCK_JOB_TYPE_LAST);
@@ -87,6 +88,15 @@ struct _qemuBlockJobCommitData {
};


typedef struct _qemuBlockJobCreateData qemuBlockJobCreateData;
typedef qemuBlockJobCreateData *qemuBlockJobDataCreatePtr;

struct _qemuBlockJobCreateData {
    bool storage;
    virStorageSourcePtr src;
};


typedef struct _qemuBlockJobData qemuBlockJobData;
typedef qemuBlockJobData *qemuBlockJobDataPtr;

@@ -102,6 +112,7 @@ struct _qemuBlockJobData {
    union {
        qemuBlockJobPullData pull;
        qemuBlockJobCommitData commit;
        qemuBlockJobCreateData create;
    } data;

    int type; /* qemuBlockJobType */
@@ -146,6 +157,12 @@ qemuBlockJobDiskNewCommit(virDomainObjPtr vm,
                          virStorageSourcePtr top,
                          virStorageSourcePtr base);

qemuBlockJobDataPtr
qemuBlockJobNewCreate(virDomainObjPtr vm,
                      virStorageSourcePtr src,
                      virStorageSourcePtr chain,
                      bool storage);

qemuBlockJobDataPtr
qemuBlockJobDiskGetJob(virDomainDiskDefPtr disk)
    ATTRIBUTE_NONNULL(1);
@@ -2412,6 +2412,19 @@ qemuDomainObjPrivateXMLFormatBlockjobIterator(void *payload,
        virBufferAsprintf(&childBuf, "<topparent node='%s'/>\n", job->data.commit.topparent->nodeformat);
        break;

    case QEMU_BLOCKJOB_TYPE_CREATE:
        if (job->data.create.storage)
            virBufferAddLit(&childBuf, "<create mode='storage'/>\n");

        if (job->data.create.src &&
            qemuDomainObjPrivateXMLFormatBlockjobFormatSource(&childBuf,
                                                              "src",
                                                              job->data.create.src,
                                                              data->xmlopt,
                                                              false) < 0)
            return -1;
        break;

    case QEMU_BLOCKJOB_TYPE_COPY:
    case QEMU_BLOCKJOB_TYPE_NONE:
    case QEMU_BLOCKJOB_TYPE_INTERNAL:
@@ -2856,8 +2869,12 @@ qemuDomainObjPrivateXMLParseBlockjobNodename(qemuBlockJobDataPtr job,

static void
qemuDomainObjPrivateXMLParseBlockjobDataSpecific(qemuBlockJobDataPtr job,
-                                                xmlXPathContextPtr ctxt)
                                                 xmlXPathContextPtr ctxt,
                                                 virDomainXMLOptionPtr xmlopt)
{
    VIR_AUTOFREE(char *) createmode = NULL;
    xmlNodePtr tmp;

    switch ((qemuBlockJobType) job->type) {
    case QEMU_BLOCKJOB_TYPE_PULL:
        qemuDomainObjPrivateXMLParseBlockjobNodename(job,
@@ -2891,6 +2908,19 @@ qemuDomainObjPrivateXMLParseBlockjobDataSpecific(qemuBlockJobDataPtr job,
            goto broken;
        break;

    case QEMU_BLOCKJOB_TYPE_CREATE:
        if (!(tmp = virXPathNode("./src", ctxt)) ||
            !(job->data.create.src = qemuDomainObjPrivateXMLParseBlockjobChain(tmp, ctxt, xmlopt)))
            goto broken;

        if ((createmode = virXPathString("string(./create/@mode)", ctxt))) {
            if (STRNEQ(createmode, "storage"))
                goto broken;

            job->data.create.storage = true;
        }
        break;

    case QEMU_BLOCKJOB_TYPE_COPY:
    case QEMU_BLOCKJOB_TYPE_NONE:
    case QEMU_BLOCKJOB_TYPE_INTERNAL:
@@ -2980,7 +3010,7 @@ qemuDomainObjPrivateXMLParseBlockjobData(virDomainObjPtr vm,
    if (mirror)
        job->mirrorChain = virObjectRef(job->disk->mirror);

-   qemuDomainObjPrivateXMLParseBlockjobDataSpecific(job, ctxt);
    qemuDomainObjPrivateXMLParseBlockjobDataSpecific(job, ctxt, xmlopt);

    if (qemuBlockJobRegister(job, vm, disk, false) < 0)
        return -1;
@@ -17771,6 +17771,7 @@ qemuDomainBlockPivot(virQEMUDriverPtr driver,
    case QEMU_BLOCKJOB_TYPE_PULL:
    case QEMU_BLOCKJOB_TYPE_COMMIT:
    case QEMU_BLOCKJOB_TYPE_INTERNAL:
    case QEMU_BLOCKJOB_TYPE_CREATE:
        virReportError(VIR_ERR_OPERATION_INVALID,
                       _("job type '%s' does not support pivot"),
                       qemuBlockjobTypeToString(job->type));
@@ -243,6 +243,23 @@
      <base node='libvirt-19-format'/>
      <top node='libvirt-17-format'/>
    </blockjob>
    <blockjob name='create-libvirt-1337-storage' type='create' state='running'>
      <create mode='storage'/>
      <src type='network' format='qcow2'>
        <source protocol='rbd' name='pool/volname.qcow2' tlsFromConfig='0' index='1337'>
          <host name='example.org'/>
          <privateData>
            <nodenames>
              <nodename type='storage' name='libvirt-1337-storage'/>
              <nodename type='format' name='libvirt-1337-format'/>
            </nodenames>
            <objects>
              <secret type='auth' alias='libvirt-1337-storage-secret0'/>
            </objects>
          </privateData>
        </source>
      </src>
    </blockjob>
    <blockjob name='pull-vdc-libvirt-9-format' type='commit' state='running'>
      <disk dst='vdc'/>
      <base node='libvirt-11-format'/>
@@ -252,6 +269,34 @@
    <blockjob name='drive-virtio-disk0' type='copy' state='ready'>
      <disk dst='vda' mirror='yes'/>
    </blockjob>
    <blockjob name='create-libvirt-1338-format' type='create' state='running'>
      <chains>
        <disk type='file' format='qcow2'>
          <source file='/create/src1.qcow2' index='1339'>
            <privateData>
              <nodenames>
                <nodename type='storage' name='libvirt-1339-storage'/>
                <nodename type='format' name='libvirt-1339-format'/>
              </nodenames>
            </privateData>
          </source>
          <backingStore/>
        </disk>
      </chains>
      <src type='file' format='qcow2'>
        <source file='/tmp/create/overlay.qcow2' index='1338'>
          <privateData>
            <nodenames>
              <nodename type='storage' name='libvirt-1338-storage'/>
              <nodename type='format' name='libvirt-1338-format'/>
            </nodenames>
            <objects>
              <secret type='encryption' alias='libvirt-1338-storage-secret0'/>
            </objects>
          </privateData>
        </source>
      </src>
    </blockjob>
    <blockjob name='test-orphan-job0' type='copy' state='ready'>
      <chains>
        <disk type='file' format='qcow2'>
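As a rough illustration of how a later, blockdev-enabled caller (for example the snapshot or block-copy code) might wire the new helpers together, consider the sketch below. It is not part of this commit: the attach-data preparation helper name and the choice of the @chain argument are assumptions made for the example only.

/* hypothetical caller sketch -- not part of this commit */
static int
exampleCreateOverlay(virDomainObjPtr vm,
                     virStorageSourcePtr newSrc,        /* image to create */
                     virStorageSourcePtr backingStore,  /* current chain top */
                     qemuDomainAsyncJob asyncJob)
{
    VIR_AUTOPTR(qemuBlockStorageSourceAttachData) data = NULL;

    /* copy capacity/physical size from the image the new one is based on */
    if (qemuBlockStorageSourceCreateDetectSize(vm, newSrc, backingStore,
                                               asyncJob) < 0)
        return -1;

    /* describe how @newSrc would be plugged via blockdev-add; the helper
     * name here is assumed, it is not introduced by this patch */
    if (!(data = qemuBlockStorageSourceAttachPrepareBlockdev(newSrc)))
        return -1;

    /* create the storage if needed, format it and attach it; on failure
     * the helper rolls the partial attachment back itself */
    return qemuBlockStorageSourceCreate(vm, newSrc, backingStore,
                                        newSrc->backingStore, data, asyncJob);
}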