qemu: blockjob: Remove infrastructure for remembering to delete image

Now that we delete the images elsewhere, this infrastructure is no longer
required. Removing it is also safe since no released upstream version ever
depended on it being in place.

Signed-off-by: Peter Krempa <pkrempa@redhat.com>
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>
commit 3a98fe9db3 (parent 40485059ab)
Peter Krempa, 2019-12-20 11:02:35 +01:00
5 changed files with 4 additions and 20 deletions


@@ -298,8 +298,7 @@ qemuBackupDiskPrepareDataOne(virDomainObjPtr vm,
                              virJSONValuePtr actions,
                              virDomainMomentDefPtr *incremental,
                              virHashTablePtr blockNamedNodeData,
-                             virQEMUDriverConfigPtr cfg,
-                             bool removeStore)
+                             virQEMUDriverConfigPtr cfg)
 {
     qemuDomainObjPrivatePtr priv = vm->privateData;
@@ -331,7 +330,6 @@ qemuBackupDiskPrepareDataOne(virDomainObjPtr vm,
     }
 
     if (!(dd->blockjob = qemuBlockJobDiskNewBackup(vm, dd->domdisk, dd->store,
-                                                   removeStore,
                                                    dd->incrementalBitmap)))
         return -1;
@@ -389,13 +387,11 @@ qemuBackupDiskPrepareData(virDomainObjPtr vm,
                          virHashTablePtr blockNamedNodeData,
                          virJSONValuePtr actions,
                          virQEMUDriverConfigPtr cfg,
-                         struct qemuBackupDiskData **rdd,
-                         bool reuse_external)
+                         struct qemuBackupDiskData **rdd)
 {
     struct qemuBackupDiskData *disks = NULL;
     ssize_t ndisks = 0;
     size_t i;
-    bool removeStore = !reuse_external && (def->type == VIR_DOMAIN_BACKUP_TYPE_PULL);
 
     disks = g_new0(struct qemuBackupDiskData, def->ndisks);
@@ -410,7 +406,7 @@ qemuBackupDiskPrepareData(virDomainObjPtr vm,
         if (qemuBackupDiskPrepareDataOne(vm, backupdisk, dd, actions,
                                          incremental, blockNamedNodeData,
-                                         cfg, removeStore) < 0)
+                                         cfg) < 0)
             goto error;
 
         if (def->type == VIR_DOMAIN_BACKUP_TYPE_PULL) {
@@ -826,7 +822,7 @@ qemuBackupBegin(virDomainObjPtr vm,
        goto endjob;
 
    if ((ndd = qemuBackupDiskPrepareData(vm, def, incremental, blockNamedNodeData,
-                                        actions, cfg, &dd, reuse)) <= 0) {
+                                        actions, cfg, &dd)) <= 0) {
        if (ndd == 0) {
            virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                           _("no disks selected for backup"));


@@ -382,7 +382,6 @@ qemuBlockJobDataPtr
 qemuBlockJobDiskNewBackup(virDomainObjPtr vm,
                           virDomainDiskDefPtr disk,
                           virStorageSourcePtr store,
-                          bool deleteStore,
                           const char *bitmap)
 {
     g_autoptr(qemuBlockJobData) job = NULL;
@@ -395,7 +394,6 @@ qemuBlockJobDiskNewBackup(virDomainObjPtr vm,
     job->data.backup.bitmap = g_strdup(bitmap);
     job->data.backup.store = virObjectRef(store);
-    job->data.backup.deleteStore = deleteStore;
 
     /* backup jobs are usually started in bulk by transaction so the caller
      * shall save the status XML */


@@ -113,7 +113,6 @@ typedef qemuBlockJobBackupData *qemuBlockJobDataBackupPtr;
 struct _qemuBlockJobBackupData {
     virStorageSourcePtr store;
-    bool deleteStore;
     char *bitmap;
 };
@@ -201,7 +200,6 @@ qemuBlockJobDataPtr
 qemuBlockJobDiskNewBackup(virDomainObjPtr vm,
                           virDomainDiskDefPtr disk,
                           virStorageSourcePtr store,
-                          bool deleteStore,
                           const char *bitmap);
 
 qemuBlockJobDataPtr


@@ -2617,9 +2617,6 @@ qemuDomainObjPrivateXMLFormatBlockjobIterator(void *payload,
                                                                  data->xmlopt,
                                                                  false) < 0)
                return -1;
-
-            if (job->data.backup.deleteStore)
-                virBufferAddLit(&childBuf, "<deleteStore/>\n");
        }
        break;
@@ -3222,10 +3219,6 @@ qemuDomainObjPrivateXMLParseBlockjobDataSpecific(qemuBlockJobDataPtr job,
        if (!(tmp = virXPathNode("./store", ctxt)) ||
            !(job->data.backup.store = qemuDomainObjPrivateXMLParseBlockjobChain(tmp, ctxt, xmlopt)))
            goto broken;
-
-        if (virXPathNode("./deleteStore", ctxt))
-            job->data.backup.deleteStore = true;
        break;
 
    case QEMU_BLOCKJOB_TYPE_BROKEN:


@@ -248,7 +248,6 @@
           </privateData>
         </source>
       </store>
-      <deleteStore/>
     </blockjob>
   </blockjobs>
   <agentTimeout>-2</agentTimeout>
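
For reference, a minimal sketch of how the backup blockjob declarations read
after this commit, assembled only from the hunks above; the remaining struct
members, function bodies and the libvirt-internal typedefs (virDomainObjPtr,
virDomainDiskDefPtr, virStorageSourcePtr, qemuBlockJobDataPtr) are not
reproduced here:

/* Sketch reconstructed from the diff above, not the complete libvirt sources. */
struct _qemuBlockJobBackupData {
    virStorageSourcePtr store;   /* 'bool deleteStore' member removed */
    char *bitmap;
};

qemuBlockJobDataPtr
qemuBlockJobDiskNewBackup(virDomainObjPtr vm,
                          virDomainDiskDefPtr disk,
                          virStorageSourcePtr store,
                          const char *bitmap);   /* 'bool deleteStore' parameter dropped */

The first group of hunks shows the matching caller-side cleanup:
qemuBackupDiskPrepareDataOne() and qemuBackupDiskPrepareData() no longer
compute or pass removeStore, and the <deleteStore/> element disappears from
the status XML formatter, parser and test data.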