From 457f8f33c9d46b614ab4abebb12a2e4408793b07 Mon Sep 17 00:00:00 2001
From: Scott Garfinkle
Date: Thu, 17 Aug 2017 17:17:20 -0500
Subject: [PATCH] qemu: Implement virDomainMigrateGetMaxDowntime

Add code to support querying maximum allowable downtime during live migration.
---
 src/qemu/qemu_driver.c       | 56 ++++++++++++++++++++++++++++++++++++
 src/qemu/qemu_monitor.h      |  3 ++
 src/qemu/qemu_monitor_json.c |  4 +++
 src/remote/remote_driver.c   |  1 +
 src/remote/remote_protocol.x | 16 ++++++++++-
 src/remote_protocol-structs  |  8 ++++++
 6 files changed, 87 insertions(+), 1 deletion(-)

diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index e9f07c6e76..2ba6c80c40 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -13150,6 +13150,61 @@ qemuDomainMigrateSetMaxDowntime(virDomainPtr dom,
     return ret;
 }
 
+
+static int
+qemuDomainMigrateGetMaxDowntime(virDomainPtr dom,
+                                unsigned long long *downtime,
+                                unsigned int flags)
+{
+    virQEMUDriverPtr driver = dom->conn->privateData;
+    virDomainObjPtr vm;
+    qemuDomainObjPrivatePtr priv;
+    qemuMonitorMigrationParams migparams = { 0 };
+    int ret = -1;
+
+    virCheckFlags(0, -1);
+
+    if (!(vm = qemuDomObjFromDomain(dom)))
+        return -1;
+
+    if (virDomainMigrateGetMaxDowntimeEnsureACL(dom->conn, vm->def) < 0)
+        goto cleanup;
+
+    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
+        goto cleanup;
+
+    if (!virDomainObjIsActive(vm)) {
+        virReportError(VIR_ERR_OPERATION_INVALID,
+                       "%s", _("domain is not running"));
+        goto endjob;
+    }
+
+    priv = vm->privateData;
+    qemuDomainObjEnterMonitor(driver, vm);
+
+    if (qemuMonitorGetMigrationParams(priv->mon, &migparams) == 0) {
+        if (migparams.downtimeLimit_set) {
+            *downtime = migparams.downtimeLimit;
+            ret = 0;
+        } else {
+            virReportError(VIR_ERR_OPERATION_INVALID, "%s",
+                           _("Querying migration downtime is not supported by "
+                             "QEMU binary"));
+        }
+    }
+
+    if (qemuDomainObjExitMonitor(driver, vm) < 0)
+        ret = -1;
+
+ endjob:
+    qemuDomainObjEndJob(driver, vm);
+
+ cleanup:
+    virDomainObjEndAPI(&vm);
+    return ret;
+}
+
+
 static int
 qemuDomainMigrateGetCompressionCache(virDomainPtr dom,
                                      unsigned long long *cacheSize,
@@ -20829,6 +20884,7 @@ static virHypervisorDriver qemuHypervisorDriver = {
     .domainGetJobInfo = qemuDomainGetJobInfo, /* 0.7.7 */
     .domainGetJobStats = qemuDomainGetJobStats, /* 1.0.3 */
     .domainAbortJob = qemuDomainAbortJob, /* 0.7.7 */
+    .domainMigrateGetMaxDowntime = qemuDomainMigrateGetMaxDowntime, /* 3.7.0 */
     .domainMigrateSetMaxDowntime = qemuDomainMigrateSetMaxDowntime, /* 0.8.0 */
     .domainMigrateGetCompressionCache = qemuDomainMigrateGetCompressionCache, /* 1.0.3 */
     .domainMigrateSetCompressionCache = qemuDomainMigrateSetCompressionCache, /* 1.0.3 */
diff --git a/src/qemu/qemu_monitor.h b/src/qemu/qemu_monitor.h
index 31f7e97ba8..9805a33908 100644
--- a/src/qemu/qemu_monitor.h
+++ b/src/qemu/qemu_monitor.h
@@ -627,6 +627,9 @@ struct _qemuMonitorMigrationParams {
      * whereas, some string value indicates we can support setting/clearing */
     char *migrateTLSAlias;
     char *migrateTLSHostname;
+
+    bool downtimeLimit_set;
+    unsigned long long downtimeLimit;
 };
 
 int qemuMonitorGetMigrationParams(qemuMonitorPtr mon,
diff --git a/src/qemu/qemu_monitor_json.c b/src/qemu/qemu_monitor_json.c
index b8a68154a7..df5fb7c8f6 100644
--- a/src/qemu/qemu_monitor_json.c
+++ b/src/qemu/qemu_monitor_json.c
@@ -2705,6 +2705,10 @@ qemuMonitorJSONGetMigrationParams(qemuMonitorPtr mon,
 
 #undef PARSE
 
+    if (virJSONValueObjectGetNumberUlong(result, "downtime-limit",
+                                         &params->downtimeLimit) == 0)
+        params->downtimeLimit_set = true;
+
     if ((tlsStr = virJSONValueObjectGetString(result, "tls-creds"))) {
         if (VIR_STRDUP(params->migrateTLSAlias, tlsStr) < 0)
             goto cleanup;
diff --git a/src/remote/remote_driver.c b/src/remote/remote_driver.c
index a57d25f994..027b073ec7 100644
--- a/src/remote/remote_driver.c
+++ b/src/remote/remote_driver.c
@@ -8400,6 +8400,7 @@ static virHypervisorDriver hypervisor_driver = {
     .domainGetJobInfo = remoteDomainGetJobInfo, /* 0.7.7 */
     .domainGetJobStats = remoteDomainGetJobStats, /* 1.0.3 */
     .domainAbortJob = remoteDomainAbortJob, /* 0.7.7 */
+    .domainMigrateGetMaxDowntime = remoteDomainMigrateGetMaxDowntime, /* 3.7.0 */
     .domainMigrateSetMaxDowntime = remoteDomainMigrateSetMaxDowntime, /* 0.8.0 */
     .domainMigrateGetCompressionCache = remoteDomainMigrateGetCompressionCache, /* 1.0.3 */
     .domainMigrateSetCompressionCache = remoteDomainMigrateSetCompressionCache, /* 1.0.3 */
diff --git a/src/remote/remote_protocol.x b/src/remote/remote_protocol.x
index 0943208b3b..2d49ceb3ab 100644
--- a/src/remote/remote_protocol.x
+++ b/src/remote/remote_protocol.x
@@ -2326,6 +2326,15 @@ struct remote_domain_abort_job_args {
 };
 
+struct remote_domain_migrate_get_max_downtime_args {
+    remote_nonnull_domain dom;
+    unsigned int flags;
+};
+
+struct remote_domain_migrate_get_max_downtime_ret {
+    unsigned hyper downtime; /* insert@1 */
+};
+
 struct remote_domain_migrate_set_max_downtime_args {
     remote_nonnull_domain dom;
     unsigned hyper downtime;
 };
@@ -6064,7 +6073,12 @@ enum remote_procedure {
      * @generate: both
      * @acl: domain:write
      */
-    REMOTE_PROC_DOMAIN_SET_BLOCK_THRESHOLD = 386
+    REMOTE_PROC_DOMAIN_SET_BLOCK_THRESHOLD = 386,
+    /**
+     * @generate: both
+     * @acl: domain:migrate
+     */
+    REMOTE_PROC_DOMAIN_MIGRATE_GET_MAX_DOWNTIME = 387
 };
 
 
diff --git a/src/remote_protocol-structs b/src/remote_protocol-structs
index a46fe37bfb..540553efe3 100644
--- a/src/remote_protocol-structs
+++ b/src/remote_protocol-structs
@@ -1773,6 +1773,13 @@ struct remote_domain_get_job_stats_ret {
 struct remote_domain_abort_job_args {
        remote_nonnull_domain      dom;
 };
+struct remote_domain_migrate_get_max_downtime_args {
+       remote_nonnull_domain      dom;
+       u_int                      flags;
+};
+struct remote_domain_migrate_get_max_downtime_ret {
+       uint64_t                   downtime;
+};
 struct remote_domain_migrate_set_max_downtime_args {
        remote_nonnull_domain      dom;
        uint64_t                   downtime;
@@ -3233,4 +3240,5 @@ enum remote_procedure {
        REMOTE_PROC_DOMAIN_SET_VCPU = 384,
        REMOTE_PROC_DOMAIN_EVENT_BLOCK_THRESHOLD = 385,
        REMOTE_PROC_DOMAIN_SET_BLOCK_THRESHOLD = 386,
+       REMOTE_PROC_DOMAIN_MIGRATE_GET_MAX_DOWNTIME = 387,
 };
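
Not part of the patch, for illustration only: a minimal client-side sketch of
how the new entry point would be consumed once the public API
virDomainMigrateGetMaxDowntime() is exposed. The URI "qemu:///system" and the
domain name "guest1" are placeholders; the value reported is the downtime
limit in milliseconds, mirroring virDomainMigrateSetMaxDowntime().

    #include <stdio.h>
    #include <stdlib.h>
    #include <libvirt/libvirt.h>

    int main(void)
    {
        virConnectPtr conn = NULL;
        virDomainPtr dom = NULL;
        unsigned long long downtime = 0;
        int ret = EXIT_FAILURE;

        /* placeholder URI: local privileged QEMU driver */
        if (!(conn = virConnectOpen("qemu:///system")))
            goto cleanup;

        /* "guest1" is a placeholder for a running domain */
        if (!(dom = virDomainLookupByName(conn, "guest1")))
            goto cleanup;

        /* flags must be 0: the QEMU driver above does virCheckFlags(0, -1) */
        if (virDomainMigrateGetMaxDowntime(dom, &downtime, 0) < 0)
            goto cleanup;

        printf("max migration downtime: %llu ms\n", downtime);
        ret = EXIT_SUCCESS;

     cleanup:
        if (dom)
            virDomainFree(dom);
        if (conn)
            virConnectClose(conn);
        return ret;
    }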