Mirror of https://gitlab.com/libvirt/libvirt.git (synced 2024-12-22 05:35:25 +00:00)
qemu: stop passing virConnectPtr into qemuMonitorStartCPUs
There is a long-standing hack of passing a virConnectPtr into the qemuMonitorStartCPUs method so that, when the text monitor prompts for a disk password, we can look up virSecretPtr objects. This forces us to pass a virConnectPtr around through countless methods up the call chain, except that some places have no virConnectPtr available and have always just passed NULL. We can finally fix this disastrous design by using virGetConnectSecret() to open a connection to the secret driver at the time of use.

Reviewed-by: John Ferlan <jferlan@redhat.com>
Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
parent 0c63c117a2
commit aed679da85
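The diff below drops the virConnectPtr parameter from the qemuProcessStartCPUs / qemuMonitorStartCPUs chain and from the disk-secret lookup callback; instead, qemuProcessFindVolumeQcowPassphrase() opens the secret-driver connection itself with virGetConnectSecret() and releases it in its cleanup path. As a rough illustration of that pattern (a hedged sketch, not code from the tree: the helper name hypotheticalDiskSecretLookup and its body are invented for this note), a lookup helper written this way acquires the connection only at the point of use:

/* Hedged sketch, not actual libvirt code. Shows the pattern the commit
 * adopts: open a connection to the secret driver at the point of use
 * instead of threading a virConnectPtr down from the API entry point. */
#include "datatypes.h"    /* virConnectPtr, virGetConnectSecret() */
#include "virobject.h"    /* virObjectUnref() */

static int
hypotheticalDiskSecretLookup(const char *path,
                             char **secret,
                             size_t *secretLen)
{
    virConnectPtr conn = NULL;
    int ret = -1;

    /* Open (or reuse) a connection to the secret driver on demand. */
    if (!(conn = virGetConnectSecret()))
        return -1;

    /* ... look up the virSecretPtr for 'path' and fill in *secret and
     * *secretLen here; omitted in this sketch ... */
    (void)path;
    *secret = NULL;
    *secretLen = 0;
    ret = 0;

    /* Drop the connection reference as soon as the lookup is done. */
    virObjectUnref(conn);
    return ret;
}

Callers no longer need to thread a connection through the chain, which is why every qemuProcessStartCPUs(driver, vm, conn, ...) call in the diff loses its conn argument.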
src/qemu/qemu_driver.c
@@ -1954,7 +1954,7 @@ static int qemuDomainResume(virDomainPtr dom)
     } else if ((state == VIR_DOMAIN_CRASHED &&
                 reason == VIR_DOMAIN_CRASHED_PANICKED) ||
                state == VIR_DOMAIN_PAUSED) {
-        if (qemuProcessStartCPUs(driver, vm, dom->conn,
+        if (qemuProcessStartCPUs(driver, vm,
                                  VIR_DOMAIN_RUNNING_UNPAUSED,
                                  QEMU_ASYNC_JOB_NONE) < 0) {
             if (virGetLastError() == NULL)
@@ -3346,7 +3346,7 @@ qemuDomainSaveMemory(virQEMUDriverPtr driver,
  * this returns (whether returning success or failure).
  */
 static int
-qemuDomainSaveInternal(virQEMUDriverPtr driver, virDomainPtr dom,
+qemuDomainSaveInternal(virQEMUDriverPtr driver,
                        virDomainObjPtr vm, const char *path,
                        int compressed, const char *compressedpath,
                        const char *xmlin, unsigned int flags)
@@ -3447,7 +3447,7 @@ qemuDomainSaveInternal(virQEMUDriverPtr driver, virDomainPtr dom,
     if (ret < 0) {
         if (was_running && virDomainObjIsActive(vm)) {
             virErrorPtr save_err = virSaveLastError();
-            if (qemuProcessStartCPUs(driver, vm, dom->conn,
+            if (qemuProcessStartCPUs(driver, vm,
                                      VIR_DOMAIN_RUNNING_SAVE_CANCELED,
                                      QEMU_ASYNC_JOB_SAVE) < 0) {
                 VIR_WARN("Unable to resume guest CPUs after save failure");
@@ -3582,7 +3582,7 @@ qemuDomainSaveFlags(virDomainPtr dom, const char *path, const char *dxml,
         goto cleanup;
     }
 
-    ret = qemuDomainSaveInternal(driver, dom, vm, path, compressed,
+    ret = qemuDomainSaveInternal(driver, vm, path, compressed,
                                  compressedpath, dxml, flags);
 
  cleanup:
@@ -3656,7 +3656,7 @@ qemuDomainManagedSave(virDomainPtr dom, unsigned int flags)
 
     VIR_INFO("Saving state of domain '%s' to '%s'", vm->def->name, name);
 
-    ret = qemuDomainSaveInternal(driver, dom, vm, name, compressed,
+    ret = qemuDomainSaveInternal(driver, vm, name, compressed,
                                  compressedpath, NULL, flags);
     if (ret == 0)
         vm->hasManagedSave = true;
@@ -4029,7 +4029,7 @@ qemuDomainCoreDumpWithFormat(virDomainPtr dom,
     }
 
     if (resume && virDomainObjIsActive(vm)) {
-        if (qemuProcessStartCPUs(driver, vm, dom->conn,
+        if (qemuProcessStartCPUs(driver, vm,
                                  VIR_DOMAIN_RUNNING_UNPAUSED,
                                  QEMU_ASYNC_JOB_DUMP) < 0) {
             event = virDomainEventLifecycleNewFromObj(vm,
@@ -4216,7 +4216,7 @@ processWatchdogEvent(virQEMUDriverPtr driver,
         virReportError(VIR_ERR_OPERATION_FAILED,
                        "%s", _("Dump failed"));
 
-    ret = qemuProcessStartCPUs(driver, vm, NULL,
+    ret = qemuProcessStartCPUs(driver, vm,
                                VIR_DOMAIN_RUNNING_UNPAUSED,
                                QEMU_ASYNC_JOB_DUMP);
 
@@ -6677,7 +6677,7 @@ qemuDomainSaveImageStartVM(virConnectPtr conn,
 
     /* If it was running before, resume it now unless caller requested pause. */
     if (header->was_running && !start_paused) {
-        if (qemuProcessStartCPUs(driver, vm, conn,
+        if (qemuProcessStartCPUs(driver, vm,
                                  VIR_DOMAIN_RUNNING_RESTORED,
                                  asyncJob) < 0) {
             if (virGetLastError() == NULL)
@@ -14005,8 +14005,7 @@ qemuDomainSnapshotCreateInactiveExternal(virQEMUDriverPtr driver,
 
 /* The domain is expected to be locked and active. */
 static int
-qemuDomainSnapshotCreateActiveInternal(virConnectPtr conn,
-                                       virQEMUDriverPtr driver,
+qemuDomainSnapshotCreateActiveInternal(virQEMUDriverPtr driver,
                                        virDomainObjPtr vm,
                                        virDomainSnapshotObjPtr snap,
                                        unsigned int flags)
@@ -14062,7 +14061,7 @@ qemuDomainSnapshotCreateActiveInternal(virConnectPtr conn,
 
  cleanup:
     if (resume && virDomainObjIsActive(vm) &&
-        qemuProcessStartCPUs(driver, vm, conn,
+        qemuProcessStartCPUs(driver, vm,
                              VIR_DOMAIN_RUNNING_UNPAUSED,
                              QEMU_ASYNC_JOB_SNAPSHOT) < 0) {
         event = virDomainEventLifecycleNewFromObj(vm,
@@ -14878,8 +14877,7 @@ qemuDomainSnapshotCreateDiskActive(virQEMUDriverPtr driver,
 
 
 static int
-qemuDomainSnapshotCreateActiveExternal(virConnectPtr conn,
-                                       virQEMUDriverPtr driver,
+qemuDomainSnapshotCreateActiveExternal(virQEMUDriverPtr driver,
                                        virDomainObjPtr vm,
                                        virDomainSnapshotObjPtr snap,
                                        unsigned int flags)
@@ -15026,7 +15024,7 @@ qemuDomainSnapshotCreateActiveExternal(virConnectPtr conn,
 
  cleanup:
     if (resume && virDomainObjIsActive(vm) &&
-        qemuProcessStartCPUs(driver, vm, conn,
+        qemuProcessStartCPUs(driver, vm,
                              VIR_DOMAIN_RUNNING_UNPAUSED,
                              QEMU_ASYNC_JOB_SNAPSHOT) < 0) {
         event = virDomainEventLifecycleNewFromObj(vm,
@@ -15279,12 +15277,12 @@ qemuDomainSnapshotCreateXML(virDomainPtr domain,
         if (flags & VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY ||
             snap->def->memory == VIR_DOMAIN_SNAPSHOT_LOCATION_EXTERNAL) {
             /* external checkpoint or disk snapshot */
-            if (qemuDomainSnapshotCreateActiveExternal(domain->conn, driver,
+            if (qemuDomainSnapshotCreateActiveExternal(driver,
                                                        vm, snap, flags) < 0)
                 goto endjob;
         } else {
             /* internal checkpoint */
-            if (qemuDomainSnapshotCreateActiveInternal(domain->conn, driver,
+            if (qemuDomainSnapshotCreateActiveInternal(driver,
                                                        vm, snap, flags) < 0)
                 goto endjob;
         }
@@ -16003,7 +16001,7 @@ qemuDomainRevertToSnapshot(virDomainSnapshotPtr snapshot,
                            _("guest unexpectedly quit"));
             goto endjob;
         }
-        rc = qemuProcessStartCPUs(driver, vm, snapshot->domain->conn,
+        rc = qemuProcessStartCPUs(driver, vm,
                                   VIR_DOMAIN_RUNNING_FROM_SNAPSHOT,
                                   QEMU_ASYNC_JOB_START);
         if (rc < 0)
src/qemu/qemu_migration.c
@@ -273,7 +273,7 @@ qemuMigrationRestoreDomainState(virConnectPtr conn, virDomainObjPtr vm)
     VIR_DEBUG("Restoring pre-migration state due to migration error");
 
     /* we got here through some sort of failure; start the domain again */
-    if (qemuProcessStartCPUs(driver, vm, conn,
+    if (qemuProcessStartCPUs(driver, vm,
                              VIR_DOMAIN_RUNNING_MIGRATION_CANCELED,
                              QEMU_ASYNC_JOB_MIGRATION_OUT) < 0) {
         /* Hm, we already know we are in error here. We don't want to
@@ -2853,7 +2853,7 @@ qemuMigrationPrepareAny(virQEMUDriverPtr driver,
                                 QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
         goto stopjob;
 
-    if (qemuProcessFinishStartup(dconn, driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
+    if (qemuProcessFinishStartup(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
                                  false, VIR_DOMAIN_PAUSED_MIGRATION) < 0)
         goto stopjob;
 
@@ -5389,7 +5389,7 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
              * >= 0.10.6 to work properly. This isn't strictly necessary on
              * older qemu's, but it also doesn't hurt anything there
              */
-            if (qemuProcessStartCPUs(driver, vm, dconn,
+            if (qemuProcessStartCPUs(driver, vm,
                                      inPostCopy ? VIR_DOMAIN_RUNNING_POSTCOPY
                                                 : VIR_DOMAIN_RUNNING_MIGRATED,
                                      QEMU_ASYNC_JOB_MIGRATION_IN) < 0) {
src/qemu/qemu_monitor.c
@@ -1319,7 +1319,6 @@ qemuMonitorHMPCommandWithFd(qemuMonitorPtr mon,
 
 int
 qemuMonitorGetDiskSecret(qemuMonitorPtr mon,
-                         virConnectPtr conn,
                          const char *path,
                          char **secret,
                          size_t *secretLen)
@@ -1328,7 +1327,7 @@ qemuMonitorGetDiskSecret(qemuMonitorPtr mon,
     *secret = NULL;
     *secretLen = 0;
 
-    QEMU_MONITOR_CALLBACK(mon, ret, diskSecretLookup, conn, mon->vm,
+    QEMU_MONITOR_CALLBACK(mon, ret, diskSecretLookup, mon->vm,
                           path, secret, secretLen);
     return ret;
 }
@@ -1700,15 +1699,14 @@ qemuMonitorSetCapabilities(qemuMonitorPtr mon)
 
 
 int
-qemuMonitorStartCPUs(qemuMonitorPtr mon,
-                     virConnectPtr conn)
+qemuMonitorStartCPUs(qemuMonitorPtr mon)
 {
     QEMU_CHECK_MONITOR(mon);
 
     if (mon->json)
-        return qemuMonitorJSONStartCPUs(mon, conn);
+        return qemuMonitorJSONStartCPUs(mon);
     else
-        return qemuMonitorTextStartCPUs(mon, conn);
+        return qemuMonitorTextStartCPUs(mon);
 }
 
 
src/qemu/qemu_monitor.h
@@ -109,13 +109,7 @@ typedef void (*qemuMonitorEofNotifyCallback)(qemuMonitorPtr mon,
 typedef void (*qemuMonitorErrorNotifyCallback)(qemuMonitorPtr mon,
                                                virDomainObjPtr vm,
                                                void *opaque);
-/* XXX we'd really like to avoid virConnectPtr here
- * It is required so the callback can find the active
- * secret driver. Need to change this to work like the
- * security drivers do, to avoid this
- */
 typedef int (*qemuMonitorDiskSecretLookupCallback)(qemuMonitorPtr mon,
-                                                   virConnectPtr conn,
                                                    virDomainObjPtr vm,
                                                    const char *path,
                                                    char **secret,
@@ -363,9 +357,7 @@ int qemuMonitorHMPCommandWithFd(qemuMonitorPtr mon,
 # define qemuMonitorHMPCommand(mon, cmd, reply) \
     qemuMonitorHMPCommandWithFd(mon, cmd, -1, reply)
 
-/* XXX same comment about virConnectPtr as above */
 int qemuMonitorGetDiskSecret(qemuMonitorPtr mon,
-                             virConnectPtr conn,
                              const char *path,
                              char **secret,
                              size_t *secretLen);
@@ -440,8 +432,7 @@ int qemuMonitorEmitDumpCompleted(qemuMonitorPtr mon,
                                  qemuMonitorDumpStatsPtr stats,
                                  const char *error);
 
-int qemuMonitorStartCPUs(qemuMonitorPtr mon,
-                         virConnectPtr conn);
+int qemuMonitorStartCPUs(qemuMonitorPtr mon);
 int qemuMonitorStopCPUs(qemuMonitorPtr mon);
 
 typedef enum {
src/qemu/qemu_monitor_json.c
@@ -1274,8 +1274,7 @@ qemuMonitorJSONSetCapabilities(qemuMonitorPtr mon)
 
 
 int
-qemuMonitorJSONStartCPUs(qemuMonitorPtr mon,
-                         virConnectPtr conn ATTRIBUTE_UNUSED)
+qemuMonitorJSONStartCPUs(qemuMonitorPtr mon)
 {
     int ret;
     virJSONValuePtr cmd = qemuMonitorJSONMakeCommand("cont", NULL);
src/qemu/qemu_monitor_json.h
@@ -48,8 +48,7 @@ int qemuMonitorJSONHumanCommandWithFd(qemuMonitorPtr mon,
 
 int qemuMonitorJSONSetCapabilities(qemuMonitorPtr mon);
 
-int qemuMonitorJSONStartCPUs(qemuMonitorPtr mon,
-                             virConnectPtr conn);
+int qemuMonitorJSONStartCPUs(qemuMonitorPtr mon);
 int qemuMonitorJSONStopCPUs(qemuMonitorPtr mon);
 int qemuMonitorJSONGetStatus(qemuMonitorPtr mon,
                              bool *running,
src/qemu/qemu_monitor_text.c
@@ -293,9 +293,8 @@ qemuMonitorSendDiskPassphrase(qemuMonitorPtr mon,
                               qemuMonitorMessagePtr msg,
                               const char *data,
                               size_t len ATTRIBUTE_UNUSED,
-                              void *opaque)
+                              void *opaque ATTRIBUTE_UNUSED)
 {
-    virConnectPtr conn = opaque;
     char *path;
     char *passphrase = NULL;
     size_t passphrase_len = 0;
@@ -326,7 +325,6 @@ qemuMonitorSendDiskPassphrase(qemuMonitorPtr mon,
 
     /* Fetch the disk password if possible */
     res = qemuMonitorGetDiskSecret(mon,
-                                   conn,
                                    path,
                                    &passphrase,
                                    &passphrase_len);
@@ -358,14 +356,13 @@ qemuMonitorSendDiskPassphrase(qemuMonitorPtr mon,
 }
 
 int
-qemuMonitorTextStartCPUs(qemuMonitorPtr mon,
-                         virConnectPtr conn)
+qemuMonitorTextStartCPUs(qemuMonitorPtr mon)
 {
     char *reply;
 
     if (qemuMonitorTextCommandWithHandler(mon, "cont",
                                           qemuMonitorSendDiskPassphrase,
-                                          conn,
+                                          NULL,
                                           -1, &reply) < 0)
         return -1;
 
src/qemu/qemu_monitor_text.h
@@ -39,8 +39,7 @@ int qemuMonitorTextCommandWithFd(qemuMonitorPtr mon,
                                  int scm_fd,
                                  char **reply);
 
-int qemuMonitorTextStartCPUs(qemuMonitorPtr mon,
-                             virConnectPtr conn);
+int qemuMonitorTextStartCPUs(qemuMonitorPtr mon);
 int qemuMonitorTextStopCPUs(qemuMonitorPtr mon);
 int qemuMonitorTextGetStatus(qemuMonitorPtr mon,
                              bool *running,
src/qemu/qemu_process.c
@@ -447,13 +447,13 @@ qemuProcessGetVolumeQcowPassphrase(virConnectPtr conn,
 
 static int
 qemuProcessFindVolumeQcowPassphrase(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
-                                    virConnectPtr conn,
                                     virDomainObjPtr vm,
                                     const char *path,
                                     char **secretRet,
                                     size_t *secretLen,
                                     void *opaque ATTRIBUTE_UNUSED)
 {
+    virConnectPtr conn = NULL;
     virDomainDiskDefPtr disk;
     int ret = -1;
 
@@ -465,9 +465,11 @@ qemuProcessFindVolumeQcowPassphrase(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
         goto cleanup;
     }
 
+    conn = virGetConnectSecret();
     ret = qemuProcessGetVolumeQcowPassphrase(conn, disk, secretRet, secretLen);
 
  cleanup:
+    virObjectUnref(conn);
     virObjectUnlock(vm);
     return ret;
 }
@@ -565,7 +567,7 @@ qemuProcessFakeReboot(void *opaque)
     if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_CRASHED)
         reason = VIR_DOMAIN_RUNNING_CRASHED;
 
-    if (qemuProcessStartCPUs(driver, vm, NULL,
+    if (qemuProcessStartCPUs(driver, vm,
                              reason,
                              QEMU_ASYNC_JOB_NONE) < 0) {
         if (virGetLastError() == NULL)
@@ -2854,7 +2856,7 @@ qemuProcessPrepareMonitorChr(virDomainChrSourceDefPtr monConfig,
  */
 int
 qemuProcessStartCPUs(virQEMUDriverPtr driver, virDomainObjPtr vm,
-                     virConnectPtr conn, virDomainRunningReason reason,
+                     virDomainRunningReason reason,
                      qemuDomainAsyncJob asyncJob)
 {
     int ret = -1;
@@ -2879,7 +2881,7 @@ qemuProcessStartCPUs(virQEMUDriverPtr driver, virDomainObjPtr vm,
     if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
         goto release;
 
-    ret = qemuMonitorStartCPUs(priv->mon, conn);
+    ret = qemuMonitorStartCPUs(priv->mon);
     if (qemuDomainObjExitMonitor(driver, vm) < 0)
         ret = -1;
 
@@ -3040,7 +3042,6 @@ qemuProcessUpdateState(virQEMUDriverPtr driver, virDomainObjPtr vm)
 static int
 qemuProcessRecoverMigrationIn(virQEMUDriverPtr driver,
                               virDomainObjPtr vm,
-                              virConnectPtr conn,
                               qemuMigrationJobPhase phase,
                               virDomainState state,
                               int reason)
@@ -3072,7 +3073,7 @@ qemuProcessRecoverMigrationIn(virQEMUDriverPtr driver,
          * and hope we are all set */
         VIR_DEBUG("Incoming migration finished, resuming domain %s",
                   vm->def->name);
-        if (qemuProcessStartCPUs(driver, vm, conn,
+        if (qemuProcessStartCPUs(driver, vm,
                                  VIR_DOMAIN_RUNNING_UNPAUSED,
                                  QEMU_ASYNC_JOB_NONE) < 0) {
             VIR_WARN("Could not resume domain %s", vm->def->name);
@@ -3099,7 +3100,6 @@ qemuProcessRecoverMigrationIn(virQEMUDriverPtr driver,
 static int
 qemuProcessRecoverMigrationOut(virQEMUDriverPtr driver,
                                virDomainObjPtr vm,
-                               virConnectPtr conn,
                                qemuMigrationJobPhase phase,
                                virDomainState state,
                                int reason,
@@ -3179,7 +3179,7 @@ qemuProcessRecoverMigrationOut(virQEMUDriverPtr driver,
     if (state == VIR_DOMAIN_PAUSED &&
         (reason == VIR_DOMAIN_PAUSED_MIGRATION ||
          reason == VIR_DOMAIN_PAUSED_UNKNOWN)) {
-        if (qemuProcessStartCPUs(driver, vm, conn,
+        if (qemuProcessStartCPUs(driver, vm,
                                  VIR_DOMAIN_RUNNING_UNPAUSED,
                                  QEMU_ASYNC_JOB_NONE) < 0) {
             VIR_WARN("Could not resume domain %s", vm->def->name);
@@ -3194,7 +3194,6 @@ qemuProcessRecoverMigrationOut(virQEMUDriverPtr driver,
 static int
 qemuProcessRecoverJob(virQEMUDriverPtr driver,
                       virDomainObjPtr vm,
-                      virConnectPtr conn,
                       const struct qemuDomainJobObj *job,
                       unsigned int *stopFlags)
 {
@@ -3206,13 +3205,13 @@ qemuProcessRecoverJob(virQEMUDriverPtr driver,
 
     switch (job->asyncJob) {
     case QEMU_ASYNC_JOB_MIGRATION_OUT:
-        if (qemuProcessRecoverMigrationOut(driver, vm, conn, job->phase,
+        if (qemuProcessRecoverMigrationOut(driver, vm, job->phase,
                                            state, reason, stopFlags) < 0)
             return -1;
         break;
 
     case QEMU_ASYNC_JOB_MIGRATION_IN:
-        if (qemuProcessRecoverMigrationIn(driver, vm, conn, job->phase,
+        if (qemuProcessRecoverMigrationIn(driver, vm, job->phase,
                                           state, reason) < 0)
             return -1;
         break;
@@ -3237,7 +3236,7 @@ qemuProcessRecoverJob(virQEMUDriverPtr driver,
              (reason == VIR_DOMAIN_PAUSED_SNAPSHOT ||
              reason == VIR_DOMAIN_PAUSED_MIGRATION)) ||
             reason == VIR_DOMAIN_PAUSED_UNKNOWN)) {
-            if (qemuProcessStartCPUs(driver, vm, conn,
+            if (qemuProcessStartCPUs(driver, vm,
                                      VIR_DOMAIN_RUNNING_UNPAUSED,
                                      QEMU_ASYNC_JOB_NONE) < 0) {
                 VIR_WARN("Could not resume domain '%s' after migration to file",
@@ -6260,8 +6259,7 @@ qemuProcessRefreshState(virQEMUDriverPtr driver,
  * Finish starting a new domain.
  */
 int
-qemuProcessFinishStartup(virConnectPtr conn,
-                         virQEMUDriverPtr driver,
+qemuProcessFinishStartup(virQEMUDriverPtr driver,
                          virDomainObjPtr vm,
                          qemuDomainAsyncJob asyncJob,
                          bool startCPUs,
@@ -6272,7 +6270,7 @@ qemuProcessFinishStartup(virConnectPtr conn,
 
     if (startCPUs) {
         VIR_DEBUG("Starting domain CPUs");
-        if (qemuProcessStartCPUs(driver, vm, conn,
+        if (qemuProcessStartCPUs(driver, vm,
                                  VIR_DOMAIN_RUNNING_BOOTED,
                                  asyncJob) < 0) {
             if (!virGetLastError())
@@ -6366,7 +6364,7 @@ qemuProcessStart(virConnectPtr conn,
         qemuMigrationRunIncoming(driver, vm, incoming->deferredURI, asyncJob) < 0)
         goto stop;
 
-    if (qemuProcessFinishStartup(conn, driver, vm, asyncJob,
+    if (qemuProcessFinishStartup(driver, vm, asyncJob,
                                  !(flags & VIR_QEMU_PROCESS_START_PAUSED),
                                  incoming ?
                                  VIR_DOMAIN_PAUSED_MIGRATION :
@@ -7470,7 +7468,7 @@ qemuProcessReconnect(void *opaque)
     if (qemuProcessRefreshBalloonState(driver, obj, QEMU_ASYNC_JOB_NONE) < 0)
         goto error;
 
-    if (qemuProcessRecoverJob(driver, obj, conn, &oldjob, &stopFlags) < 0)
+    if (qemuProcessRecoverJob(driver, obj, &oldjob, &stopFlags) < 0)
         goto error;
 
     if (qemuProcessUpdateDevices(driver, obj) < 0)
src/qemu/qemu_process.h
@@ -30,7 +30,6 @@ int qemuProcessPrepareMonitorChr(virDomainChrSourceDefPtr monConfig,
 
 int qemuProcessStartCPUs(virQEMUDriverPtr driver,
                          virDomainObjPtr vm,
-                         virConnectPtr conn,
                          virDomainRunningReason reason,
                          qemuDomainAsyncJob asyncJob);
 int qemuProcessStopCPUs(virQEMUDriverPtr driver,
@@ -126,8 +125,7 @@ int qemuProcessLaunch(virConnectPtr conn,
                       virNetDevVPortProfileOp vmop,
                       unsigned int flags);
 
-int qemuProcessFinishStartup(virConnectPtr conn,
-                             virQEMUDriverPtr driver,
+int qemuProcessFinishStartup(virQEMUDriverPtr driver,
                              virDomainObjPtr vm,
                              qemuDomainAsyncJob asyncJob,
                              bool startCPUs,
tests/qemumonitorjsontest.c
@@ -1238,7 +1238,7 @@ testQemuMonitorJSONCPU(const void *data)
         goto cleanup;
     }
 
-    if (qemuMonitorJSONStartCPUs(qemuMonitorTestGetMonitor(test), NULL) < 0)
+    if (qemuMonitorJSONStartCPUs(qemuMonitorTestGetMonitor(test)) < 0)
        goto cleanup;
 
     if (qemuMonitorGetStatus(qemuMonitorTestGetMonitor(test),