qemu: stop passing virConnectPtr into qemuMonitorStartCPUs

There is a long-standing hack of passing a virConnectPtr into the
qemuMonitorStartCPUs method, so that when the text monitor prompts
for a disk password we can look up virSecretPtr objects. This means
we have to pass a virConnectPtr around through countless methods up
the call chain, except that some places have no virConnectPtr
available and so have always just passed NULL. We can finally fix
this disastrous design by using virGetConnectSecret() to open a
connection to the secret driver at the time of use.

Reviewed-by: John Ferlan <jferlan@redhat.com>
Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
Daniel P. Berrangé 2018-02-09 15:40:51 +00:00
parent 0c63c117a2
commit aed679da85
11 changed files with 46 additions and 69 deletions
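
For readers skimming the diff, the essence of the new pattern is this: rather than threading a virConnectPtr from the public entry points all the way down to the monitor callback, the code that actually needs a secret lookup opens a connection to the secret driver on the spot and drops it when done. The sketch below is illustrative only and is not part of the commit; the wrapper name and its arguments are hypothetical, while virGetConnectSecret(), virObjectUnref() and qemuProcessGetVolumeQcowPassphrase() are the real calls visible in the qemu_process.c hunks further down (in the real code the last of these is a static helper inside qemu_process.c).

/* Illustrative sketch (not from the commit): open a connection to the
 * secret driver only at the point where a disk passphrase is needed,
 * instead of passing a virConnectPtr down the whole call chain.
 * Assumes libvirt's internal headers for virGetConnectSecret() and
 * virObjectUnref(); the wrapper name is hypothetical. */
static int
qemuLookupDiskSecretAtTimeOfUse(virDomainDiskDefPtr disk,
                                char **secretRet,
                                size_t *secretLen)
{
    virConnectPtr conn = NULL;
    int ret = -1;

    /* Acquire a connection to the secret driver on demand */
    if (!(conn = virGetConnectSecret()))
        goto cleanup;

    /* Perform the lookup, as the patched callback does */
    ret = qemuProcessGetVolumeQcowPassphrase(conn, disk, secretRet, secretLen);

 cleanup:
    /* Release the connection as soon as the lookup is finished;
     * virObjectUnref() is safe to call on NULL. */
    virObjectUnref(conn);
    return ret;
}

This acquire/use/unref shape is exactly what qemuProcessFindVolumeQcowPassphrase now does in qemu_process.c, which is why none of the callers higher up the chain need a virConnectPtr any more.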

src/qemu/qemu_driver.c

@@ -1954,7 +1954,7 @@ static int qemuDomainResume(virDomainPtr dom)
     } else if ((state == VIR_DOMAIN_CRASHED &&
                 reason == VIR_DOMAIN_CRASHED_PANICKED) ||
                state == VIR_DOMAIN_PAUSED) {
-        if (qemuProcessStartCPUs(driver, vm, dom->conn,
+        if (qemuProcessStartCPUs(driver, vm,
                                  VIR_DOMAIN_RUNNING_UNPAUSED,
                                  QEMU_ASYNC_JOB_NONE) < 0) {
             if (virGetLastError() == NULL)
@@ -3346,7 +3346,7 @@ qemuDomainSaveMemory(virQEMUDriverPtr driver,
  * this returns (whether returning success or failure).
  */
 static int
-qemuDomainSaveInternal(virQEMUDriverPtr driver, virDomainPtr dom,
+qemuDomainSaveInternal(virQEMUDriverPtr driver,
                        virDomainObjPtr vm, const char *path,
                        int compressed, const char *compressedpath,
                        const char *xmlin, unsigned int flags)
@@ -3447,7 +3447,7 @@ qemuDomainSaveInternal(virQEMUDriverPtr driver, virDomainPtr dom,
     if (ret < 0) {
         if (was_running && virDomainObjIsActive(vm)) {
             virErrorPtr save_err = virSaveLastError();
-            if (qemuProcessStartCPUs(driver, vm, dom->conn,
+            if (qemuProcessStartCPUs(driver, vm,
                                      VIR_DOMAIN_RUNNING_SAVE_CANCELED,
                                      QEMU_ASYNC_JOB_SAVE) < 0) {
                 VIR_WARN("Unable to resume guest CPUs after save failure");
@@ -3582,7 +3582,7 @@ qemuDomainSaveFlags(virDomainPtr dom, const char *path, const char *dxml,
         goto cleanup;
     }
-    ret = qemuDomainSaveInternal(driver, dom, vm, path, compressed,
+    ret = qemuDomainSaveInternal(driver, vm, path, compressed,
                                  compressedpath, dxml, flags);
 cleanup:
@@ -3656,7 +3656,7 @@ qemuDomainManagedSave(virDomainPtr dom, unsigned int flags)
     VIR_INFO("Saving state of domain '%s' to '%s'", vm->def->name, name);
-    ret = qemuDomainSaveInternal(driver, dom, vm, name, compressed,
+    ret = qemuDomainSaveInternal(driver, vm, name, compressed,
                                  compressedpath, NULL, flags);
     if (ret == 0)
         vm->hasManagedSave = true;
@@ -4029,7 +4029,7 @@ qemuDomainCoreDumpWithFormat(virDomainPtr dom,
     }
     if (resume && virDomainObjIsActive(vm)) {
-        if (qemuProcessStartCPUs(driver, vm, dom->conn,
+        if (qemuProcessStartCPUs(driver, vm,
                                  VIR_DOMAIN_RUNNING_UNPAUSED,
                                  QEMU_ASYNC_JOB_DUMP) < 0) {
             event = virDomainEventLifecycleNewFromObj(vm,
@@ -4216,7 +4216,7 @@ processWatchdogEvent(virQEMUDriverPtr driver,
         virReportError(VIR_ERR_OPERATION_FAILED,
                        "%s", _("Dump failed"));
-        ret = qemuProcessStartCPUs(driver, vm, NULL,
+        ret = qemuProcessStartCPUs(driver, vm,
                                    VIR_DOMAIN_RUNNING_UNPAUSED,
                                    QEMU_ASYNC_JOB_DUMP);
@@ -6677,7 +6677,7 @@ qemuDomainSaveImageStartVM(virConnectPtr conn,
     /* If it was running before, resume it now unless caller requested pause. */
     if (header->was_running && !start_paused) {
-        if (qemuProcessStartCPUs(driver, vm, conn,
+        if (qemuProcessStartCPUs(driver, vm,
                                  VIR_DOMAIN_RUNNING_RESTORED,
                                  asyncJob) < 0) {
             if (virGetLastError() == NULL)
@@ -14005,8 +14005,7 @@ qemuDomainSnapshotCreateInactiveExternal(virQEMUDriverPtr driver,
 /* The domain is expected to be locked and active. */
 static int
-qemuDomainSnapshotCreateActiveInternal(virConnectPtr conn,
-                                       virQEMUDriverPtr driver,
+qemuDomainSnapshotCreateActiveInternal(virQEMUDriverPtr driver,
                                        virDomainObjPtr vm,
                                        virDomainSnapshotObjPtr snap,
                                        unsigned int flags)
@@ -14062,7 +14061,7 @@ qemuDomainSnapshotCreateActiveInternal(virConnectPtr conn,
 cleanup:
     if (resume && virDomainObjIsActive(vm) &&
-        qemuProcessStartCPUs(driver, vm, conn,
+        qemuProcessStartCPUs(driver, vm,
                              VIR_DOMAIN_RUNNING_UNPAUSED,
                              QEMU_ASYNC_JOB_SNAPSHOT) < 0) {
         event = virDomainEventLifecycleNewFromObj(vm,
@@ -14878,8 +14877,7 @@ qemuDomainSnapshotCreateDiskActive(virQEMUDriverPtr driver,
 static int
-qemuDomainSnapshotCreateActiveExternal(virConnectPtr conn,
-                                       virQEMUDriverPtr driver,
+qemuDomainSnapshotCreateActiveExternal(virQEMUDriverPtr driver,
                                        virDomainObjPtr vm,
                                        virDomainSnapshotObjPtr snap,
                                        unsigned int flags)
@@ -15026,7 +15024,7 @@ qemuDomainSnapshotCreateActiveExternal(virConnectPtr conn,
 cleanup:
     if (resume && virDomainObjIsActive(vm) &&
-        qemuProcessStartCPUs(driver, vm, conn,
+        qemuProcessStartCPUs(driver, vm,
                              VIR_DOMAIN_RUNNING_UNPAUSED,
                              QEMU_ASYNC_JOB_SNAPSHOT) < 0) {
         event = virDomainEventLifecycleNewFromObj(vm,
@@ -15279,12 +15277,12 @@ qemuDomainSnapshotCreateXML(virDomainPtr domain,
     if (flags & VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY ||
         snap->def->memory == VIR_DOMAIN_SNAPSHOT_LOCATION_EXTERNAL) {
         /* external checkpoint or disk snapshot */
-        if (qemuDomainSnapshotCreateActiveExternal(domain->conn, driver,
+        if (qemuDomainSnapshotCreateActiveExternal(driver,
                                                    vm, snap, flags) < 0)
             goto endjob;
     } else {
         /* internal checkpoint */
-        if (qemuDomainSnapshotCreateActiveInternal(domain->conn, driver,
+        if (qemuDomainSnapshotCreateActiveInternal(driver,
                                                    vm, snap, flags) < 0)
             goto endjob;
     }
@@ -16003,7 +16001,7 @@ qemuDomainRevertToSnapshot(virDomainSnapshotPtr snapshot,
                        _("guest unexpectedly quit"));
         goto endjob;
     }
-    rc = qemuProcessStartCPUs(driver, vm, snapshot->domain->conn,
+    rc = qemuProcessStartCPUs(driver, vm,
                               VIR_DOMAIN_RUNNING_FROM_SNAPSHOT,
                               QEMU_ASYNC_JOB_START);
     if (rc < 0)

src/qemu/qemu_migration.c

@@ -273,7 +273,7 @@ qemuMigrationRestoreDomainState(virConnectPtr conn, virDomainObjPtr vm)
     VIR_DEBUG("Restoring pre-migration state due to migration error");
     /* we got here through some sort of failure; start the domain again */
-    if (qemuProcessStartCPUs(driver, vm, conn,
+    if (qemuProcessStartCPUs(driver, vm,
                              VIR_DOMAIN_RUNNING_MIGRATION_CANCELED,
                              QEMU_ASYNC_JOB_MIGRATION_OUT) < 0) {
         /* Hm, we already know we are in error here. We don't want to
@@ -2853,7 +2853,7 @@ qemuMigrationPrepareAny(virQEMUDriverPtr driver,
                                  QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
         goto stopjob;
-    if (qemuProcessFinishStartup(dconn, driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
+    if (qemuProcessFinishStartup(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
                                  false, VIR_DOMAIN_PAUSED_MIGRATION) < 0)
         goto stopjob;
@@ -5389,7 +5389,7 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
          * >= 0.10.6 to work properly. This isn't strictly necessary on
          * older qemu's, but it also doesn't hurt anything there
          */
-        if (qemuProcessStartCPUs(driver, vm, dconn,
+        if (qemuProcessStartCPUs(driver, vm,
                                  inPostCopy ? VIR_DOMAIN_RUNNING_POSTCOPY
                                             : VIR_DOMAIN_RUNNING_MIGRATED,
                                  QEMU_ASYNC_JOB_MIGRATION_IN) < 0) {

src/qemu/qemu_monitor.c

@@ -1319,7 +1319,6 @@ qemuMonitorHMPCommandWithFd(qemuMonitorPtr mon,
 int
 qemuMonitorGetDiskSecret(qemuMonitorPtr mon,
-                         virConnectPtr conn,
                          const char *path,
                          char **secret,
                          size_t *secretLen)
@@ -1328,7 +1327,7 @@ qemuMonitorGetDiskSecret(qemuMonitorPtr mon,
     *secret = NULL;
     *secretLen = 0;
-    QEMU_MONITOR_CALLBACK(mon, ret, diskSecretLookup, conn, mon->vm,
+    QEMU_MONITOR_CALLBACK(mon, ret, diskSecretLookup, mon->vm,
                           path, secret, secretLen);
     return ret;
 }
@@ -1700,15 +1699,14 @@ qemuMonitorSetCapabilities(qemuMonitorPtr mon)
 int
-qemuMonitorStartCPUs(qemuMonitorPtr mon,
-                     virConnectPtr conn)
+qemuMonitorStartCPUs(qemuMonitorPtr mon)
 {
     QEMU_CHECK_MONITOR(mon);
     if (mon->json)
-        return qemuMonitorJSONStartCPUs(mon, conn);
+        return qemuMonitorJSONStartCPUs(mon);
     else
-        return qemuMonitorTextStartCPUs(mon, conn);
+        return qemuMonitorTextStartCPUs(mon);
 }

src/qemu/qemu_monitor.h

@@ -109,13 +109,7 @@ typedef void (*qemuMonitorEofNotifyCallback)(qemuMonitorPtr mon,
 typedef void (*qemuMonitorErrorNotifyCallback)(qemuMonitorPtr mon,
                                                virDomainObjPtr vm,
                                                void *opaque);
-/* XXX we'd really like to avoid virConnectPtr here
- * It is required so the callback can find the active
- * secret driver. Need to change this to work like the
- * security drivers do, to avoid this
- */
 typedef int (*qemuMonitorDiskSecretLookupCallback)(qemuMonitorPtr mon,
-                                                   virConnectPtr conn,
                                                    virDomainObjPtr vm,
                                                    const char *path,
                                                    char **secret,
@@ -363,9 +357,7 @@ int qemuMonitorHMPCommandWithFd(qemuMonitorPtr mon,
 # define qemuMonitorHMPCommand(mon, cmd, reply) \
     qemuMonitorHMPCommandWithFd(mon, cmd, -1, reply)
-/* XXX same comment about virConnectPtr as above */
 int qemuMonitorGetDiskSecret(qemuMonitorPtr mon,
-                             virConnectPtr conn,
                              const char *path,
                              char **secret,
                              size_t *secretLen);
@@ -440,8 +432,7 @@ int qemuMonitorEmitDumpCompleted(qemuMonitorPtr mon,
                                  qemuMonitorDumpStatsPtr stats,
                                  const char *error);
-int qemuMonitorStartCPUs(qemuMonitorPtr mon,
-                         virConnectPtr conn);
+int qemuMonitorStartCPUs(qemuMonitorPtr mon);
 int qemuMonitorStopCPUs(qemuMonitorPtr mon);
 typedef enum {

src/qemu/qemu_monitor_json.c

@@ -1274,8 +1274,7 @@ qemuMonitorJSONSetCapabilities(qemuMonitorPtr mon)
 int
-qemuMonitorJSONStartCPUs(qemuMonitorPtr mon,
-                         virConnectPtr conn ATTRIBUTE_UNUSED)
+qemuMonitorJSONStartCPUs(qemuMonitorPtr mon)
 {
     int ret;
     virJSONValuePtr cmd = qemuMonitorJSONMakeCommand("cont", NULL);

src/qemu/qemu_monitor_json.h

@@ -48,8 +48,7 @@ int qemuMonitorJSONHumanCommandWithFd(qemuMonitorPtr mon,
 int qemuMonitorJSONSetCapabilities(qemuMonitorPtr mon);
-int qemuMonitorJSONStartCPUs(qemuMonitorPtr mon,
-                             virConnectPtr conn);
+int qemuMonitorJSONStartCPUs(qemuMonitorPtr mon);
 int qemuMonitorJSONStopCPUs(qemuMonitorPtr mon);
 int qemuMonitorJSONGetStatus(qemuMonitorPtr mon,
                              bool *running,

src/qemu/qemu_monitor_text.c

@@ -293,9 +293,8 @@ qemuMonitorSendDiskPassphrase(qemuMonitorPtr mon,
                               qemuMonitorMessagePtr msg,
                               const char *data,
                               size_t len ATTRIBUTE_UNUSED,
-                              void *opaque)
+                              void *opaque ATTRIBUTE_UNUSED)
 {
-    virConnectPtr conn = opaque;
     char *path;
     char *passphrase = NULL;
     size_t passphrase_len = 0;
@@ -326,7 +325,6 @@ qemuMonitorSendDiskPassphrase(qemuMonitorPtr mon,
     /* Fetch the disk password if possible */
     res = qemuMonitorGetDiskSecret(mon,
-                                   conn,
                                    path,
                                    &passphrase,
                                    &passphrase_len);
@@ -358,14 +356,13 @@ qemuMonitorSendDiskPassphrase(qemuMonitorPtr mon,
 }
 int
-qemuMonitorTextStartCPUs(qemuMonitorPtr mon,
-                         virConnectPtr conn)
+qemuMonitorTextStartCPUs(qemuMonitorPtr mon)
 {
     char *reply;
     if (qemuMonitorTextCommandWithHandler(mon, "cont",
                                           qemuMonitorSendDiskPassphrase,
-                                          conn,
+                                          NULL,
                                           -1, &reply) < 0)
         return -1;

src/qemu/qemu_monitor_text.h

@@ -39,8 +39,7 @@ int qemuMonitorTextCommandWithFd(qemuMonitorPtr mon,
                                  int scm_fd,
                                  char **reply);
-int qemuMonitorTextStartCPUs(qemuMonitorPtr mon,
-                             virConnectPtr conn);
+int qemuMonitorTextStartCPUs(qemuMonitorPtr mon);
 int qemuMonitorTextStopCPUs(qemuMonitorPtr mon);
 int qemuMonitorTextGetStatus(qemuMonitorPtr mon,
                              bool *running,

src/qemu/qemu_process.c

@@ -447,13 +447,13 @@ qemuProcessGetVolumeQcowPassphrase(virConnectPtr conn,
 static int
 qemuProcessFindVolumeQcowPassphrase(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
-                                    virConnectPtr conn,
                                     virDomainObjPtr vm,
                                     const char *path,
                                     char **secretRet,
                                     size_t *secretLen,
                                     void *opaque ATTRIBUTE_UNUSED)
 {
+    virConnectPtr conn = NULL;
     virDomainDiskDefPtr disk;
     int ret = -1;
@@ -465,9 +465,11 @@ qemuProcessFindVolumeQcowPassphrase(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
         goto cleanup;
     }
+    conn = virGetConnectSecret();
     ret = qemuProcessGetVolumeQcowPassphrase(conn, disk, secretRet, secretLen);
 cleanup:
+    virObjectUnref(conn);
     virObjectUnlock(vm);
     return ret;
 }
@@ -565,7 +567,7 @@ qemuProcessFakeReboot(void *opaque)
     if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_CRASHED)
         reason = VIR_DOMAIN_RUNNING_CRASHED;
-    if (qemuProcessStartCPUs(driver, vm, NULL,
+    if (qemuProcessStartCPUs(driver, vm,
                              reason,
                              QEMU_ASYNC_JOB_NONE) < 0) {
         if (virGetLastError() == NULL)
@@ -2854,7 +2856,7 @@ qemuProcessPrepareMonitorChr(virDomainChrSourceDefPtr monConfig,
  */
 int
 qemuProcessStartCPUs(virQEMUDriverPtr driver, virDomainObjPtr vm,
-                     virConnectPtr conn, virDomainRunningReason reason,
+                     virDomainRunningReason reason,
                      qemuDomainAsyncJob asyncJob)
 {
     int ret = -1;
@@ -2879,7 +2881,7 @@ qemuProcessStartCPUs(virQEMUDriverPtr driver, virDomainObjPtr vm,
     if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
         goto release;
-    ret = qemuMonitorStartCPUs(priv->mon, conn);
+    ret = qemuMonitorStartCPUs(priv->mon);
     if (qemuDomainObjExitMonitor(driver, vm) < 0)
         ret = -1;
@@ -3040,7 +3042,6 @@ qemuProcessUpdateState(virQEMUDriverPtr driver, virDomainObjPtr vm)
 static int
 qemuProcessRecoverMigrationIn(virQEMUDriverPtr driver,
                               virDomainObjPtr vm,
-                              virConnectPtr conn,
                               qemuMigrationJobPhase phase,
                               virDomainState state,
                               int reason)
@@ -3072,7 +3073,7 @@ qemuProcessRecoverMigrationIn(virQEMUDriverPtr driver,
          * and hope we are all set */
         VIR_DEBUG("Incoming migration finished, resuming domain %s",
                   vm->def->name);
-        if (qemuProcessStartCPUs(driver, vm, conn,
+        if (qemuProcessStartCPUs(driver, vm,
                                  VIR_DOMAIN_RUNNING_UNPAUSED,
                                  QEMU_ASYNC_JOB_NONE) < 0) {
             VIR_WARN("Could not resume domain %s", vm->def->name);
@@ -3099,7 +3100,6 @@ qemuProcessRecoverMigrationIn(virQEMUDriverPtr driver,
 static int
 qemuProcessRecoverMigrationOut(virQEMUDriverPtr driver,
                                virDomainObjPtr vm,
-                               virConnectPtr conn,
                                qemuMigrationJobPhase phase,
                                virDomainState state,
                                int reason,
@@ -3179,7 +3179,7 @@ qemuProcessRecoverMigrationOut(virQEMUDriverPtr driver,
     if (state == VIR_DOMAIN_PAUSED &&
         (reason == VIR_DOMAIN_PAUSED_MIGRATION ||
          reason == VIR_DOMAIN_PAUSED_UNKNOWN)) {
-        if (qemuProcessStartCPUs(driver, vm, conn,
+        if (qemuProcessStartCPUs(driver, vm,
                                  VIR_DOMAIN_RUNNING_UNPAUSED,
                                  QEMU_ASYNC_JOB_NONE) < 0) {
             VIR_WARN("Could not resume domain %s", vm->def->name);
@@ -3194,7 +3194,6 @@ qemuProcessRecoverMigrationOut(virQEMUDriverPtr driver,
 static int
 qemuProcessRecoverJob(virQEMUDriverPtr driver,
                       virDomainObjPtr vm,
-                      virConnectPtr conn,
                       const struct qemuDomainJobObj *job,
                       unsigned int *stopFlags)
 {
@@ -3206,13 +3205,13 @@ qemuProcessRecoverJob(virQEMUDriverPtr driver,
     switch (job->asyncJob) {
     case QEMU_ASYNC_JOB_MIGRATION_OUT:
-        if (qemuProcessRecoverMigrationOut(driver, vm, conn, job->phase,
+        if (qemuProcessRecoverMigrationOut(driver, vm, job->phase,
                                            state, reason, stopFlags) < 0)
             return -1;
         break;
     case QEMU_ASYNC_JOB_MIGRATION_IN:
-        if (qemuProcessRecoverMigrationIn(driver, vm, conn, job->phase,
+        if (qemuProcessRecoverMigrationIn(driver, vm, job->phase,
                                           state, reason) < 0)
             return -1;
         break;
@@ -3237,7 +3236,7 @@ qemuProcessRecoverJob(virQEMUDriverPtr driver,
          (reason == VIR_DOMAIN_PAUSED_SNAPSHOT ||
           reason == VIR_DOMAIN_PAUSED_MIGRATION)) ||
         reason == VIR_DOMAIN_PAUSED_UNKNOWN)) {
-        if (qemuProcessStartCPUs(driver, vm, conn,
+        if (qemuProcessStartCPUs(driver, vm,
                                  VIR_DOMAIN_RUNNING_UNPAUSED,
                                  QEMU_ASYNC_JOB_NONE) < 0) {
             VIR_WARN("Could not resume domain '%s' after migration to file",
@@ -6260,8 +6259,7 @@ qemuProcessRefreshState(virQEMUDriverPtr driver,
  * Finish starting a new domain.
  */
 int
-qemuProcessFinishStartup(virConnectPtr conn,
-                         virQEMUDriverPtr driver,
+qemuProcessFinishStartup(virQEMUDriverPtr driver,
                          virDomainObjPtr vm,
                          qemuDomainAsyncJob asyncJob,
                          bool startCPUs,
@@ -6272,7 +6270,7 @@ qemuProcessFinishStartup(virConnectPtr conn,
     if (startCPUs) {
         VIR_DEBUG("Starting domain CPUs");
-        if (qemuProcessStartCPUs(driver, vm, conn,
+        if (qemuProcessStartCPUs(driver, vm,
                                  VIR_DOMAIN_RUNNING_BOOTED,
                                  asyncJob) < 0) {
             if (!virGetLastError())
@@ -6366,7 +6364,7 @@ qemuProcessStart(virConnectPtr conn,
         qemuMigrationRunIncoming(driver, vm, incoming->deferredURI, asyncJob) < 0)
         goto stop;
-    if (qemuProcessFinishStartup(conn, driver, vm, asyncJob,
+    if (qemuProcessFinishStartup(driver, vm, asyncJob,
                                  !(flags & VIR_QEMU_PROCESS_START_PAUSED),
                                  incoming ?
                                  VIR_DOMAIN_PAUSED_MIGRATION :
@@ -7470,7 +7468,7 @@ qemuProcessReconnect(void *opaque)
     if (qemuProcessRefreshBalloonState(driver, obj, QEMU_ASYNC_JOB_NONE) < 0)
         goto error;
-    if (qemuProcessRecoverJob(driver, obj, conn, &oldjob, &stopFlags) < 0)
+    if (qemuProcessRecoverJob(driver, obj, &oldjob, &stopFlags) < 0)
        goto error;
     if (qemuProcessUpdateDevices(driver, obj) < 0)

src/qemu/qemu_process.h

@@ -30,7 +30,6 @@ int qemuProcessPrepareMonitorChr(virDomainChrSourceDefPtr monConfig,
 int qemuProcessStartCPUs(virQEMUDriverPtr driver,
                          virDomainObjPtr vm,
-                         virConnectPtr conn,
                          virDomainRunningReason reason,
                          qemuDomainAsyncJob asyncJob);
 int qemuProcessStopCPUs(virQEMUDriverPtr driver,
@@ -126,8 +125,7 @@ int qemuProcessLaunch(virConnectPtr conn,
                       virNetDevVPortProfileOp vmop,
                       unsigned int flags);
-int qemuProcessFinishStartup(virConnectPtr conn,
-                             virQEMUDriverPtr driver,
+int qemuProcessFinishStartup(virQEMUDriverPtr driver,
                              virDomainObjPtr vm,
                              qemuDomainAsyncJob asyncJob,
                              bool startCPUs,

tests/qemumonitorjsontest.c

@@ -1238,7 +1238,7 @@ testQemuMonitorJSONCPU(const void *data)
         goto cleanup;
     }
-    if (qemuMonitorJSONStartCPUs(qemuMonitorTestGetMonitor(test), NULL) < 0)
+    if (qemuMonitorJSONStartCPUs(qemuMonitorTestGetMonitor(test)) < 0)
         goto cleanup;
     if (qemuMonitorGetStatus(qemuMonitorTestGetMonitor(test),