processSerialChangedEvent: Close agent monitor early

https://bugzilla.redhat.com/show_bug.cgi?id=890648

So, imagine you have issued an API call that involves the guest agent.
For instance, you want to query the guest's IP addresses. The API
acquires QUERY_JOB, locks the guest agent and issues the agent command.
However, for some reason the guest agent replies to the initial ping
correctly, but then crashes tragically while executing the real command
(in this case guest-network-get-interfaces). Since the initial ping went
well, libvirt thinks the guest agent is reachable and waits for a reply
to the real command. That reply will never come; what does come is a
monitor event. Our handler (processSerialChangedEvent) tries to acquire
MODIFY_JOB, which obviously fails because the thread executing the API
already holds a job. So the event handler exits early, and the
QUERY_JOB is never ended or released.
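
To make the interleaving concrete, here is a minimal, self-contained
model of the hang using plain pthreads. This is not libvirt code: the
names (job_held, msg_finished, api_thread, ...) are made up for
illustration, and the wait is bounded only so the demo terminates,
whereas the real API thread would keep waiting.

/* Hypothetical model of the hang described above; not libvirt code.
 * Build with: cc -pthread demo.c */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t notify = PTHREAD_COND_INITIALIZER;
static bool job_held = false;      /* models the per-domain job */
static bool msg_finished = false;  /* models mon->msg->finished */

/* API thread: acquires the job, "sends" the command and then waits
 * for a reply that never arrives because the agent has crashed. */
static void *api_thread(void *opaque)
{
    struct timespec deadline;

    pthread_mutex_lock(&lock);
    job_held = true;                      /* QUERY_JOB acquired */
    clock_gettime(CLOCK_REALTIME, &deadline);
    deadline.tv_sec += 3;                 /* bounded only for the demo */
    while (!msg_finished &&
           pthread_cond_timedwait(&notify, &lock, &deadline) == 0)
        ;
    if (!msg_finished)
        printf("API thread: no reply, QUERY_JOB still held\n");
    pthread_mutex_unlock(&lock);
    return opaque;
}

/* Event handler: sees the DISCONNECT event, but cannot begin a job
 * because the API thread holds one, so it bails out early. */
static void *event_thread(void *opaque)
{
    struct timespec delay = { 0, 100000000 }; /* let the API thread run first */

    nanosleep(&delay, NULL);
    pthread_mutex_lock(&lock);
    if (job_held)
        printf("event handler: job busy, exiting early\n");
    pthread_mutex_unlock(&lock);
    return opaque;
}

int main(void)
{
    pthread_t api, ev;

    pthread_create(&api, NULL, api_thread, NULL);
    pthread_create(&ev, NULL, event_thread, NULL);
    pthread_join(api, NULL);
    pthread_join(ev, NULL);
    return 0;
}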

The way to solve this is to put a flag into the agent monitor
internals. The flag is called @running and agent commands are issued
only while the flag is set. The flag is set when we connect to the
agent socket and cleared whenever we see a DISCONNECT event from the
agent. Moreover, we must wake up all the threads waiting for the
agent; this is done by signalling the condition they are waiting on.
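
The diff below adds exactly this: qemuAgentNotifyCloseLocked() clears
the flag and wakes the waiter, and qemuAgentCommand() refuses to run
when the flag is clear. Condensed into one place, with simplified
types and pthread primitives standing in for virObjectLock/virCond,
the mechanism looks roughly like this (a sketch, not the real code):

/* Condensed sketch of the mechanism; see the diff for the real code. */
#include <pthread.h>
#include <stdbool.h>

struct msg { bool finished; };

struct agent {
    pthread_mutex_t lock;
    pthread_cond_t notify;
    bool running;      /* set on connect, cleared on DISCONNECT */
    struct msg *msg;   /* message a thread is currently waiting on */
};

/* Called with the agent locked when the agent end disconnects: stop
 * accepting new commands and wake the thread that is still waiting,
 * since no reply will ever arrive. */
static void agent_notify_close_locked(struct agent *mon)
{
    if (!mon)
        return;
    mon->running = false;
    if (mon->msg && !mon->msg->finished) {
        mon->msg->finished = true;
        pthread_cond_signal(&mon->notify);
    }
}

/* Every command entry point checks the flag first, so new callers
 * fail fast instead of blocking on an agent that is already gone. */
static int agent_command(struct agent *mon)
{
    if (!mon || !mon->running)
        return -1; /* reported as VIR_ERR_AGENT_UNRESPONSIVE in the real code */
    /* ... send the command and wait on mon->notify for msg->finished ... */
    return 0;
}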

Signed-off-by: Michal Privoznik <mprivozn@redhat.com>
Michal Privoznik 2015-05-07 11:19:38 +02:00
parent 21e8fc36c6
commit 2af51483cc
3 changed files with 55 additions and 8 deletions


@@ -92,6 +92,7 @@ struct _qemuAgent {
int watch;
bool connectPending;
bool running;
virDomainObjPtr vm;
@@ -772,6 +773,7 @@ qemuAgentOpen(virDomainObjPtr vm,
goto cleanup;
}
mon->running = true;
VIR_DEBUG("New mon %p fd =%d watch=%d", mon, mon->fd, mon->watch);
return mon;
@@ -788,6 +790,36 @@ qemuAgentOpen(virDomainObjPtr vm,
}
static void
qemuAgentNotifyCloseLocked(qemuAgentPtr mon)
{
if (mon) {
mon->running = false;
/* If there is somebody waiting for a message
* wake him up. No message will arrive anyway. */
if (mon->msg && !mon->msg->finished) {
mon->msg->finished = 1;
virCondSignal(&mon->notify);
}
}
}
void
qemuAgentNotifyClose(qemuAgentPtr mon)
{
if (!mon)
return;
VIR_DEBUG("mon=%p", mon);
virObjectLock(mon);
qemuAgentNotifyCloseLocked(mon);
virObjectUnlock(mon);
}
void qemuAgentClose(qemuAgentPtr mon)
{
if (!mon)
@@ -803,12 +835,7 @@ void qemuAgentClose(qemuAgentPtr mon)
VIR_FORCE_CLOSE(mon->fd);
}
/* If there is somebody waiting for a message
* wake him up. No message will arrive anyway. */
if (mon->msg && !mon->msg->finished) {
mon->msg->finished = 1;
virCondSignal(&mon->notify);
}
qemuAgentNotifyCloseLocked(mon);
virObjectUnlock(mon);
virObjectUnref(mon);
@@ -1087,6 +1114,12 @@ qemuAgentCommand(qemuAgentPtr mon,
*reply = NULL;
if (!mon->running) {
virReportError(VIR_ERR_AGENT_UNRESPONSIVE, "%s",
_("Guest agent disappeared while executing command"));
return -1;
}
if (qemuAgentGuestSync(mon) < 0)
return -1;
@@ -1112,8 +1145,12 @@ qemuAgentCommand(qemuAgentPtr mon,
if (await_event && !needReply) {
VIR_DEBUG("Woken up by event %d", await_event);
} else {
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
_("Missing monitor reply object"));
if (mon->running)
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
_("Missing monitor reply object"));
else
virReportError(VIR_ERR_AGENT_UNRESPONSIVE, "%s",
_("Guest agent disappeared while executing command"));
ret = -1;
}
} else {


@@ -49,6 +49,8 @@ qemuAgentPtr qemuAgentOpen(virDomainObjPtr vm,
void qemuAgentClose(qemuAgentPtr mon);
void qemuAgentNotifyClose(qemuAgentPtr mon);
typedef enum {
QEMU_AGENT_EVENT_NONE = 0,
QEMU_AGENT_EVENT_SHUTDOWN,


@@ -4480,6 +4480,14 @@ processSerialChangedEvent(virQEMUDriverPtr driver,
VIR_DEBUG("Changing serial port state %s in domain %p %s",
devAlias, vm, vm->def->name);
if (newstate == VIR_DOMAIN_CHR_DEVICE_STATE_DISCONNECTED &&
virDomainObjIsActive(vm) && priv->agent) {
/* Close agent monitor early, so that other threads
* waiting for the agent to reply can finish and our
* job we acquire below can succeed. */
qemuAgentNotifyClose(priv->agent);
}
if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
goto cleanup;