libvirt/src/lxc/lxc_monitor.c


/*
 * Copyright (C) 2010-2012 Red Hat, Inc.
 *
 * lxc_monitor.c: client for LXC controller monitor
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library. If not, see
 * <http://www.gnu.org/licenses/>.
*/
#include <config.h>
#include "lxc_monitor.h"
#include "lxc_conf.h"
#include "lxc_monitor_dispatch.h"
#include "viralloc.h"
#include "virerror.h"
#include "virlog.h"
#include "virthread.h"
#include "rpc/virnetclient.h"
#include "virstring.h"

#define VIR_FROM_THIS VIR_FROM_LXC

VIR_LOG_INIT("lxc.lxc_monitor");
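
/* State for the connection to a single LXC controller monitor.  The
 * monitor is an RPC client of the libvirt_lxc controller process,
 * holding a reference to the domain object, a copy of the driver's
 * callbacks, and the network client and program used to receive the
 * controller's asynchronous events.
 */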
struct _virLXCMonitor {
    virObjectLockable parent;

    virDomainObjPtr vm;
    virLXCMonitorCallbacks cb;

    virNetClientPtr client;
    virNetClientProgramPtr program;
};
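
/* One-time registration of the virLXCMonitor class with libvirt's
 * lockable-object infrastructure.  VIR_ONCE_GLOBAL_INIT generates the
 * virLXCMonitorInitialize() entry point that virLXCMonitorNew() calls
 * before creating a monitor.
 */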
static virClassPtr virLXCMonitorClass;
static void virLXCMonitorDispose(void *obj);

static int virLXCMonitorOnceInit(void)
{
    if (!VIR_CLASS_NEW(virLXCMonitor, virClassForObjectLockable()))
        return -1;

    return 0;
}

VIR_ONCE_GLOBAL_INIT(virLXCMonitor)

static void
virLXCMonitorHandleEventExit(virNetClientProgramPtr prog,
                             virNetClientPtr client,
                             void *evdata, void *opaque);
static void
virLXCMonitorHandleEventInit(virNetClientProgramPtr prog,
                             virNetClientPtr client,
                             void *evdata, void *opaque);
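
/* Table of asynchronous events in the LXC monitor RPC protocol,
 * mapping each procedure to its handler and the XDR routine used to
 * decode the event payload.  It is registered with the client program
 * in virLXCMonitorNew().
 */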
static virNetClientProgramEvent virLXCMonitorEvents[] = {
    { VIR_LXC_MONITOR_PROC_EXIT_EVENT,
      virLXCMonitorHandleEventExit,
      sizeof(virLXCMonitorExitEventMsg),
      (xdrproc_t)xdr_virLXCMonitorExitEventMsg },
    { VIR_LXC_MONITOR_PROC_INIT_EVENT,
      virLXCMonitorHandleEventInit,
      sizeof(virLXCMonitorInitEventMsg),
      (xdrproc_t)xdr_virLXCMonitorInitEventMsg },
};
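
/* Handler for the controller's asynchronous EXIT event, emitted just
 * before the libvirt_lxc controller process quits.  The reported
 * status tells the driver how the container shut down and is passed
 * to the exitNotify callback, if one was registered.
 */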
static void
virLXCMonitorHandleEventExit(virNetClientProgramPtr prog ATTRIBUTE_UNUSED,
                             virNetClientPtr client ATTRIBUTE_UNUSED,
                             void *evdata, void *opaque)
{
    virLXCMonitorPtr mon = opaque;
    virLXCMonitorExitEventMsg *msg = evdata;

    VIR_DEBUG("Event exit %d", msg->status);
    if (mon->cb.exitNotify)
        mon->cb.exitNotify(mon, msg->status, mon->vm);
}
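
/* Handler for the controller's INIT event, which reports the PID of
 * the container's init process; the PID is forwarded to the driver's
 * initNotify callback.
 */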
static void
virLXCMonitorHandleEventInit(virNetClientProgramPtr prog ATTRIBUTE_UNUSED,
                             virNetClientPtr client ATTRIBUTE_UNUSED,
                             void *evdata, void *opaque)
{
    virLXCMonitorPtr mon = opaque;
    virLXCMonitorInitEventMsg *msg = evdata;

    VIR_DEBUG("Event init %lld", (long long)msg->initpid);
    if (mon->cb.initNotify)
        mon->cb.initNotify(mon, (pid_t)msg->initpid, mon->vm);
}
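
/* Close callback invoked by the network client when the connection to
 * the controller is lost.  The eofNotify pointer and the domain object
 * are copied out while holding the monitor lock, and the lock is
 * released before the driver callback runs, so the handler is never
 * entered with the monitor lock held.
 */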
static void virLXCMonitorEOFNotify(virNetClientPtr client ATTRIBUTE_UNUSED,
                                   int reason ATTRIBUTE_UNUSED,
                                   void *opaque)
{
    virLXCMonitorPtr mon = opaque;
    virLXCMonitorCallbackEOFNotify eofNotify;
    virDomainObjPtr vm;

    VIR_DEBUG("EOF notify mon=%p", mon);
    virObjectLock(mon);
    eofNotify = mon->cb.eofNotify;
    vm = mon->vm;
    virObjectUnlock(mon);

    if (eofNotify) {
        VIR_DEBUG("EOF callback mon=%p vm=%p", mon, vm);
        eofNotify(mon, vm);
    } else {
        VIR_DEBUG("No EOF callback");
    }
}
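
/* Free function for the close callback installed in virLXCMonitorNew();
 * it drops the extra reference taken on the monitor when the callback
 * was registered.
 */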
static void virLXCMonitorCloseFreeCallback(void *opaque)
{
    virLXCMonitorPtr mon = opaque;
    virObjectUnref(mon);
}
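
/* Create a monitor for @vm by connecting to the controller's UNIX
 * socket "$socketdir/$name.sock", registering the LXC monitor RPC
 * program with its event handlers, and installing the EOF/close
 * callback.  The callback table is copied into the monitor and a
 * reference is taken on @vm, so neither needs to outlive the caller's
 * copy.
 *
 * A rough usage sketch from the driver's side (the handler names and
 * the socket directory variable are illustrative only):
 *
 *     virLXCMonitorCallbacks cb = {
 *         .eofNotify = myEOFHandler,
 *         .exitNotify = myExitHandler,
 *         .initNotify = myInitHandler,
 *         .destroy = myDestroyHandler,
 *     };
 *     virLXCMonitorPtr mon = virLXCMonitorNew(vm, stateDir, &cb);
 *     if (!mon)
 *         return -1;
 */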
virLXCMonitorPtr virLXCMonitorNew(virDomainObjPtr vm,
                                  const char *socketdir,
                                  virLXCMonitorCallbacksPtr cb)
{
    virLXCMonitorPtr mon;
    char *sockpath = NULL;

    if (virLXCMonitorInitialize() < 0)
        return NULL;

    if (!(mon = virObjectLockableNew(virLXCMonitorClass)))
        return NULL;

    if (virAsprintf(&sockpath, "%s/%s.sock",
                    socketdir, vm->def->name) < 0)
        goto error;

    if (!(mon->client = virNetClientNewUNIX(sockpath, false, NULL)))
        goto error;

    if (virNetClientRegisterAsyncIO(mon->client) < 0)
        goto error;

    if (!(mon->program = virNetClientProgramNew(VIR_LXC_MONITOR_PROGRAM,
                                                VIR_LXC_MONITOR_PROGRAM_VERSION,
                                                virLXCMonitorEvents,
                                                ARRAY_CARDINALITY(virLXCMonitorEvents),
                                                mon)))
        goto error;

    if (virNetClientAddProgram(mon->client,
                               mon->program) < 0)
        goto error;

    mon->vm = virObjectRef(vm);
    memcpy(&mon->cb, cb, sizeof(mon->cb));

    virObjectRef(mon);
    virNetClientSetCloseCallback(mon->client, virLXCMonitorEOFNotify, mon,
                                 virLXCMonitorCloseFreeCallback);

 cleanup:
    VIR_FREE(sockpath);
    return mon;

 error:
    virObjectUnref(mon);
    mon = NULL;
    goto cleanup;
}
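
/* Dispose handler run when the last reference to the monitor is
 * dropped; it notifies the driver through the destroy callback and
 * releases the references held on the RPC program and domain object.
 */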
static void virLXCMonitorDispose(void *opaque)
{
    virLXCMonitorPtr mon = opaque;

    VIR_DEBUG("mon=%p", mon);
    if (mon->cb.destroy)
        (mon->cb.destroy)(mon, mon->vm);
    virObjectUnref(mon->program);
    virObjectUnref(mon->vm);
}
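
/* Explicitly shut down the connection to the controller.  The EOF
 * callback is cleared before the client is closed, since the caller
 * is in the middle of cleaning up the domain and is not re-entrant
 * safe.
 */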
void virLXCMonitorClose(virLXCMonitorPtr mon)
{
    virDomainObjPtr vm;
    virNetClientPtr client;

    VIR_DEBUG("mon=%p", mon);
    if (mon->client) {
        /* When manually closing the monitor, we don't
         * want to have callbacks back into us, since
         * the caller is not re-entrant safe
         */
        VIR_DEBUG("Clear EOF callback mon=%p", mon);
        vm = mon->vm;
        client = mon->client;
        mon->client = NULL;
mon->cb.eofNotify = NULL;
/* Hold a ref on the vm and drop its lock before closing the client:
 * virNetClientClose() takes the client lock, which another thread
 * dispatching a monitor event may already hold while waiting for the
 * vm lock, so keeping the vm locked here would invert the lock order
 * and deadlock. */
virObjectRef(vm);
virObjectUnlock(vm);
virNetClientClose(client);
virObjectUnref(client);
virObjectLock(vm);
virObjectUnref(vm);
}
}
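
The lock inversion described in the commit message above can be illustrated
outside of libvirt. The following is a minimal, self-contained sketch, not
part of lxc_monitor.c: it uses plain pthread mutexes rather than libvirt's
virObjectLockable API, and the names vm_lock, mon_lock, client_close and
monitor_close are purely hypothetical stand-ins for the @vm lock, the
@mon->client lock, virNetClientClose() and virLXCMonitorClose().

#include <pthread.h>

/* Illustrative only: vm_lock stands in for the @vm object lock,
 * mon_lock for the @mon->client lock. */
static pthread_mutex_t vm_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t mon_lock = PTHREAD_MUTEX_INITIALIZER;

/* Mirrors the relevant property of virNetClientClose(): it always
 * acquires the client lock internally before tearing things down. */
static void client_close(void)
{
    pthread_mutex_lock(&mon_lock);
    /* ... close the connection ... */
    pthread_mutex_unlock(&mon_lock);
}

/* The caller holds vm_lock. Drop it before taking mon_lock inside
 * client_close(), then re-acquire it, matching virLXCMonitorClose()
 * above. Calling client_close() while still holding vm_lock would
 * invert the mon_lock -> vm_lock order used by the event-dispatch
 * thread and could deadlock. */
static void monitor_close(void)
{
    pthread_mutex_unlock(&vm_lock);
    client_close();
    pthread_mutex_lock(&vm_lock);
}

int main(void)
{
    pthread_mutex_lock(&vm_lock);
    monitor_close();
    pthread_mutex_unlock(&vm_lock);
    return 0;
}

In the real code the vm object is also referenced before the unlock and
unreferenced after the relock, so it cannot be freed while its lock is
temporarily dropped.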