/*
 * event_poll.c: event loop for monitoring file handles
 *
 * Copyright (C) 2007, 2010-2011 Red Hat, Inc.
 * Copyright (C) 2007 Daniel P. Berrange
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Author: Daniel P. Berrange <berrange@redhat.com>
 */
#include <config.h>

#include <stdlib.h>
#include <string.h>
#include <poll.h>
#include <sys/time.h>
#include <errno.h>
#include <unistd.h>

#include "threads.h"
#include "logging.h"
#include "event_poll.h"
#include "memory.h"
#include "util.h"
#include "ignore-value.h"
#include "virterror_internal.h"

#define EVENT_DEBUG(fmt, ...) VIR_DEBUG(fmt, __VA_ARGS__)

#define VIR_FROM_THIS VIR_FROM_EVENT
#define virEventError(code, ...) \
    virReportErrorHelper(NULL, VIR_FROM_EVENT, code, __FILE__, \
                         __FUNCTION__, __LINE__, __VA_ARGS__)

static int virEventPollInterruptLocked(void);
/* State for a single file handle being monitored */
struct virEventPollHandle {
int watch;
2007-06-26 19:11:00 +00:00
int fd;
int events;
virEventHandleCallback cb;
virFreeCallback ff;
2007-06-26 19:11:00 +00:00
void *opaque;
int deleted;
};
/* State for a single timer being generated */
struct virEventPollTimeout {
2007-06-26 19:11:00 +00:00
int timer;
int frequency;
2007-06-26 19:11:00 +00:00
unsigned long long expiresAt;
virEventTimeoutCallback cb;
virFreeCallback ff;
2007-06-26 19:11:00 +00:00
void *opaque;
int deleted;
};
/* Allocate extra slots for virEventPollHandle/virEventPollTimeout
2007-06-26 19:11:00 +00:00
records in this multiple */
#define EVENT_ALLOC_EXTENT 10
/* State for the main event loop */
struct virEventPollLoop {
virMutex lock;
int running;
virThread leader;
int wakeupfd[2];
size_t handlesCount;
size_t handlesAlloc;
struct virEventPollHandle *handles;
size_t timeoutsCount;
size_t timeoutsAlloc;
struct virEventPollTimeout *timeouts;
2007-06-26 19:11:00 +00:00
};
/* Only have one event loop */
static struct virEventPollLoop eventLoop;
2007-06-26 19:11:00 +00:00
/* Unique ID for the next FD watch to be registered */
static int nextWatch = 1;
2007-06-26 19:11:00 +00:00
/* Unique ID for the next timer to be registered */
static int nextTimer = 1;
2007-06-26 19:11:00 +00:00
/*
 * Register a callback for monitoring file handle events.
 * NB, it *must* be safe to call this from within a callback.
 * For this reason we only ever append to the existing list.
 * Returns the new watch ID (> 0) on success, -1 on failure.
 */
int virEventPollAddHandle(int fd, int events,
                          virEventHandleCallback cb,
                          void *opaque,
                          virFreeCallback ff) {
    int watch;
    EVENT_DEBUG("Add handle fd=%d events=%d cb=%p opaque=%p", fd, events, cb, opaque);
    virMutexLock(&eventLoop.lock);
    if (eventLoop.handlesCount == eventLoop.handlesAlloc) {
        EVENT_DEBUG("Used %zu handle slots, adding at least %d more",
                    eventLoop.handlesAlloc, EVENT_ALLOC_EXTENT);
        if (VIR_RESIZE_N(eventLoop.handles, eventLoop.handlesAlloc,
                         eventLoop.handlesCount, EVENT_ALLOC_EXTENT) < 0) {
            virMutexUnlock(&eventLoop.lock);
            return -1;
        }
    }

    watch = nextWatch++;

    eventLoop.handles[eventLoop.handlesCount].watch = watch;
    eventLoop.handles[eventLoop.handlesCount].fd = fd;
    eventLoop.handles[eventLoop.handlesCount].events =
        virEventPollToNativeEvents(events);
    eventLoop.handles[eventLoop.handlesCount].cb = cb;
    eventLoop.handles[eventLoop.handlesCount].ff = ff;
    eventLoop.handles[eventLoop.handlesCount].opaque = opaque;
    eventLoop.handles[eventLoop.handlesCount].deleted = 0;

    eventLoop.handlesCount++;

    /* Wake up a loop currently blocked in poll() so it sees the new FD */
    virEventPollInterruptLocked();
    virMutexUnlock(&eventLoop.lock);

    return watch;
}
void virEventPollUpdateHandle(int watch, int events) {
int i;
EVENT_DEBUG("Update handle w=%d e=%d", watch, events);
if (watch <= 0) {
VIR_WARN("Ignoring invalid update watch %d", watch);
return;
}
virMutexLock(&eventLoop.lock);
for (i = 0 ; i < eventLoop.handlesCount ; i++) {
if (eventLoop.handles[i].watch == watch) {
eventLoop.handles[i].events =
virEventPollToNativeEvents(events);
virEventPollInterruptLocked();
break;
}
}
virMutexUnlock(&eventLoop.lock);
}
2007-06-26 19:11:00 +00:00
/*
* Unregister a callback from a file handle
* NB, it *must* be safe to call this from within a callback
* For this reason we only ever set a flag in the existing list.
* Actual deletion will be done out-of-band
*/
int virEventPollRemoveHandle(int watch) {
2007-06-26 19:11:00 +00:00
int i;
EVENT_DEBUG("Remove handle w=%d", watch);
if (watch <= 0) {
VIR_WARN("Ignoring invalid remove watch %d", watch);
return -1;
}
virMutexLock(&eventLoop.lock);
2007-06-26 19:11:00 +00:00
for (i = 0 ; i < eventLoop.handlesCount ; i++) {
if (eventLoop.handles[i].deleted)
continue;
if (eventLoop.handles[i].watch == watch) {
EVENT_DEBUG("mark delete %d %d", i, eventLoop.handles[i].fd);
2007-06-26 19:11:00 +00:00
eventLoop.handles[i].deleted = 1;
virEventPollInterruptLocked();
virMutexUnlock(&eventLoop.lock);
2007-06-26 19:11:00 +00:00
return 0;
}
}
virMutexUnlock(&eventLoop.lock);
2007-06-26 19:11:00 +00:00
return -1;
}
/*
 * Register a callback for a timer event.
 * NB, it *must* be safe to call this from within a callback.
 * For this reason we only ever append to the existing list.
 * Returns the new timer ID (> 0) on success, -1 on failure.
 */
int virEventPollAddTimeout(int frequency,
                           virEventTimeoutCallback cb,
                           void *opaque,
                           virFreeCallback ff) {
    struct timeval now;
    int ret;
    EVENT_DEBUG("Adding timer %d with %d ms freq", nextTimer, frequency);
    if (gettimeofday(&now, NULL) < 0) {
        return -1;
    }

    virMutexLock(&eventLoop.lock);
    if (eventLoop.timeoutsCount == eventLoop.timeoutsAlloc) {
        EVENT_DEBUG("Used %zu timeout slots, adding at least %d more",
                    eventLoop.timeoutsAlloc, EVENT_ALLOC_EXTENT);
        if (VIR_RESIZE_N(eventLoop.timeouts, eventLoop.timeoutsAlloc,
                         eventLoop.timeoutsCount, EVENT_ALLOC_EXTENT) < 0) {
            virMutexUnlock(&eventLoop.lock);
            return -1;
        }
    }

    eventLoop.timeouts[eventLoop.timeoutsCount].timer = nextTimer++;
    eventLoop.timeouts[eventLoop.timeoutsCount].frequency = frequency;
    eventLoop.timeouts[eventLoop.timeoutsCount].cb = cb;
    eventLoop.timeouts[eventLoop.timeoutsCount].ff = ff;
    eventLoop.timeouts[eventLoop.timeoutsCount].opaque = opaque;
    eventLoop.timeouts[eventLoop.timeoutsCount].deleted = 0;
    /* frequency < 0 registers the timer in a disabled state (expiresAt = 0) */
    eventLoop.timeouts[eventLoop.timeoutsCount].expiresAt =
        frequency >= 0 ? frequency +
        (((unsigned long long)now.tv_sec)*1000) +
        (((unsigned long long)now.tv_usec)/1000) : 0;

    eventLoop.timeoutsCount++;
    ret = nextTimer-1;
    virEventPollInterruptLocked();
    virMutexUnlock(&eventLoop.lock);
    return ret;
}
void virEventPollUpdateTimeout(int timer, int frequency) {
struct timeval tv;
int i;
EVENT_DEBUG("Updating timer %d timeout with %d ms freq", timer, frequency);
if (timer <= 0) {
VIR_WARN("Ignoring invalid update timer %d", timer);
return;
}
if (gettimeofday(&tv, NULL) < 0) {
return;
}
virMutexLock(&eventLoop.lock);
for (i = 0 ; i < eventLoop.timeoutsCount ; i++) {
if (eventLoop.timeouts[i].timer == timer) {
eventLoop.timeouts[i].frequency = frequency;
eventLoop.timeouts[i].expiresAt =
frequency >= 0 ? frequency +
(((unsigned long long)tv.tv_sec)*1000) +
(((unsigned long long)tv.tv_usec)/1000) : 0;
virEventPollInterruptLocked();
break;
}
}
virMutexUnlock(&eventLoop.lock);
}
2007-06-26 19:11:00 +00:00
/*
* Unregister a callback for a timer
* NB, it *must* be safe to call this from within a callback
* For this reason we only ever set a flag in the existing list.
* Actual deletion will be done out-of-band
*/
int virEventPollRemoveTimeout(int timer) {
2007-06-26 19:11:00 +00:00
int i;
EVENT_DEBUG("Remove timer %d", timer);
if (timer <= 0) {
VIR_WARN("Ignoring invalid remove timer %d", timer);
return -1;
}
virMutexLock(&eventLoop.lock);
2007-06-26 19:11:00 +00:00
for (i = 0 ; i < eventLoop.timeoutsCount ; i++) {
if (eventLoop.timeouts[i].deleted)
continue;
if (eventLoop.timeouts[i].timer == timer) {
eventLoop.timeouts[i].deleted = 1;
virEventPollInterruptLocked();
virMutexUnlock(&eventLoop.lock);
2007-06-26 19:11:00 +00:00
return 0;
}
}
virMutexUnlock(&eventLoop.lock);
2007-06-26 19:11:00 +00:00
return -1;
}
/* Iterates over all registered timeouts and determine which
* will be the first to expire.
* @timeout: filled with expiry time of soonest timer, or -1 if
* no timeout is pending
* returns: 0 on success, -1 on error
*/
static int virEventPollCalculateTimeout(int *timeout) {
2007-06-26 19:11:00 +00:00
unsigned long long then = 0;
int i;
EVENT_DEBUG("Calculate expiry of %zu timers", eventLoop.timeoutsCount);
2007-06-26 19:11:00 +00:00
/* Figure out if we need a timeout */
for (i = 0 ; i < eventLoop.timeoutsCount ; i++) {
2009-05-12 16:43:04 +00:00
if (eventLoop.timeouts[i].frequency < 0)
2007-06-26 19:11:00 +00:00
continue;
EVENT_DEBUG("Got a timeout scheduled for %llu", eventLoop.timeouts[i].expiresAt);
2007-06-26 19:11:00 +00:00
if (then == 0 ||
eventLoop.timeouts[i].expiresAt < then)
then = eventLoop.timeouts[i].expiresAt;
}
/* Calculate how long we should wait for a timeout if needed */
if (then > 0) {
struct timeval tv;
if (gettimeofday(&tv, NULL) < 0) {
virReportSystemError(errno, "%s",
_("Unable to get current time"));
2007-06-26 19:11:00 +00:00
return -1;
}
*timeout = then -
((((unsigned long long)tv.tv_sec)*1000) +
(((unsigned long long)tv.tv_usec)/1000));
if (*timeout < 0)
*timeout = 0;
2007-06-26 19:11:00 +00:00
} else {
*timeout = -1;
}
EVENT_DEBUG("Timeout at %llu due in %d ms", then, *timeout);
2007-06-26 19:11:00 +00:00
return 0;
}
/*
* Allocate a pollfd array containing data for all registered
* file handles. The caller must free the returned data struct
* returns: the pollfd array, or NULL on error
*/
static struct pollfd *virEventPollMakePollFDs(int *nfds) {
2007-06-26 19:11:00 +00:00
struct pollfd *fds;
2009-05-12 16:43:04 +00:00
int i;
2007-06-26 19:11:00 +00:00
*nfds = 0;
for (i = 0 ; i < eventLoop.handlesCount ; i++) {
unlock eventLoop before calling callback function When I use newest libvirt to save a domain, libvirtd will be deadlock. Here is the output of gdb: (gdb) thread 3 [Switching to thread 3 (Thread 0x7f972a1fc710 (LWP 30265))]#0 0x000000351fe0e034 in __lll_lock_wait () from /lib64/libpthread.so.0 (gdb) bt at qemu/qemu_driver.c:2074 ret=0x7f972a1fbbe0) at remote.c:2273 (gdb) thread 7 [Switching to thread 7 (Thread 0x7f9730bcd710 (LWP 30261))]#0 0x000000351fe0e034 in __lll_lock_wait () from /lib64/libpthread.so.0 (gdb) bt (gdb) p *(virMutexPtr)0x6fdd60 $2 = {lock = {__data = {__lock = 2, __count = 0, __owner = 30261, __nusers = 1, __kind = 0, __spins = 0, __list = {__prev = 0x0, __next = 0x0}}, __size = "\002\000\000\000\000\000\000\000\065v\000\000\001", '\000' <repeats 26 times>, __align = 2}} (gdb) p *(virMutexPtr)0x1a63ac0 $3 = {lock = {__data = {__lock = 2, __count = 0, __owner = 30265, __nusers = 1, __kind = 0, __spins = 0, __list = {__prev = 0x0, __next = 0x0}}, __size = "\002\000\000\000\000\000\000\000\071v\000\000\001", '\000' <repeats 26 times>, __align = 2}} (gdb) info threads 7 Thread 0x7f9730bcd710 (LWP 30261) 0x000000351fe0e034 in __lll_lock_wait () from /lib64/libpthread.so.0 6 Thread 0x7f972bfff710 (LWP 30262) 0x000000351fe0b43c in pthread_cond_wait@@GLIBC_2.3.2 () from /lib64/libpthread.so.0 5 Thread 0x7f972b5fe710 (LWP 30263) 0x000000351fe0b43c in pthread_cond_wait@@GLIBC_2.3.2 () from /lib64/libpthread.so.0 4 Thread 0x7f972abfd710 (LWP 30264) 0x000000351fe0b43c in pthread_cond_wait@@GLIBC_2.3.2 () from /lib64/libpthread.so.0 * 3 Thread 0x7f972a1fc710 (LWP 30265) 0x000000351fe0e034 in __lll_lock_wait () from /lib64/libpthread.so.0 2 Thread 0x7f97297fb710 (LWP 30266) 0x000000351fe0b43c in pthread_cond_wait@@GLIBC_2.3.2 () from /lib64/libpthread.so.0 1 Thread 0x7f9737aac800 (LWP 30260) 0x000000351fe0803d in pthread_join () from /lib64/libpthread.so.0 The reason is that we will try to lock some object in callback function, and we may call event API with 
locking the same object. In the function virEventDispatchHandles(), we unlock eventLoop before calling callback function. I think we should do the same thing in the function virEventCleanupTimeouts() and virEventCleanupHandles(). Signed-off-by: Wen Congyang <wency@cn.fujitsu.com> Signed-off-by: Eric Blake <eblake@redhat.com>
2011-03-07 06:06:49 +00:00
if (eventLoop.handles[i].events && !eventLoop.handles[i].deleted)
(*nfds)++;
}
2007-06-26 19:11:00 +00:00
/* Setup the poll file handle data structs */
if (VIR_ALLOC_N(fds, *nfds) < 0) {
virReportOOMError();
2009-05-12 16:43:04 +00:00
return NULL;
}
2007-06-26 19:11:00 +00:00
*nfds = 0;
2009-05-12 16:43:04 +00:00
for (i = 0 ; i < eventLoop.handlesCount ; i++) {
unlock eventLoop before calling callback function When I use newest libvirt to save a domain, libvirtd will be deadlock. Here is the output of gdb: (gdb) thread 3 [Switching to thread 3 (Thread 0x7f972a1fc710 (LWP 30265))]#0 0x000000351fe0e034 in __lll_lock_wait () from /lib64/libpthread.so.0 (gdb) bt at qemu/qemu_driver.c:2074 ret=0x7f972a1fbbe0) at remote.c:2273 (gdb) thread 7 [Switching to thread 7 (Thread 0x7f9730bcd710 (LWP 30261))]#0 0x000000351fe0e034 in __lll_lock_wait () from /lib64/libpthread.so.0 (gdb) bt (gdb) p *(virMutexPtr)0x6fdd60 $2 = {lock = {__data = {__lock = 2, __count = 0, __owner = 30261, __nusers = 1, __kind = 0, __spins = 0, __list = {__prev = 0x0, __next = 0x0}}, __size = "\002\000\000\000\000\000\000\000\065v\000\000\001", '\000' <repeats 26 times>, __align = 2}} (gdb) p *(virMutexPtr)0x1a63ac0 $3 = {lock = {__data = {__lock = 2, __count = 0, __owner = 30265, __nusers = 1, __kind = 0, __spins = 0, __list = {__prev = 0x0, __next = 0x0}}, __size = "\002\000\000\000\000\000\000\000\071v\000\000\001", '\000' <repeats 26 times>, __align = 2}} (gdb) info threads 7 Thread 0x7f9730bcd710 (LWP 30261) 0x000000351fe0e034 in __lll_lock_wait () from /lib64/libpthread.so.0 6 Thread 0x7f972bfff710 (LWP 30262) 0x000000351fe0b43c in pthread_cond_wait@@GLIBC_2.3.2 () from /lib64/libpthread.so.0 5 Thread 0x7f972b5fe710 (LWP 30263) 0x000000351fe0b43c in pthread_cond_wait@@GLIBC_2.3.2 () from /lib64/libpthread.so.0 4 Thread 0x7f972abfd710 (LWP 30264) 0x000000351fe0b43c in pthread_cond_wait@@GLIBC_2.3.2 () from /lib64/libpthread.so.0 * 3 Thread 0x7f972a1fc710 (LWP 30265) 0x000000351fe0e034 in __lll_lock_wait () from /lib64/libpthread.so.0 2 Thread 0x7f97297fb710 (LWP 30266) 0x000000351fe0b43c in pthread_cond_wait@@GLIBC_2.3.2 () from /lib64/libpthread.so.0 1 Thread 0x7f9737aac800 (LWP 30260) 0x000000351fe0803d in pthread_join () from /lib64/libpthread.so.0 The reason is that we will try to lock some object in callback function, and we may call event API with 
locking the same object. In the function virEventDispatchHandles(), we unlock eventLoop before calling callback function. I think we should do the same thing in the function virEventCleanupTimeouts() and virEventCleanupHandles(). Signed-off-by: Wen Congyang <wency@cn.fujitsu.com> Signed-off-by: Eric Blake <eblake@redhat.com>
2011-03-07 06:06:49 +00:00
EVENT_DEBUG("Prepare n=%d w=%d, f=%d e=%d d=%d", i,
2009-05-12 16:43:04 +00:00
eventLoop.handles[i].watch,
eventLoop.handles[i].fd,
unlock eventLoop before calling callback function When I use newest libvirt to save a domain, libvirtd will be deadlock. Here is the output of gdb: (gdb) thread 3 [Switching to thread 3 (Thread 0x7f972a1fc710 (LWP 30265))]#0 0x000000351fe0e034 in __lll_lock_wait () from /lib64/libpthread.so.0 (gdb) bt at qemu/qemu_driver.c:2074 ret=0x7f972a1fbbe0) at remote.c:2273 (gdb) thread 7 [Switching to thread 7 (Thread 0x7f9730bcd710 (LWP 30261))]#0 0x000000351fe0e034 in __lll_lock_wait () from /lib64/libpthread.so.0 (gdb) bt (gdb) p *(virMutexPtr)0x6fdd60 $2 = {lock = {__data = {__lock = 2, __count = 0, __owner = 30261, __nusers = 1, __kind = 0, __spins = 0, __list = {__prev = 0x0, __next = 0x0}}, __size = "\002\000\000\000\000\000\000\000\065v\000\000\001", '\000' <repeats 26 times>, __align = 2}} (gdb) p *(virMutexPtr)0x1a63ac0 $3 = {lock = {__data = {__lock = 2, __count = 0, __owner = 30265, __nusers = 1, __kind = 0, __spins = 0, __list = {__prev = 0x0, __next = 0x0}}, __size = "\002\000\000\000\000\000\000\000\071v\000\000\001", '\000' <repeats 26 times>, __align = 2}} (gdb) info threads 7 Thread 0x7f9730bcd710 (LWP 30261) 0x000000351fe0e034 in __lll_lock_wait () from /lib64/libpthread.so.0 6 Thread 0x7f972bfff710 (LWP 30262) 0x000000351fe0b43c in pthread_cond_wait@@GLIBC_2.3.2 () from /lib64/libpthread.so.0 5 Thread 0x7f972b5fe710 (LWP 30263) 0x000000351fe0b43c in pthread_cond_wait@@GLIBC_2.3.2 () from /lib64/libpthread.so.0 4 Thread 0x7f972abfd710 (LWP 30264) 0x000000351fe0b43c in pthread_cond_wait@@GLIBC_2.3.2 () from /lib64/libpthread.so.0 * 3 Thread 0x7f972a1fc710 (LWP 30265) 0x000000351fe0e034 in __lll_lock_wait () from /lib64/libpthread.so.0 2 Thread 0x7f97297fb710 (LWP 30266) 0x000000351fe0b43c in pthread_cond_wait@@GLIBC_2.3.2 () from /lib64/libpthread.so.0 1 Thread 0x7f9737aac800 (LWP 30260) 0x000000351fe0803d in pthread_join () from /lib64/libpthread.so.0 The reason is that we will try to lock some object in callback function, and we may call event API with 
locking the same object. In the function virEventDispatchHandles(), we unlock eventLoop before calling callback function. I think we should do the same thing in the function virEventCleanupTimeouts() and virEventCleanupHandles(). Signed-off-by: Wen Congyang <wency@cn.fujitsu.com> Signed-off-by: Eric Blake <eblake@redhat.com>
2011-03-07 06:06:49 +00:00
eventLoop.handles[i].events,
eventLoop.handles[i].deleted);
if (!eventLoop.handles[i].events || eventLoop.handles[i].deleted)
continue;
fds[*nfds].fd = eventLoop.handles[i].fd;
fds[*nfds].events = eventLoop.handles[i].events;
fds[*nfds].revents = 0;
(*nfds)++;
//EVENT_DEBUG("Wait for %d %d", eventLoop.handles[i].fd, eventLoop.handles[i].events);
2007-06-26 19:11:00 +00:00
}
2009-05-12 16:43:04 +00:00
return fds;
2007-06-26 19:11:00 +00:00
}
/*
* Iterate over all timers and determine if any have expired.
* Invoke the user supplied callback for each timer whose
* expiry time is met, and schedule the next timeout. Does
* not try to 'catch up' on time if the actual expiry time
* was later than the requested time.
*
* This method must cope with new timers being registered
* by a callback, and must skip any timers marked as deleted.
*
* Returns 0 upon success, -1 if an error occurred
*/
static int virEventPollDispatchTimeouts(void) {
2007-06-26 19:11:00 +00:00
struct timeval tv;
unsigned long long now;
int i;
/* Save this now - it may be changed during dispatch */
int ntimeouts = eventLoop.timeoutsCount;
VIR_DEBUG("Dispatch %d", ntimeouts);
2007-06-26 19:11:00 +00:00
if (gettimeofday(&tv, NULL) < 0) {
virReportSystemError(errno, "%s",
_("Unable to get current time"));
2007-06-26 19:11:00 +00:00
return -1;
}
now = (((unsigned long long)tv.tv_sec)*1000) +
(((unsigned long long)tv.tv_usec)/1000);
for (i = 0 ; i < ntimeouts ; i++) {
if (eventLoop.timeouts[i].deleted || eventLoop.timeouts[i].frequency < 0)
2007-06-26 19:11:00 +00:00
continue;
/* Add 20ms fuzz so we don't pointlessly spin doing
* <10ms sleeps, particularly on kernels with low HZ
* it is fine that a timer expires 20ms earlier than
* requested
*/
if (eventLoop.timeouts[i].expiresAt <= (now+20)) {
virEventTimeoutCallback cb = eventLoop.timeouts[i].cb;
int timer = eventLoop.timeouts[i].timer;
void *opaque = eventLoop.timeouts[i].opaque;
2007-06-26 19:11:00 +00:00
eventLoop.timeouts[i].expiresAt =
now + eventLoop.timeouts[i].frequency;
virMutexUnlock(&eventLoop.lock);
(cb)(timer, opaque);
virMutexLock(&eventLoop.lock);
2007-06-26 19:11:00 +00:00
}
}
return 0;
}
/* Iterate over all file handles and dispatch any which
* have pending events listed in the poll() data. Invoke
* the user supplied callback for each handle which has
* pending events
*
* This method must cope with new handles being registered
* by a callback, and must skip any handles marked as deleted.
*
* Returns 0 upon success, -1 if an error occurred
*/
static int virEventPollDispatchHandles(int nfds, struct pollfd *fds) {
int i, n;
VIR_DEBUG("Dispatch %d", nfds);
2007-06-26 19:11:00 +00:00
2009-05-12 16:43:04 +00:00
/* NB, use nfds not eventLoop.handlesCount, because new
* fds might be added on end of list, and they're not
* in the fds array we've got */
for (i = 0, n = 0 ; n < nfds && i < eventLoop.handlesCount ; n++) {
while ((eventLoop.handles[i].fd != fds[n].fd ||
eventLoop.handles[i].events == 0) &&
i < eventLoop.handlesCount) {
i++;
}
if (i == eventLoop.handlesCount)
break;
VIR_DEBUG("i=%d w=%d", i, eventLoop.handles[i].watch);
2007-06-26 19:11:00 +00:00
if (eventLoop.handles[i].deleted) {
2009-05-12 16:43:04 +00:00
EVENT_DEBUG("Skip deleted n=%d w=%d f=%d", i,
eventLoop.handles[i].watch, eventLoop.handles[i].fd);
2007-06-26 19:11:00 +00:00
continue;
}
if (fds[n].revents) {
virEventHandleCallback cb = eventLoop.handles[i].cb;
int watch = eventLoop.handles[i].watch;
void *opaque = eventLoop.handles[i].opaque;
int hEvents = virEventPollFromNativeEvents(fds[n].revents);
2009-05-12 16:43:04 +00:00
EVENT_DEBUG("Dispatch n=%d f=%d w=%d e=%d %p", i,
fds[n].fd, watch, fds[n].revents, opaque);
virMutexUnlock(&eventLoop.lock);
(cb)(watch, fds[n].fd, hEvents, opaque);
virMutexLock(&eventLoop.lock);
2007-06-26 19:11:00 +00:00
}
}
return 0;
}
/* Used post dispatch to actually remove any timers that
* were previously marked as deleted. This asynchronous
* cleanup is needed to make dispatch re-entrant safe.
*/
static void virEventPollCleanupTimeouts(void) {
2007-06-26 19:11:00 +00:00
int i;
size_t gap;
VIR_DEBUG("Cleanup %zu", eventLoop.timeoutsCount);
2007-06-26 19:11:00 +00:00
/* Remove deleted entries, shuffling down remaining
2008-02-29 12:53:10 +00:00
* entries as needed to form contiguous series
2007-06-26 19:11:00 +00:00
*/
for (i = 0 ; i < eventLoop.timeoutsCount ; ) {
if (!eventLoop.timeouts[i].deleted) {
i++;
continue;
}
EVENT_DEBUG("Purging timeout %d with id %d", i,
eventLoop.timeouts[i].timer);
unlock eventLoop before calling callback function When I use newest libvirt to save a domain, libvirtd will be deadlock. Here is the output of gdb: (gdb) thread 3 [Switching to thread 3 (Thread 0x7f972a1fc710 (LWP 30265))]#0 0x000000351fe0e034 in __lll_lock_wait () from /lib64/libpthread.so.0 (gdb) bt at qemu/qemu_driver.c:2074 ret=0x7f972a1fbbe0) at remote.c:2273 (gdb) thread 7 [Switching to thread 7 (Thread 0x7f9730bcd710 (LWP 30261))]#0 0x000000351fe0e034 in __lll_lock_wait () from /lib64/libpthread.so.0 (gdb) bt (gdb) p *(virMutexPtr)0x6fdd60 $2 = {lock = {__data = {__lock = 2, __count = 0, __owner = 30261, __nusers = 1, __kind = 0, __spins = 0, __list = {__prev = 0x0, __next = 0x0}}, __size = "\002\000\000\000\000\000\000\000\065v\000\000\001", '\000' <repeats 26 times>, __align = 2}} (gdb) p *(virMutexPtr)0x1a63ac0 $3 = {lock = {__data = {__lock = 2, __count = 0, __owner = 30265, __nusers = 1, __kind = 0, __spins = 0, __list = {__prev = 0x0, __next = 0x0}}, __size = "\002\000\000\000\000\000\000\000\071v\000\000\001", '\000' <repeats 26 times>, __align = 2}} (gdb) info threads 7 Thread 0x7f9730bcd710 (LWP 30261) 0x000000351fe0e034 in __lll_lock_wait () from /lib64/libpthread.so.0 6 Thread 0x7f972bfff710 (LWP 30262) 0x000000351fe0b43c in pthread_cond_wait@@GLIBC_2.3.2 () from /lib64/libpthread.so.0 5 Thread 0x7f972b5fe710 (LWP 30263) 0x000000351fe0b43c in pthread_cond_wait@@GLIBC_2.3.2 () from /lib64/libpthread.so.0 4 Thread 0x7f972abfd710 (LWP 30264) 0x000000351fe0b43c in pthread_cond_wait@@GLIBC_2.3.2 () from /lib64/libpthread.so.0 * 3 Thread 0x7f972a1fc710 (LWP 30265) 0x000000351fe0e034 in __lll_lock_wait () from /lib64/libpthread.so.0 2 Thread 0x7f97297fb710 (LWP 30266) 0x000000351fe0b43c in pthread_cond_wait@@GLIBC_2.3.2 () from /lib64/libpthread.so.0 1 Thread 0x7f9737aac800 (LWP 30260) 0x000000351fe0803d in pthread_join () from /lib64/libpthread.so.0 The reason is that we will try to lock some object in callback function, and we may call event API with 
locking the same object. In the function virEventDispatchHandles(), we unlock eventLoop before calling callback function. I think we should do the same thing in the function virEventCleanupTimeouts() and virEventCleanupHandles(). Signed-off-by: Wen Congyang <wency@cn.fujitsu.com> Signed-off-by: Eric Blake <eblake@redhat.com>
2011-03-07 06:06:49 +00:00
if (eventLoop.timeouts[i].ff) {
virFreeCallback ff = eventLoop.timeouts[i].ff;
void *opaque = eventLoop.timeouts[i].opaque;
virMutexUnlock(&eventLoop.lock);
ff(opaque);
virMutexLock(&eventLoop.lock);
}
2007-06-26 19:11:00 +00:00
if ((i+1) < eventLoop.timeoutsCount) {
memmove(eventLoop.timeouts+i,
eventLoop.timeouts+i+1,
sizeof(struct virEventPollTimeout)*(eventLoop.timeoutsCount
-(i+1)));
2007-06-26 19:11:00 +00:00
}
eventLoop.timeoutsCount--;
}
/* Release some memory if we've got a big chunk free */
gap = eventLoop.timeoutsAlloc - eventLoop.timeoutsCount;
if (eventLoop.timeoutsCount == 0 ||
(gap > eventLoop.timeoutsCount && gap > EVENT_ALLOC_EXTENT)) {
EVENT_DEBUG("Found %zu out of %zu timeout slots used, releasing %zu",
eventLoop.timeoutsCount, eventLoop.timeoutsAlloc, gap);
VIR_SHRINK_N(eventLoop.timeouts, eventLoop.timeoutsAlloc, gap);
2007-06-26 19:11:00 +00:00
}
}
/* Used post dispatch to actually remove any handles that
* were previously marked as deleted. This asynchronous
* cleanup is needed to make dispatch re-entrant safe.
*/
static void virEventPollCleanupHandles(void) {
2007-06-26 19:11:00 +00:00
int i;
size_t gap;
VIR_DEBUG("Cleanup %zu", eventLoop.handlesCount);
2007-06-26 19:11:00 +00:00
/* Remove deleted entries, shuffling down remaining
2008-02-29 12:53:10 +00:00
* entries as needed to form contiguous series
2007-06-26 19:11:00 +00:00
*/
for (i = 0 ; i < eventLoop.handlesCount ; ) {
if (!eventLoop.handles[i].deleted) {
i++;
continue;
}
unlock eventLoop before calling callback function When I use newest libvirt to save a domain, libvirtd will be deadlock. Here is the output of gdb: (gdb) thread 3 [Switching to thread 3 (Thread 0x7f972a1fc710 (LWP 30265))]#0 0x000000351fe0e034 in __lll_lock_wait () from /lib64/libpthread.so.0 (gdb) bt at qemu/qemu_driver.c:2074 ret=0x7f972a1fbbe0) at remote.c:2273 (gdb) thread 7 [Switching to thread 7 (Thread 0x7f9730bcd710 (LWP 30261))]#0 0x000000351fe0e034 in __lll_lock_wait () from /lib64/libpthread.so.0 (gdb) bt (gdb) p *(virMutexPtr)0x6fdd60 $2 = {lock = {__data = {__lock = 2, __count = 0, __owner = 30261, __nusers = 1, __kind = 0, __spins = 0, __list = {__prev = 0x0, __next = 0x0}}, __size = "\002\000\000\000\000\000\000\000\065v\000\000\001", '\000' <repeats 26 times>, __align = 2}} (gdb) p *(virMutexPtr)0x1a63ac0 $3 = {lock = {__data = {__lock = 2, __count = 0, __owner = 30265, __nusers = 1, __kind = 0, __spins = 0, __list = {__prev = 0x0, __next = 0x0}}, __size = "\002\000\000\000\000\000\000\000\071v\000\000\001", '\000' <repeats 26 times>, __align = 2}} (gdb) info threads 7 Thread 0x7f9730bcd710 (LWP 30261) 0x000000351fe0e034 in __lll_lock_wait () from /lib64/libpthread.so.0 6 Thread 0x7f972bfff710 (LWP 30262) 0x000000351fe0b43c in pthread_cond_wait@@GLIBC_2.3.2 () from /lib64/libpthread.so.0 5 Thread 0x7f972b5fe710 (LWP 30263) 0x000000351fe0b43c in pthread_cond_wait@@GLIBC_2.3.2 () from /lib64/libpthread.so.0 4 Thread 0x7f972abfd710 (LWP 30264) 0x000000351fe0b43c in pthread_cond_wait@@GLIBC_2.3.2 () from /lib64/libpthread.so.0 * 3 Thread 0x7f972a1fc710 (LWP 30265) 0x000000351fe0e034 in __lll_lock_wait () from /lib64/libpthread.so.0 2 Thread 0x7f97297fb710 (LWP 30266) 0x000000351fe0b43c in pthread_cond_wait@@GLIBC_2.3.2 () from /lib64/libpthread.so.0 1 Thread 0x7f9737aac800 (LWP 30260) 0x000000351fe0803d in pthread_join () from /lib64/libpthread.so.0 The reason is that we will try to lock some object in callback function, and we may call event API with 
locking the same object. In the function virEventDispatchHandles(), we unlock eventLoop before calling callback function. I think we should do the same thing in the function virEventCleanupTimeouts() and virEventCleanupHandles(). Signed-off-by: Wen Congyang <wency@cn.fujitsu.com> Signed-off-by: Eric Blake <eblake@redhat.com>
2011-03-07 06:06:49 +00:00
if (eventLoop.handles[i].ff) {
virFreeCallback ff = eventLoop.handles[i].ff;
void *opaque = eventLoop.handles[i].opaque;
virMutexUnlock(&eventLoop.lock);
ff(opaque);
virMutexLock(&eventLoop.lock);
}
2007-06-26 19:11:00 +00:00
if ((i+1) < eventLoop.handlesCount) {
memmove(eventLoop.handles+i,
eventLoop.handles+i+1,
sizeof(struct virEventPollHandle)*(eventLoop.handlesCount
-(i+1)));
2007-06-26 19:11:00 +00:00
}
eventLoop.handlesCount--;
}
/* Release some memory if we've got a big chunk free */
gap = eventLoop.handlesAlloc - eventLoop.handlesCount;
if (eventLoop.handlesCount == 0 ||
(gap > eventLoop.handlesCount && gap > EVENT_ALLOC_EXTENT)) {
EVENT_DEBUG("Found %zu out of %zu handles slots used, releasing %zu",
eventLoop.handlesCount, eventLoop.handlesAlloc, gap);
VIR_SHRINK_N(eventLoop.handles, eventLoop.handlesAlloc, gap);
2007-06-26 19:11:00 +00:00
}
}
/*
* Run a single iteration of the event loop, blocking until
* at least one file handle has an event, or a timer expires
*/
int virEventPollRunOnce(void) {
2009-05-12 16:43:04 +00:00
struct pollfd *fds = NULL;
2007-06-26 19:11:00 +00:00
int ret, timeout, nfds;
virMutexLock(&eventLoop.lock);
eventLoop.running = 1;
virThreadSelf(&eventLoop.leader);
2007-06-26 19:11:00 +00:00
virEventPollCleanupTimeouts();
virEventPollCleanupHandles();
2009-05-12 16:43:04 +00:00
if (!(fds = virEventPollMakePollFDs(&nfds)) ||
virEventPollCalculateTimeout(&timeout) < 0)
2009-05-12 16:43:04 +00:00
goto error;
2007-06-26 19:11:00 +00:00
virMutexUnlock(&eventLoop.lock);
2007-06-26 19:11:00 +00:00
retry:
EVENT_DEBUG("Poll on %d handles %p timeout %d", nfds, fds, timeout);
2007-06-26 19:11:00 +00:00
ret = poll(fds, nfds, timeout);
if (ret < 0) {
EVENT_DEBUG("Poll got error event %d", errno);
2007-06-26 19:11:00 +00:00
if (errno == EINTR) {
goto retry;
}
virReportSystemError(errno, "%s",
_("Unable to poll on file handles"));
2009-05-12 16:43:04 +00:00
goto error_unlocked;
2007-06-26 19:11:00 +00:00
}
EVENT_DEBUG("Poll got %d event(s)", ret);
virMutexLock(&eventLoop.lock);
if (virEventPollDispatchTimeouts() < 0)
2009-05-12 16:43:04 +00:00
goto error;
2007-06-26 19:11:00 +00:00
if (ret > 0 &&
virEventPollDispatchHandles(nfds, fds) < 0)
2009-05-12 16:43:04 +00:00
goto error;
virEventPollCleanupTimeouts();
virEventPollCleanupHandles();
eventLoop.running = 0;
virMutexUnlock(&eventLoop.lock);
2009-05-12 16:43:04 +00:00
VIR_FREE(fds);
return 0;
2009-05-12 16:43:04 +00:00
error:
virMutexUnlock(&eventLoop.lock);
2009-05-12 16:43:04 +00:00
error_unlocked:
VIR_FREE(fds);
return -1;
}
/*
 * Internal watch callback for the wakeup pipe: drain the byte
 * written by virEventPollInterruptLocked() so poll() can block again.
 */
static void virEventPollHandleWakeup(int watch ATTRIBUTE_UNUSED,
                                     int fd,
                                     int events ATTRIBUTE_UNUSED,
                                     void *opaque ATTRIBUTE_UNUSED)
{
    char buf;

    virMutexLock(&eventLoop.lock);
    /* Best-effort read; the pipe is non-blocking so a short read is fine */
    ignore_value(saferead(fd, &buf, sizeof(buf)));
    virMutexUnlock(&eventLoop.lock);
}
int virEventPollInit(void)
{
if (virMutexInit(&eventLoop.lock) < 0) {
virReportSystemError(errno, "%s",
_("Unable to initialize mutex"));
return -1;
}
if (pipe(eventLoop.wakeupfd) < 0 ||
virSetNonBlock(eventLoop.wakeupfd[0]) < 0 ||
virSetNonBlock(eventLoop.wakeupfd[1]) < 0 ||
virSetCloseExec(eventLoop.wakeupfd[0]) < 0 ||
virSetCloseExec(eventLoop.wakeupfd[1]) < 0) {
virReportSystemError(errno, "%s",
_("Unable to setup wakeup pipe"));
2007-06-26 19:11:00 +00:00
return -1;
}
2007-06-26 19:11:00 +00:00
if (virEventPollAddHandle(eventLoop.wakeupfd[0],
VIR_EVENT_HANDLE_READABLE,
virEventPollHandleWakeup, NULL, NULL) < 0) {
virEventError(VIR_ERR_INTERNAL_ERROR,
_("Unable to add handle %d to event loop"),
eventLoop.wakeupfd[0]);
2007-06-26 19:11:00 +00:00
return -1;
}
2007-06-26 19:11:00 +00:00
return 0;
}
/*
 * Wake up the thread blocked in poll(), if any. Must be called
 * with eventLoop.lock held. A no-op when the loop is not running
 * or when the caller IS the loop thread (it will re-scan anyway).
 * Returns 0 on success or no-op, -1 if the pipe write failed.
 */
static int virEventPollInterruptLocked(void)
{
    char one = '\0';

    if (!eventLoop.running ||
        virThreadIsSelf(&eventLoop.leader)) {
        VIR_DEBUG("Skip interrupt, %d %d", eventLoop.running,
                  virThreadID(&eventLoop.leader));
        return 0;
    }

    VIR_DEBUG0("Interrupting");
    return safewrite(eventLoop.wakeupfd[1], &one, sizeof(one)) == sizeof(one)
        ? 0 : -1;
}
/*
 * Public wrapper: interrupt the event loop, taking the lock around
 * the locked helper. Returns 0 on success, -1 on failure.
 */
int virEventPollInterrupt(void)
{
    int rc;

    virMutexLock(&eventLoop.lock);
    rc = virEventPollInterruptLocked();
    virMutexUnlock(&eventLoop.lock);

    return rc;
}
int
virEventPollToNativeEvents(int events)
{
int ret = 0;
if(events & VIR_EVENT_HANDLE_READABLE)
ret |= POLLIN;
if(events & VIR_EVENT_HANDLE_WRITABLE)
ret |= POLLOUT;
if(events & VIR_EVENT_HANDLE_ERROR)
ret |= POLLERR;
if(events & VIR_EVENT_HANDLE_HANGUP)
ret |= POLLHUP;
return ret;
}
int
virEventPollFromNativeEvents(int events)
{
int ret = 0;
if(events & POLLIN)
ret |= VIR_EVENT_HANDLE_READABLE;
if(events & POLLOUT)
ret |= VIR_EVENT_HANDLE_WRITABLE;
if(events & POLLERR)
ret |= VIR_EVENT_HANDLE_ERROR;
if(events & POLLNVAL) /* Treat NVAL as error, since libvirt doesn't distinguish */
ret |= VIR_EVENT_HANDLE_ERROR;
if(events & POLLHUP)
ret |= VIR_EVENT_HANDLE_HANGUP;
return ret;
}