2007-06-26 19:11:00 +00:00
|
|
|
/*
|
2008-05-15 06:12:32 +00:00
|
|
|
* event.c: event loop for monitoring file handles
|
2007-06-26 19:11:00 +00:00
|
|
|
*
|
2011-01-21 19:57:03 +00:00
|
|
|
* Copyright (C) 2007, 2010-2011 Red Hat, Inc.
|
2007-06-26 19:11:00 +00:00
|
|
|
* Copyright (C) 2007 Daniel P. Berrange
|
|
|
|
*
|
|
|
|
* This library is free software; you can redistribute it and/or
|
|
|
|
* modify it under the terms of the GNU Lesser General Public
|
|
|
|
* License as published by the Free Software Foundation; either
|
|
|
|
* version 2.1 of the License, or (at your option) any later version.
|
|
|
|
*
|
|
|
|
* This library is distributed in the hope that it will be useful,
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
|
|
* Lesser General Public License for more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU Lesser General Public
|
|
|
|
* License along with this library; if not, write to the Free Software
|
|
|
|
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
|
|
|
*
|
|
|
|
* Author: Daniel P. Berrange <berrange@redhat.com>
|
|
|
|
*/
|
|
|
|
|
2008-01-29 18:15:54 +00:00
|
|
|
#include <config.h>
|
2007-06-26 19:11:00 +00:00
|
|
|
|
|
|
|
#include <stdlib.h>
|
|
|
|
#include <string.h>
|
|
|
|
#include <poll.h>
|
|
|
|
#include <sys/time.h>
|
|
|
|
#include <errno.h>
|
2008-12-04 22:14:15 +00:00
|
|
|
#include <unistd.h>
|
2011-04-06 14:10:28 +00:00
|
|
|
#include <fcntl.h>
|
2007-06-26 19:11:00 +00:00
|
|
|
|
2009-05-12 15:43:07 +00:00
|
|
|
#include "threads.h"
|
|
|
|
#include "logging.h"
|
2011-02-24 17:58:04 +00:00
|
|
|
#include "event_poll.h"
|
2008-06-06 10:52:01 +00:00
|
|
|
#include "memory.h"
|
2008-12-04 22:14:15 +00:00
|
|
|
#include "util.h"
|
2010-03-02 16:35:01 +00:00
|
|
|
#include "ignore-value.h"
|
2011-03-02 16:59:54 +00:00
|
|
|
#include "virterror_internal.h"
|
2007-06-26 19:11:00 +00:00
|
|
|
|
2011-02-16 23:37:57 +00:00
|
|
|
#define EVENT_DEBUG(fmt, ...) VIR_DEBUG(fmt, __VA_ARGS__)
|
2007-09-19 01:27:32 +00:00
|
|
|
|
2011-03-02 16:59:54 +00:00
|
|
|
#define VIR_FROM_THIS VIR_FROM_EVENT
|
|
|
|
|
|
|
|
#define virEventError(code, ...) \
|
2011-04-16 08:30:22 +00:00
|
|
|
virReportErrorHelper(VIR_FROM_EVENT, code, __FILE__, \
|
2011-03-02 16:59:54 +00:00
|
|
|
__FUNCTION__, __LINE__, __VA_ARGS__)
|
|
|
|
|
2011-02-24 17:58:04 +00:00
|
|
|
static int virEventPollInterruptLocked(void);
|
2008-12-04 22:14:15 +00:00
|
|
|
|
2007-06-26 19:11:00 +00:00
|
|
|
/* State for a single file handle being monitored */
|
2011-02-24 17:58:04 +00:00
|
|
|
struct virEventPollHandle {
|
2008-11-19 16:19:36 +00:00
|
|
|
int watch;
|
2007-06-26 19:11:00 +00:00
|
|
|
int fd;
|
|
|
|
int events;
|
|
|
|
virEventHandleCallback cb;
|
2008-11-19 16:24:01 +00:00
|
|
|
virFreeCallback ff;
|
2007-06-26 19:11:00 +00:00
|
|
|
void *opaque;
|
|
|
|
int deleted;
|
|
|
|
};
|
|
|
|
|
|
|
|
/* State for a single timer being generated */
|
2011-02-24 17:58:04 +00:00
|
|
|
struct virEventPollTimeout {
|
2007-06-26 19:11:00 +00:00
|
|
|
int timer;
|
2007-09-19 01:27:32 +00:00
|
|
|
int frequency;
|
2007-06-26 19:11:00 +00:00
|
|
|
unsigned long long expiresAt;
|
|
|
|
virEventTimeoutCallback cb;
|
2008-11-19 16:24:01 +00:00
|
|
|
virFreeCallback ff;
|
2007-06-26 19:11:00 +00:00
|
|
|
void *opaque;
|
|
|
|
int deleted;
|
|
|
|
};
|
|
|
|
|
2011-02-24 17:58:04 +00:00
|
|
|
/* Allocate extra slots for virEventPollHandle/virEventPollTimeout
|
2007-06-26 19:11:00 +00:00
|
|
|
records in this multiple */
|
|
|
|
#define EVENT_ALLOC_EXTENT 10
|
|
|
|
|
|
|
|
/* State for the main event loop */
|
2011-02-24 17:58:04 +00:00
|
|
|
struct virEventPollLoop {
|
2010-11-02 15:56:44 +00:00
|
|
|
virMutex lock;
|
2009-02-06 14:43:52 +00:00
|
|
|
int running;
|
2010-11-02 15:56:44 +00:00
|
|
|
virThread leader;
|
2008-12-04 22:14:15 +00:00
|
|
|
int wakeupfd[2];
|
2010-08-13 22:19:54 +00:00
|
|
|
size_t handlesCount;
|
|
|
|
size_t handlesAlloc;
|
2011-02-24 17:58:04 +00:00
|
|
|
struct virEventPollHandle *handles;
|
2010-08-13 22:19:54 +00:00
|
|
|
size_t timeoutsCount;
|
|
|
|
size_t timeoutsAlloc;
|
2011-02-24 17:58:04 +00:00
|
|
|
struct virEventPollTimeout *timeouts;
|
2007-06-26 19:11:00 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
/* Only have one event loop */
|
2011-02-24 17:58:04 +00:00
|
|
|
static struct virEventPollLoop eventLoop;
|
2007-06-26 19:11:00 +00:00
|
|
|
|
2008-11-19 16:19:36 +00:00
|
|
|
/* Unique ID for the next FD watch to be registered */
|
2009-05-12 16:41:49 +00:00
|
|
|
static int nextWatch = 1;
|
2008-11-19 16:19:36 +00:00
|
|
|
|
2007-06-26 19:11:00 +00:00
|
|
|
/* Unique ID for the next timer to be registered */
|
2009-05-12 16:41:49 +00:00
|
|
|
static int nextTimer = 1;
|
2007-06-26 19:11:00 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Register a callback for monitoring file handle events.
|
|
|
|
* NB, it *must* be safe to call this from within a callback
|
|
|
|
* For this reason we only ever append to existing list.
|
|
|
|
*/
|
2011-02-24 17:58:04 +00:00
|
|
|
int virEventPollAddHandle(int fd, int events,
|
2008-11-19 16:24:01 +00:00
|
|
|
virEventHandleCallback cb,
|
|
|
|
void *opaque,
|
|
|
|
virFreeCallback ff) {
|
2008-12-04 22:14:15 +00:00
|
|
|
int watch;
|
2009-08-24 16:27:55 +00:00
|
|
|
EVENT_DEBUG("Add handle fd=%d events=%d cb=%p opaque=%p", fd, events, cb, opaque);
|
2010-11-02 15:56:44 +00:00
|
|
|
virMutexLock(&eventLoop.lock);
|
2007-06-26 19:11:00 +00:00
|
|
|
if (eventLoop.handlesCount == eventLoop.handlesAlloc) {
|
2010-08-13 22:19:54 +00:00
|
|
|
EVENT_DEBUG("Used %zu handle slots, adding at least %d more",
|
2007-09-19 01:27:32 +00:00
|
|
|
eventLoop.handlesAlloc, EVENT_ALLOC_EXTENT);
|
2010-08-13 22:19:54 +00:00
|
|
|
if (VIR_RESIZE_N(eventLoop.handles, eventLoop.handlesAlloc,
|
|
|
|
eventLoop.handlesCount, EVENT_ALLOC_EXTENT) < 0) {
|
2010-11-02 15:56:44 +00:00
|
|
|
virMutexUnlock(&eventLoop.lock);
|
2007-06-26 19:11:00 +00:00
|
|
|
return -1;
|
2008-12-04 22:14:15 +00:00
|
|
|
}
|
2007-06-26 19:11:00 +00:00
|
|
|
}
|
|
|
|
|
2008-12-04 22:14:15 +00:00
|
|
|
watch = nextWatch++;
|
|
|
|
|
|
|
|
eventLoop.handles[eventLoop.handlesCount].watch = watch;
|
2007-06-26 19:11:00 +00:00
|
|
|
eventLoop.handles[eventLoop.handlesCount].fd = fd;
|
2008-10-23 13:18:18 +00:00
|
|
|
eventLoop.handles[eventLoop.handlesCount].events =
|
2011-02-24 17:58:04 +00:00
|
|
|
virEventPollToNativeEvents(events);
|
2007-06-26 19:11:00 +00:00
|
|
|
eventLoop.handles[eventLoop.handlesCount].cb = cb;
|
2008-11-19 16:24:01 +00:00
|
|
|
eventLoop.handles[eventLoop.handlesCount].ff = ff;
|
2007-06-26 19:11:00 +00:00
|
|
|
eventLoop.handles[eventLoop.handlesCount].opaque = opaque;
|
|
|
|
eventLoop.handles[eventLoop.handlesCount].deleted = 0;
|
|
|
|
|
|
|
|
eventLoop.handlesCount++;
|
|
|
|
|
2011-02-24 17:58:04 +00:00
|
|
|
virEventPollInterruptLocked();
|
2010-11-02 15:56:44 +00:00
|
|
|
virMutexUnlock(&eventLoop.lock);
|
2008-12-04 22:14:15 +00:00
|
|
|
|
|
|
|
return watch;
|
2007-06-26 19:11:00 +00:00
|
|
|
}
|
|
|
|
|
2011-02-24 17:58:04 +00:00
|
|
|
void virEventPollUpdateHandle(int watch, int events) {
|
2007-09-19 01:27:32 +00:00
|
|
|
int i;
|
2009-05-12 16:41:49 +00:00
|
|
|
EVENT_DEBUG("Update handle w=%d e=%d", watch, events);
|
|
|
|
|
|
|
|
if (watch <= 0) {
|
|
|
|
VIR_WARN("Ignoring invalid update watch %d", watch);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2010-11-02 15:56:44 +00:00
|
|
|
virMutexLock(&eventLoop.lock);
|
2007-09-19 01:27:32 +00:00
|
|
|
for (i = 0 ; i < eventLoop.handlesCount ; i++) {
|
2008-11-19 16:19:36 +00:00
|
|
|
if (eventLoop.handles[i].watch == watch) {
|
2008-10-23 13:18:18 +00:00
|
|
|
eventLoop.handles[i].events =
|
2011-02-24 17:58:04 +00:00
|
|
|
virEventPollToNativeEvents(events);
|
|
|
|
virEventPollInterruptLocked();
|
2007-09-19 01:27:32 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2010-11-02 15:56:44 +00:00
|
|
|
virMutexUnlock(&eventLoop.lock);
|
2007-09-19 01:27:32 +00:00
|
|
|
}
|
|
|
|
|
2007-06-26 19:11:00 +00:00
|
|
|
/*
|
|
|
|
* Unregister a callback from a file handle
|
|
|
|
* NB, it *must* be safe to call this from within a callback
|
|
|
|
* For this reason we only ever set a flag in the existing list.
|
|
|
|
* Actual deletion will be done out-of-band
|
|
|
|
*/
|
2011-02-24 17:58:04 +00:00
|
|
|
int virEventPollRemoveHandle(int watch) {
|
2007-06-26 19:11:00 +00:00
|
|
|
int i;
|
2009-08-24 16:27:55 +00:00
|
|
|
EVENT_DEBUG("Remove handle w=%d", watch);
|
2009-05-12 16:41:49 +00:00
|
|
|
|
|
|
|
if (watch <= 0) {
|
|
|
|
VIR_WARN("Ignoring invalid remove watch %d", watch);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2010-11-02 15:56:44 +00:00
|
|
|
virMutexLock(&eventLoop.lock);
|
2007-06-26 19:11:00 +00:00
|
|
|
for (i = 0 ; i < eventLoop.handlesCount ; i++) {
|
|
|
|
if (eventLoop.handles[i].deleted)
|
|
|
|
continue;
|
|
|
|
|
2008-11-19 16:19:36 +00:00
|
|
|
if (eventLoop.handles[i].watch == watch) {
|
|
|
|
EVENT_DEBUG("mark delete %d %d", i, eventLoop.handles[i].fd);
|
2007-06-26 19:11:00 +00:00
|
|
|
eventLoop.handles[i].deleted = 1;
|
2011-02-24 17:58:04 +00:00
|
|
|
virEventPollInterruptLocked();
|
2010-11-02 15:56:44 +00:00
|
|
|
virMutexUnlock(&eventLoop.lock);
|
2007-06-26 19:11:00 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
2010-11-02 15:56:44 +00:00
|
|
|
virMutexUnlock(&eventLoop.lock);
|
2007-06-26 19:11:00 +00:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Register a callback for a timer event
|
|
|
|
* NB, it *must* be safe to call this from within a callback
|
|
|
|
* For this reason we only ever append to existing list.
|
|
|
|
*/
|
2011-02-24 17:58:04 +00:00
|
|
|
int virEventPollAddTimeout(int frequency,
|
2008-11-19 16:24:01 +00:00
|
|
|
virEventTimeoutCallback cb,
|
|
|
|
void *opaque,
|
|
|
|
virFreeCallback ff) {
|
2007-09-19 01:27:32 +00:00
|
|
|
struct timeval now;
|
2008-12-04 22:14:15 +00:00
|
|
|
int ret;
|
2007-09-19 01:27:32 +00:00
|
|
|
EVENT_DEBUG("Adding timer %d with %d ms freq", nextTimer, frequency);
|
|
|
|
if (gettimeofday(&now, NULL) < 0) {
|
2007-06-26 19:11:00 +00:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2010-11-02 15:56:44 +00:00
|
|
|
virMutexLock(&eventLoop.lock);
|
2007-06-26 19:11:00 +00:00
|
|
|
if (eventLoop.timeoutsCount == eventLoop.timeoutsAlloc) {
|
2010-08-13 22:19:54 +00:00
|
|
|
EVENT_DEBUG("Used %zu timeout slots, adding at least %d more",
|
2007-09-19 01:27:32 +00:00
|
|
|
eventLoop.timeoutsAlloc, EVENT_ALLOC_EXTENT);
|
2010-08-13 22:19:54 +00:00
|
|
|
if (VIR_RESIZE_N(eventLoop.timeouts, eventLoop.timeoutsAlloc,
|
|
|
|
eventLoop.timeoutsCount, EVENT_ALLOC_EXTENT) < 0) {
|
2010-11-02 15:56:44 +00:00
|
|
|
virMutexUnlock(&eventLoop.lock);
|
2007-06-26 19:11:00 +00:00
|
|
|
return -1;
|
2008-12-04 22:14:15 +00:00
|
|
|
}
|
2007-06-26 19:11:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
eventLoop.timeouts[eventLoop.timeoutsCount].timer = nextTimer++;
|
2007-09-19 01:27:32 +00:00
|
|
|
eventLoop.timeouts[eventLoop.timeoutsCount].frequency = frequency;
|
2007-06-26 19:11:00 +00:00
|
|
|
eventLoop.timeouts[eventLoop.timeoutsCount].cb = cb;
|
2008-11-19 16:24:01 +00:00
|
|
|
eventLoop.timeouts[eventLoop.timeoutsCount].ff = ff;
|
2007-06-26 19:11:00 +00:00
|
|
|
eventLoop.timeouts[eventLoop.timeoutsCount].opaque = opaque;
|
|
|
|
eventLoop.timeouts[eventLoop.timeoutsCount].deleted = 0;
|
|
|
|
eventLoop.timeouts[eventLoop.timeoutsCount].expiresAt =
|
2007-09-19 01:27:32 +00:00
|
|
|
frequency >= 0 ? frequency +
|
|
|
|
(((unsigned long long)now.tv_sec)*1000) +
|
|
|
|
(((unsigned long long)now.tv_usec)/1000) : 0;
|
2007-06-26 19:11:00 +00:00
|
|
|
|
|
|
|
eventLoop.timeoutsCount++;
|
2008-12-04 22:14:15 +00:00
|
|
|
ret = nextTimer-1;
|
2011-02-24 17:58:04 +00:00
|
|
|
virEventPollInterruptLocked();
|
2010-11-02 15:56:44 +00:00
|
|
|
virMutexUnlock(&eventLoop.lock);
|
2008-12-04 22:14:15 +00:00
|
|
|
return ret;
|
2007-06-26 19:11:00 +00:00
|
|
|
}
|
|
|
|
|
2011-02-24 17:58:04 +00:00
|
|
|
void virEventPollUpdateTimeout(int timer, int frequency) {
|
2007-09-19 01:27:32 +00:00
|
|
|
struct timeval tv;
|
|
|
|
int i;
|
|
|
|
EVENT_DEBUG("Updating timer %d timeout with %d ms freq", timer, frequency);
|
2009-05-12 16:41:49 +00:00
|
|
|
|
|
|
|
if (timer <= 0) {
|
|
|
|
VIR_WARN("Ignoring invalid update timer %d", timer);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2007-09-19 01:27:32 +00:00
|
|
|
if (gettimeofday(&tv, NULL) < 0) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2010-11-02 15:56:44 +00:00
|
|
|
virMutexLock(&eventLoop.lock);
|
2007-09-19 01:27:32 +00:00
|
|
|
for (i = 0 ; i < eventLoop.timeoutsCount ; i++) {
|
|
|
|
if (eventLoop.timeouts[i].timer == timer) {
|
|
|
|
eventLoop.timeouts[i].frequency = frequency;
|
|
|
|
eventLoop.timeouts[i].expiresAt =
|
|
|
|
frequency >= 0 ? frequency +
|
|
|
|
(((unsigned long long)tv.tv_sec)*1000) +
|
|
|
|
(((unsigned long long)tv.tv_usec)/1000) : 0;
|
2011-02-24 17:58:04 +00:00
|
|
|
virEventPollInterruptLocked();
|
2007-09-19 01:27:32 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2010-11-02 15:56:44 +00:00
|
|
|
virMutexUnlock(&eventLoop.lock);
|
2007-09-19 01:27:32 +00:00
|
|
|
}
|
|
|
|
|
2007-06-26 19:11:00 +00:00
|
|
|
/*
|
|
|
|
* Unregister a callback for a timer
|
|
|
|
* NB, it *must* be safe to call this from within a callback
|
|
|
|
* For this reason we only ever set a flag in the existing list.
|
|
|
|
* Actual deletion will be done out-of-band
|
|
|
|
*/
|
2011-02-24 17:58:04 +00:00
|
|
|
int virEventPollRemoveTimeout(int timer) {
|
2007-06-26 19:11:00 +00:00
|
|
|
int i;
|
2007-09-19 01:27:32 +00:00
|
|
|
EVENT_DEBUG("Remove timer %d", timer);
|
2009-05-12 16:41:49 +00:00
|
|
|
|
|
|
|
if (timer <= 0) {
|
|
|
|
VIR_WARN("Ignoring invalid remove timer %d", timer);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2010-11-02 15:56:44 +00:00
|
|
|
virMutexLock(&eventLoop.lock);
|
2007-06-26 19:11:00 +00:00
|
|
|
for (i = 0 ; i < eventLoop.timeoutsCount ; i++) {
|
|
|
|
if (eventLoop.timeouts[i].deleted)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (eventLoop.timeouts[i].timer == timer) {
|
|
|
|
eventLoop.timeouts[i].deleted = 1;
|
2011-02-24 17:58:04 +00:00
|
|
|
virEventPollInterruptLocked();
|
2010-11-02 15:56:44 +00:00
|
|
|
virMutexUnlock(&eventLoop.lock);
|
2007-06-26 19:11:00 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
2010-11-02 15:56:44 +00:00
|
|
|
virMutexUnlock(&eventLoop.lock);
|
2007-06-26 19:11:00 +00:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Iterates over all registered timeouts and determine which
|
|
|
|
* will be the first to expire.
|
|
|
|
* @timeout: filled with expiry time of soonest timer, or -1 if
|
|
|
|
* no timeout is pending
|
|
|
|
* returns: 0 on success, -1 on error
|
|
|
|
*/
|
2011-02-24 17:58:04 +00:00
|
|
|
static int virEventPollCalculateTimeout(int *timeout) {
|
2007-06-26 19:11:00 +00:00
|
|
|
unsigned long long then = 0;
|
|
|
|
int i;
|
2010-08-13 22:19:54 +00:00
|
|
|
EVENT_DEBUG("Calculate expiry of %zu timers", eventLoop.timeoutsCount);
|
2007-06-26 19:11:00 +00:00
|
|
|
/* Figure out if we need a timeout */
|
|
|
|
for (i = 0 ; i < eventLoop.timeoutsCount ; i++) {
|
2009-05-12 16:43:04 +00:00
|
|
|
if (eventLoop.timeouts[i].frequency < 0)
|
2007-06-26 19:11:00 +00:00
|
|
|
continue;
|
|
|
|
|
2007-09-19 01:27:32 +00:00
|
|
|
EVENT_DEBUG("Got a timeout scheduled for %llu", eventLoop.timeouts[i].expiresAt);
|
2007-06-26 19:11:00 +00:00
|
|
|
if (then == 0 ||
|
|
|
|
eventLoop.timeouts[i].expiresAt < then)
|
|
|
|
then = eventLoop.timeouts[i].expiresAt;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Calculate how long we should wait for a timeout if needed */
|
|
|
|
if (then > 0) {
|
|
|
|
struct timeval tv;
|
|
|
|
|
|
|
|
if (gettimeofday(&tv, NULL) < 0) {
|
2011-03-02 16:59:54 +00:00
|
|
|
virReportSystemError(errno, "%s",
|
|
|
|
_("Unable to get current time"));
|
2007-06-26 19:11:00 +00:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
*timeout = then -
|
|
|
|
((((unsigned long long)tv.tv_sec)*1000) +
|
|
|
|
(((unsigned long long)tv.tv_usec)/1000));
|
|
|
|
|
|
|
|
if (*timeout < 0)
|
2007-09-19 01:27:32 +00:00
|
|
|
*timeout = 0;
|
2007-06-26 19:11:00 +00:00
|
|
|
} else {
|
|
|
|
*timeout = -1;
|
|
|
|
}
|
|
|
|
|
2007-09-19 01:27:32 +00:00
|
|
|
EVENT_DEBUG("Timeout at %llu due in %d ms", then, *timeout);
|
|
|
|
|
2007-06-26 19:11:00 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Allocate a pollfd array containing data for all registered
|
|
|
|
* file handles. The caller must free the returned data struct
|
|
|
|
* returns: the pollfd array, or NULL on error
|
|
|
|
*/
|
2011-02-24 17:58:04 +00:00
|
|
|
static struct pollfd *virEventPollMakePollFDs(int *nfds) {
|
2007-06-26 19:11:00 +00:00
|
|
|
struct pollfd *fds;
|
2009-05-12 16:43:04 +00:00
|
|
|
int i;
|
2007-06-26 19:11:00 +00:00
|
|
|
|
2009-08-24 16:27:55 +00:00
|
|
|
*nfds = 0;
|
|
|
|
for (i = 0 ; i < eventLoop.handlesCount ; i++) {
|
unlock eventLoop before calling callback function
When I use newest libvirt to save a domain, libvirtd will be deadlock.
Here is the output of gdb:
(gdb) thread 3
[Switching to thread 3 (Thread 0x7f972a1fc710 (LWP 30265))]#0 0x000000351fe0e034 in __lll_lock_wait () from /lib64/libpthread.so.0
(gdb) bt
at qemu/qemu_driver.c:2074
ret=0x7f972a1fbbe0) at remote.c:2273
(gdb) thread 7
[Switching to thread 7 (Thread 0x7f9730bcd710 (LWP 30261))]#0 0x000000351fe0e034 in __lll_lock_wait () from /lib64/libpthread.so.0
(gdb) bt
(gdb) p *(virMutexPtr)0x6fdd60
$2 = {lock = {__data = {__lock = 2, __count = 0, __owner = 30261, __nusers = 1, __kind = 0, __spins = 0, __list = {__prev = 0x0, __next = 0x0}},
__size = "\002\000\000\000\000\000\000\000\065v\000\000\001", '\000' <repeats 26 times>, __align = 2}}
(gdb) p *(virMutexPtr)0x1a63ac0
$3 = {lock = {__data = {__lock = 2, __count = 0, __owner = 30265, __nusers = 1, __kind = 0, __spins = 0, __list = {__prev = 0x0, __next = 0x0}},
__size = "\002\000\000\000\000\000\000\000\071v\000\000\001", '\000' <repeats 26 times>, __align = 2}}
(gdb) info threads
7 Thread 0x7f9730bcd710 (LWP 30261) 0x000000351fe0e034 in __lll_lock_wait () from /lib64/libpthread.so.0
6 Thread 0x7f972bfff710 (LWP 30262) 0x000000351fe0b43c in pthread_cond_wait@@GLIBC_2.3.2 () from /lib64/libpthread.so.0
5 Thread 0x7f972b5fe710 (LWP 30263) 0x000000351fe0b43c in pthread_cond_wait@@GLIBC_2.3.2 () from /lib64/libpthread.so.0
4 Thread 0x7f972abfd710 (LWP 30264) 0x000000351fe0b43c in pthread_cond_wait@@GLIBC_2.3.2 () from /lib64/libpthread.so.0
* 3 Thread 0x7f972a1fc710 (LWP 30265) 0x000000351fe0e034 in __lll_lock_wait () from /lib64/libpthread.so.0
2 Thread 0x7f97297fb710 (LWP 30266) 0x000000351fe0b43c in pthread_cond_wait@@GLIBC_2.3.2 () from /lib64/libpthread.so.0
1 Thread 0x7f9737aac800 (LWP 30260) 0x000000351fe0803d in pthread_join () from /lib64/libpthread.so.0
The reason is that we will try to lock some object in callback function, and we may call event API with locking the same object.
In the function virEventDispatchHandles(), we unlock eventLoop before calling callback function. I think we should
do the same thing in the function virEventCleanupTimeouts() and virEventCleanupHandles().
Signed-off-by: Wen Congyang <wency@cn.fujitsu.com>
Signed-off-by: Eric Blake <eblake@redhat.com>
2011-03-07 06:06:49 +00:00
|
|
|
if (eventLoop.handles[i].events && !eventLoop.handles[i].deleted)
|
2009-08-24 16:27:55 +00:00
|
|
|
(*nfds)++;
|
|
|
|
}
|
|
|
|
|
2007-06-26 19:11:00 +00:00
|
|
|
/* Setup the poll file handle data structs */
|
2011-03-02 16:59:54 +00:00
|
|
|
if (VIR_ALLOC_N(fds, *nfds) < 0) {
|
|
|
|
virReportOOMError();
|
2009-05-12 16:43:04 +00:00
|
|
|
return NULL;
|
2011-03-02 16:59:54 +00:00
|
|
|
}
|
2007-06-26 19:11:00 +00:00
|
|
|
|
2009-08-24 16:27:55 +00:00
|
|
|
*nfds = 0;
|
2009-05-12 16:43:04 +00:00
|
|
|
for (i = 0 ; i < eventLoop.handlesCount ; i++) {
|
unlock eventLoop before calling callback function
When I use newest libvirt to save a domain, libvirtd will be deadlock.
Here is the output of gdb:
(gdb) thread 3
[Switching to thread 3 (Thread 0x7f972a1fc710 (LWP 30265))]#0 0x000000351fe0e034 in __lll_lock_wait () from /lib64/libpthread.so.0
(gdb) bt
at qemu/qemu_driver.c:2074
ret=0x7f972a1fbbe0) at remote.c:2273
(gdb) thread 7
[Switching to thread 7 (Thread 0x7f9730bcd710 (LWP 30261))]#0 0x000000351fe0e034 in __lll_lock_wait () from /lib64/libpthread.so.0
(gdb) bt
(gdb) p *(virMutexPtr)0x6fdd60
$2 = {lock = {__data = {__lock = 2, __count = 0, __owner = 30261, __nusers = 1, __kind = 0, __spins = 0, __list = {__prev = 0x0, __next = 0x0}},
__size = "\002\000\000\000\000\000\000\000\065v\000\000\001", '\000' <repeats 26 times>, __align = 2}}
(gdb) p *(virMutexPtr)0x1a63ac0
$3 = {lock = {__data = {__lock = 2, __count = 0, __owner = 30265, __nusers = 1, __kind = 0, __spins = 0, __list = {__prev = 0x0, __next = 0x0}},
__size = "\002\000\000\000\000\000\000\000\071v\000\000\001", '\000' <repeats 26 times>, __align = 2}}
(gdb) info threads
7 Thread 0x7f9730bcd710 (LWP 30261) 0x000000351fe0e034 in __lll_lock_wait () from /lib64/libpthread.so.0
6 Thread 0x7f972bfff710 (LWP 30262) 0x000000351fe0b43c in pthread_cond_wait@@GLIBC_2.3.2 () from /lib64/libpthread.so.0
5 Thread 0x7f972b5fe710 (LWP 30263) 0x000000351fe0b43c in pthread_cond_wait@@GLIBC_2.3.2 () from /lib64/libpthread.so.0
4 Thread 0x7f972abfd710 (LWP 30264) 0x000000351fe0b43c in pthread_cond_wait@@GLIBC_2.3.2 () from /lib64/libpthread.so.0
* 3 Thread 0x7f972a1fc710 (LWP 30265) 0x000000351fe0e034 in __lll_lock_wait () from /lib64/libpthread.so.0
2 Thread 0x7f97297fb710 (LWP 30266) 0x000000351fe0b43c in pthread_cond_wait@@GLIBC_2.3.2 () from /lib64/libpthread.so.0
1 Thread 0x7f9737aac800 (LWP 30260) 0x000000351fe0803d in pthread_join () from /lib64/libpthread.so.0
The reason is that we will try to lock some object in callback function, and we may call event API with locking the same object.
In the function virEventDispatchHandles(), we unlock eventLoop before calling callback function. I think we should
do the same thing in the function virEventCleanupTimeouts() and virEventCleanupHandles().
Signed-off-by: Wen Congyang <wency@cn.fujitsu.com>
Signed-off-by: Eric Blake <eblake@redhat.com>
2011-03-07 06:06:49 +00:00
|
|
|
EVENT_DEBUG("Prepare n=%d w=%d, f=%d e=%d d=%d", i,
|
2009-05-12 16:43:04 +00:00
|
|
|
eventLoop.handles[i].watch,
|
|
|
|
eventLoop.handles[i].fd,
|
unlock eventLoop before calling callback function
When I use newest libvirt to save a domain, libvirtd will be deadlock.
Here is the output of gdb:
(gdb) thread 3
[Switching to thread 3 (Thread 0x7f972a1fc710 (LWP 30265))]#0 0x000000351fe0e034 in __lll_lock_wait () from /lib64/libpthread.so.0
(gdb) bt
at qemu/qemu_driver.c:2074
ret=0x7f972a1fbbe0) at remote.c:2273
(gdb) thread 7
[Switching to thread 7 (Thread 0x7f9730bcd710 (LWP 30261))]#0 0x000000351fe0e034 in __lll_lock_wait () from /lib64/libpthread.so.0
(gdb) bt
(gdb) p *(virMutexPtr)0x6fdd60
$2 = {lock = {__data = {__lock = 2, __count = 0, __owner = 30261, __nusers = 1, __kind = 0, __spins = 0, __list = {__prev = 0x0, __next = 0x0}},
__size = "\002\000\000\000\000\000\000\000\065v\000\000\001", '\000' <repeats 26 times>, __align = 2}}
(gdb) p *(virMutexPtr)0x1a63ac0
$3 = {lock = {__data = {__lock = 2, __count = 0, __owner = 30265, __nusers = 1, __kind = 0, __spins = 0, __list = {__prev = 0x0, __next = 0x0}},
__size = "\002\000\000\000\000\000\000\000\071v\000\000\001", '\000' <repeats 26 times>, __align = 2}}
(gdb) info threads
7 Thread 0x7f9730bcd710 (LWP 30261) 0x000000351fe0e034 in __lll_lock_wait () from /lib64/libpthread.so.0
6 Thread 0x7f972bfff710 (LWP 30262) 0x000000351fe0b43c in pthread_cond_wait@@GLIBC_2.3.2 () from /lib64/libpthread.so.0
5 Thread 0x7f972b5fe710 (LWP 30263) 0x000000351fe0b43c in pthread_cond_wait@@GLIBC_2.3.2 () from /lib64/libpthread.so.0
4 Thread 0x7f972abfd710 (LWP 30264) 0x000000351fe0b43c in pthread_cond_wait@@GLIBC_2.3.2 () from /lib64/libpthread.so.0
* 3 Thread 0x7f972a1fc710 (LWP 30265) 0x000000351fe0e034 in __lll_lock_wait () from /lib64/libpthread.so.0
2 Thread 0x7f97297fb710 (LWP 30266) 0x000000351fe0b43c in pthread_cond_wait@@GLIBC_2.3.2 () from /lib64/libpthread.so.0
1 Thread 0x7f9737aac800 (LWP 30260) 0x000000351fe0803d in pthread_join () from /lib64/libpthread.so.0
The reason is that we will try to lock some object in callback function, and we may call event API with locking the same object.
In the function virEventDispatchHandles(), we unlock eventLoop before calling callback function. I think we should
do the same thing in the function virEventCleanupTimeouts() and virEventCleanupHandles().
Signed-off-by: Wen Congyang <wency@cn.fujitsu.com>
Signed-off-by: Eric Blake <eblake@redhat.com>
2011-03-07 06:06:49 +00:00
|
|
|
eventLoop.handles[i].events,
|
|
|
|
eventLoop.handles[i].deleted);
|
|
|
|
if (!eventLoop.handles[i].events || eventLoop.handles[i].deleted)
|
2009-08-24 16:27:55 +00:00
|
|
|
continue;
|
|
|
|
fds[*nfds].fd = eventLoop.handles[i].fd;
|
|
|
|
fds[*nfds].events = eventLoop.handles[i].events;
|
|
|
|
fds[*nfds].revents = 0;
|
|
|
|
(*nfds)++;
|
2007-09-19 01:27:32 +00:00
|
|
|
//EVENT_DEBUG("Wait for %d %d", eventLoop.handles[i].fd, eventLoop.handles[i].events);
|
2007-06-26 19:11:00 +00:00
|
|
|
}
|
|
|
|
|
2009-05-12 16:43:04 +00:00
|
|
|
return fds;
|
2007-06-26 19:11:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Iterate over all timers and determine if any have expired.
|
|
|
|
* Invoke the user supplied callback for each timer whose
|
|
|
|
* expiry time is met, and schedule the next timeout. Does
|
|
|
|
* not try to 'catch up' on time if the actual expiry time
|
|
|
|
* was later than the requested time.
|
|
|
|
*
|
|
|
|
* This method must cope with new timers being registered
|
|
|
|
* by a callback, and must skip any timers marked as deleted.
|
|
|
|
*
|
|
|
|
* Returns 0 upon success, -1 if an error occurred
|
|
|
|
*/
|
2011-02-24 17:58:04 +00:00
|
|
|
static int virEventPollDispatchTimeouts(void) {
|
2007-06-26 19:11:00 +00:00
|
|
|
struct timeval tv;
|
|
|
|
unsigned long long now;
|
|
|
|
int i;
|
|
|
|
/* Save this now - it may be changed during dispatch */
|
|
|
|
int ntimeouts = eventLoop.timeoutsCount;
|
2011-02-16 23:37:57 +00:00
|
|
|
VIR_DEBUG("Dispatch %d", ntimeouts);
|
2007-06-26 19:11:00 +00:00
|
|
|
|
|
|
|
if (gettimeofday(&tv, NULL) < 0) {
|
2011-03-02 16:59:54 +00:00
|
|
|
virReportSystemError(errno, "%s",
|
|
|
|
_("Unable to get current time"));
|
2007-06-26 19:11:00 +00:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
now = (((unsigned long long)tv.tv_sec)*1000) +
|
|
|
|
(((unsigned long long)tv.tv_usec)/1000);
|
|
|
|
|
|
|
|
for (i = 0 ; i < ntimeouts ; i++) {
|
2007-09-19 01:27:32 +00:00
|
|
|
if (eventLoop.timeouts[i].deleted || eventLoop.timeouts[i].frequency < 0)
|
2007-06-26 19:11:00 +00:00
|
|
|
continue;
|
|
|
|
|
2009-12-02 11:53:42 +00:00
|
|
|
/* Add 20ms fuzz so we don't pointlessly spin doing
|
|
|
|
* <10ms sleeps, particularly on kernels with low HZ
|
|
|
|
* it is fine that a timer expires 20ms earlier than
|
|
|
|
* requested
|
|
|
|
*/
|
|
|
|
if (eventLoop.timeouts[i].expiresAt <= (now+20)) {
|
2008-12-04 22:14:15 +00:00
|
|
|
virEventTimeoutCallback cb = eventLoop.timeouts[i].cb;
|
|
|
|
int timer = eventLoop.timeouts[i].timer;
|
|
|
|
void *opaque = eventLoop.timeouts[i].opaque;
|
2007-06-26 19:11:00 +00:00
|
|
|
eventLoop.timeouts[i].expiresAt =
|
2007-09-19 01:27:32 +00:00
|
|
|
now + eventLoop.timeouts[i].frequency;
|
2008-12-04 22:14:15 +00:00
|
|
|
|
2010-11-02 15:56:44 +00:00
|
|
|
virMutexUnlock(&eventLoop.lock);
|
2008-12-04 22:14:15 +00:00
|
|
|
(cb)(timer, opaque);
|
2010-11-02 15:56:44 +00:00
|
|
|
virMutexLock(&eventLoop.lock);
|
2007-06-26 19:11:00 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/* Iterate over all file handles and dispatch any which
|
|
|
|
* have pending events listed in the poll() data. Invoke
|
|
|
|
* the user supplied callback for each handle which has
|
|
|
|
* pending events
|
|
|
|
*
|
|
|
|
* This method must cope with new handles being registered
|
|
|
|
* by a callback, and must skip any handles marked as deleted.
|
|
|
|
*
|
|
|
|
* Returns 0 upon success, -1 if an error occurred
|
|
|
|
*/
|
2011-02-24 17:58:04 +00:00
|
|
|
static int virEventPollDispatchHandles(int nfds, struct pollfd *fds) {
|
2009-08-24 16:27:55 +00:00
|
|
|
int i, n;
|
2011-02-16 23:37:57 +00:00
|
|
|
VIR_DEBUG("Dispatch %d", nfds);
|
2007-06-26 19:11:00 +00:00
|
|
|
|
2009-05-12 16:43:04 +00:00
|
|
|
/* NB, use nfds not eventLoop.handlesCount, because new
|
|
|
|
* fds might be added on end of list, and they're not
|
|
|
|
* in the fds array we've got */
|
2009-08-24 16:27:55 +00:00
|
|
|
for (i = 0, n = 0 ; n < nfds && i < eventLoop.handlesCount ; n++) {
|
|
|
|
while ((eventLoop.handles[i].fd != fds[n].fd ||
|
|
|
|
eventLoop.handles[i].events == 0) &&
|
|
|
|
i < eventLoop.handlesCount) {
|
|
|
|
i++;
|
|
|
|
}
|
|
|
|
if (i == eventLoop.handlesCount)
|
|
|
|
break;
|
|
|
|
|
2011-02-16 23:37:57 +00:00
|
|
|
VIR_DEBUG("i=%d w=%d", i, eventLoop.handles[i].watch);
|
2007-06-26 19:11:00 +00:00
|
|
|
if (eventLoop.handles[i].deleted) {
|
2009-05-12 16:43:04 +00:00
|
|
|
EVENT_DEBUG("Skip deleted n=%d w=%d f=%d", i,
|
|
|
|
eventLoop.handles[i].watch, eventLoop.handles[i].fd);
|
2007-06-26 19:11:00 +00:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2009-08-24 16:27:55 +00:00
|
|
|
if (fds[n].revents) {
|
2008-12-04 22:14:15 +00:00
|
|
|
virEventHandleCallback cb = eventLoop.handles[i].cb;
|
2011-01-21 19:57:03 +00:00
|
|
|
int watch = eventLoop.handles[i].watch;
|
2008-12-04 22:14:15 +00:00
|
|
|
void *opaque = eventLoop.handles[i].opaque;
|
2011-02-24 17:58:04 +00:00
|
|
|
int hEvents = virEventPollFromNativeEvents(fds[n].revents);
|
2009-05-12 16:43:04 +00:00
|
|
|
EVENT_DEBUG("Dispatch n=%d f=%d w=%d e=%d %p", i,
|
2011-01-21 19:57:03 +00:00
|
|
|
fds[n].fd, watch, fds[n].revents, opaque);
|
2010-11-02 15:56:44 +00:00
|
|
|
virMutexUnlock(&eventLoop.lock);
|
2011-01-21 19:57:03 +00:00
|
|
|
(cb)(watch, fds[n].fd, hEvents, opaque);
|
2010-11-02 15:56:44 +00:00
|
|
|
virMutexLock(&eventLoop.lock);
|
2007-06-26 19:11:00 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/* Used post dispatch to actually remove any timers that
 * were previously marked as deleted. This asynchronous
 * cleanup is needed to make dispatch re-entrant safe.
 */
static void virEventPollCleanupTimeouts(void) {
    int i;
    size_t gap;  /* number of unused slots at the tail of the timeouts array */
    VIR_DEBUG("Cleanup %zu", eventLoop.timeoutsCount);

    /* Remove deleted entries, shuffling down remaining
     * entries as needed to form contiguous series
     */
    for (i = 0 ; i < eventLoop.timeoutsCount ; ) {
        if (!eventLoop.timeouts[i].deleted) {
            i++;
            continue;
        }

        EVENT_DEBUG("Purging timeout %d with id %d", i,
                    eventLoop.timeouts[i].timer);
        /* Invoke the free callback with eventLoop.lock dropped: the
         * callback may re-enter the event API (or take other locks)
         * and would otherwise deadlock */
        if (eventLoop.timeouts[i].ff) {
            virFreeCallback ff = eventLoop.timeouts[i].ff;
            void *opaque = eventLoop.timeouts[i].opaque;
            virMutexUnlock(&eventLoop.lock);
            ff(opaque);
            virMutexLock(&eventLoop.lock);
        }

        /* Close the gap left by the purged entry; 'i' is deliberately
         * NOT advanced so the shifted-in entry is examined next */
        if ((i+1) < eventLoop.timeoutsCount) {
            memmove(eventLoop.timeouts+i,
                    eventLoop.timeouts+i+1,
                    sizeof(struct virEventPollTimeout)*(eventLoop.timeoutsCount
                                                        -(i+1)));
        }
        eventLoop.timeoutsCount--;
    }

    /* Release some memory if we've got a big chunk free */
    /* Shrink only when more than half the slots (and more than
     * EVENT_ALLOC_EXTENT) are free; the gap > count guard prevents the
     * size_t wrap-around that previously freed live entries */
    gap = eventLoop.timeoutsAlloc - eventLoop.timeoutsCount;
    if (eventLoop.timeoutsCount == 0 ||
        (gap > eventLoop.timeoutsCount && gap > EVENT_ALLOC_EXTENT)) {
        EVENT_DEBUG("Found %zu out of %zu timeout slots used, releasing %zu",
                    eventLoop.timeoutsCount, eventLoop.timeoutsAlloc, gap);
        VIR_SHRINK_N(eventLoop.timeouts, eventLoop.timeoutsAlloc, gap);
    }
}
|
|
|
|
|
|
|
|
/* Used post dispatch to actually remove any handles that
 * were previously marked as deleted. This asynchronous
 * cleanup is needed to make dispatch re-entrant safe.
 */
static void virEventPollCleanupHandles(void) {
    int i;
    size_t gap;  /* number of unused slots at the tail of the handles array */
    VIR_DEBUG("Cleanup %zu", eventLoop.handlesCount);

    /* Remove deleted entries, shuffling down remaining
     * entries as needed to form contiguous series
     */
    for (i = 0 ; i < eventLoop.handlesCount ; ) {
        if (!eventLoop.handles[i].deleted) {
            i++;
            continue;
        }

        /* Invoke the free callback with eventLoop.lock dropped: the
         * callback may re-enter the event API (or take other locks)
         * and would otherwise deadlock */
        if (eventLoop.handles[i].ff) {
            virFreeCallback ff = eventLoop.handles[i].ff;
            void *opaque = eventLoop.handles[i].opaque;
            virMutexUnlock(&eventLoop.lock);
            ff(opaque);
            virMutexLock(&eventLoop.lock);
        }

        /* Close the gap left by the purged entry; 'i' is deliberately
         * NOT advanced so the shifted-in entry is examined next */
        if ((i+1) < eventLoop.handlesCount) {
            memmove(eventLoop.handles+i,
                    eventLoop.handles+i+1,
                    sizeof(struct virEventPollHandle)*(eventLoop.handlesCount
                                                       -(i+1)));
        }
        eventLoop.handlesCount--;
    }

    /* Release some memory if we've got a big chunk free */
    /* Shrink only when more than half the slots (and more than
     * EVENT_ALLOC_EXTENT) are free; the gap > count guard prevents the
     * size_t wrap-around that previously freed live entries */
    gap = eventLoop.handlesAlloc - eventLoop.handlesCount;
    if (eventLoop.handlesCount == 0 ||
        (gap > eventLoop.handlesCount && gap > EVENT_ALLOC_EXTENT)) {
        EVENT_DEBUG("Found %zu out of %zu handles slots used, releasing %zu",
                    eventLoop.handlesCount, eventLoop.handlesAlloc, gap);
        VIR_SHRINK_N(eventLoop.handles, eventLoop.handlesAlloc, gap);
    }
}
|
|
|
|
|
|
|
|
/*
 * Run a single iteration of the event loop, blocking until
 * at least one file handle has an event, or a timer expires
 *
 * Returns 0 on a completed iteration, -1 on error (error reported).
 */
int virEventPollRunOnce(void) {
    struct pollfd *fds = NULL;
    int ret, timeout, nfds;

    virMutexLock(&eventLoop.lock);
    eventLoop.running = 1;
    /* Record the polling thread so virEventPollInterruptLocked
     * can skip writing the wakeup byte when called from it */
    virThreadSelf(&eventLoop.leader);

    /* Purge anything marked deleted during the previous iteration */
    virEventPollCleanupTimeouts();
    virEventPollCleanupHandles();

    if (!(fds = virEventPollMakePollFDs(&nfds)) ||
        virEventPollCalculateTimeout(&timeout) < 0)
        goto error;

    /* Drop the lock while blocked in poll() so other threads can
     * add/remove handles and timers (and interrupt us) meanwhile */
    virMutexUnlock(&eventLoop.lock);

 retry:
    EVENT_DEBUG("Poll on %d handles %p timeout %d", nfds, fds, timeout);
    ret = poll(fds, nfds, timeout);
    if (ret < 0) {
        EVENT_DEBUG("Poll got error event %d", errno);
        if (errno == EINTR) {
            /* Interrupted by a signal: just poll again */
            goto retry;
        }
        virReportSystemError(errno, "%s",
                             _("Unable to poll on file handles"));
        goto error_unlocked;
    }
    EVENT_DEBUG("Poll got %d event(s)", ret);

    virMutexLock(&eventLoop.lock);
    /* Timeouts are dispatched even if poll() timed out (ret == 0) */
    if (virEventPollDispatchTimeouts() < 0)
        goto error;

    if (ret > 0 &&
        virEventPollDispatchHandles(nfds, fds) < 0)
        goto error;

    /* Purge anything the dispatched callbacks marked deleted */
    virEventPollCleanupTimeouts();
    virEventPollCleanupHandles();

    eventLoop.running = 0;
    virMutexUnlock(&eventLoop.lock);
    VIR_FREE(fds);
    return 0;

error:
    /* Reached with eventLoop.lock held */
    virMutexUnlock(&eventLoop.lock);
error_unlocked:
    VIR_FREE(fds);
    return -1;
}
|
|
|
|
|
2010-11-02 15:56:44 +00:00
|
|
|
|
2011-02-24 17:58:04 +00:00
|
|
|
/*
 * Callback registered on the read end of the wakeup pipe.
 * Drains one byte written by virEventPollInterruptLocked(); the
 * data itself is meaningless — popping the loop out of poll()
 * was the whole point.
 */
static void virEventPollHandleWakeup(int watch ATTRIBUTE_UNUSED,
                                     int fd,
                                     int events ATTRIBUTE_UNUSED,
                                     void *opaque ATTRIBUTE_UNUSED)
{
    char drained;

    virMutexLock(&eventLoop.lock);
    /* Result intentionally discarded; failure to read is harmless */
    ignore_value(saferead(fd, &drained, sizeof(drained)));
    virMutexUnlock(&eventLoop.lock);
}
|
|
|
|
|
2011-02-24 17:58:04 +00:00
|
|
|
int virEventPollInit(void)
|
2008-12-04 22:14:15 +00:00
|
|
|
{
|
2011-02-24 17:58:04 +00:00
|
|
|
if (virMutexInit(&eventLoop.lock) < 0) {
|
2011-03-02 16:59:54 +00:00
|
|
|
virReportSystemError(errno, "%s",
|
|
|
|
_("Unable to initialize mutex"));
|
2008-12-04 22:14:15 +00:00
|
|
|
return -1;
|
2011-02-24 17:58:04 +00:00
|
|
|
}
|
2008-12-04 22:14:15 +00:00
|
|
|
|
2011-04-06 14:10:28 +00:00
|
|
|
if (pipe2(eventLoop.wakeupfd, O_CLOEXEC | O_NONBLOCK) < 0) {
|
2011-03-02 16:59:54 +00:00
|
|
|
virReportSystemError(errno, "%s",
|
|
|
|
_("Unable to setup wakeup pipe"));
|
2007-06-26 19:11:00 +00:00
|
|
|
return -1;
|
2011-02-24 17:58:04 +00:00
|
|
|
}
|
2007-06-26 19:11:00 +00:00
|
|
|
|
2011-02-24 17:58:04 +00:00
|
|
|
if (virEventPollAddHandle(eventLoop.wakeupfd[0],
|
2008-12-04 22:14:15 +00:00
|
|
|
VIR_EVENT_HANDLE_READABLE,
|
2011-02-24 17:58:04 +00:00
|
|
|
virEventPollHandleWakeup, NULL, NULL) < 0) {
|
2011-03-02 16:59:54 +00:00
|
|
|
virEventError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
_("Unable to add handle %d to event loop"),
|
|
|
|
eventLoop.wakeupfd[0]);
|
2007-06-26 19:11:00 +00:00
|
|
|
return -1;
|
2011-02-24 17:58:04 +00:00
|
|
|
}
|
2007-06-26 19:11:00 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
2008-10-23 13:18:18 +00:00
|
|
|
|
2011-02-24 17:58:04 +00:00
|
|
|
static int virEventPollInterruptLocked(void)
|
2008-12-04 22:14:15 +00:00
|
|
|
{
|
|
|
|
char c = '\0';
|
2009-02-06 14:43:52 +00:00
|
|
|
|
|
|
|
if (!eventLoop.running ||
|
2010-11-02 15:56:44 +00:00
|
|
|
virThreadIsSelf(&eventLoop.leader)) {
|
2010-12-04 21:33:23 +00:00
|
|
|
VIR_DEBUG("Skip interrupt, %d %d", eventLoop.running,
|
|
|
|
virThreadID(&eventLoop.leader));
|
2008-12-04 22:14:15 +00:00
|
|
|
return 0;
|
2009-05-12 16:41:49 +00:00
|
|
|
}
|
2008-12-04 22:14:15 +00:00
|
|
|
|
2011-05-09 09:24:09 +00:00
|
|
|
VIR_DEBUG("Interrupting");
|
2008-12-04 22:14:15 +00:00
|
|
|
if (safewrite(eventLoop.wakeupfd[1], &c, sizeof(c)) != sizeof(c))
|
|
|
|
return -1;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2011-02-24 17:58:04 +00:00
|
|
|
int virEventPollInterrupt(void)
|
2008-12-04 22:14:15 +00:00
|
|
|
{
|
|
|
|
int ret;
|
2010-11-02 15:56:44 +00:00
|
|
|
virMutexLock(&eventLoop.lock);
|
2011-02-24 17:58:04 +00:00
|
|
|
ret = virEventPollInterruptLocked();
|
2010-11-02 15:56:44 +00:00
|
|
|
virMutexUnlock(&eventLoop.lock);
|
2008-12-04 22:14:15 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2008-10-23 13:18:18 +00:00
|
|
|
int
|
2011-02-24 17:58:04 +00:00
|
|
|
virEventPollToNativeEvents(int events)
|
2008-10-23 13:18:18 +00:00
|
|
|
{
|
|
|
|
int ret = 0;
|
|
|
|
if(events & VIR_EVENT_HANDLE_READABLE)
|
|
|
|
ret |= POLLIN;
|
|
|
|
if(events & VIR_EVENT_HANDLE_WRITABLE)
|
|
|
|
ret |= POLLOUT;
|
|
|
|
if(events & VIR_EVENT_HANDLE_ERROR)
|
|
|
|
ret |= POLLERR;
|
|
|
|
if(events & VIR_EVENT_HANDLE_HANGUP)
|
|
|
|
ret |= POLLHUP;
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2008-11-04 23:33:57 +00:00
|
|
|
int
|
2011-02-24 17:58:04 +00:00
|
|
|
virEventPollFromNativeEvents(int events)
|
2008-10-23 13:18:18 +00:00
|
|
|
{
|
2008-11-04 23:33:57 +00:00
|
|
|
int ret = 0;
|
2008-10-23 13:18:18 +00:00
|
|
|
if(events & POLLIN)
|
|
|
|
ret |= VIR_EVENT_HANDLE_READABLE;
|
|
|
|
if(events & POLLOUT)
|
|
|
|
ret |= VIR_EVENT_HANDLE_WRITABLE;
|
|
|
|
if(events & POLLERR)
|
|
|
|
ret |= VIR_EVENT_HANDLE_ERROR;
|
2009-02-17 09:44:18 +00:00
|
|
|
if(events & POLLNVAL) /* Treat NVAL as error, since libvirt doesn't distinguish */
|
|
|
|
ret |= VIR_EVENT_HANDLE_ERROR;
|
2008-10-23 13:18:18 +00:00
|
|
|
if(events & POLLHUP)
|
|
|
|
ret |= VIR_EVENT_HANDLE_HANGUP;
|
|
|
|
return ret;
|
|
|
|
}
|