/*
 * eventtest.c: Test the libvirtd event loop impl
 *
 * Copyright (C) 2009, 2011 Red Hat, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Author: Daniel P. Berrange <berrange@redhat.com>
 */

#include <config.h>

#include <stdlib.h>
#include <signal.h>
#include <time.h>

#include "testutils.h"
#include "internal.h"
#include "threads.h"
#include "logging.h"
#include "util.h"
#include "event.h"
#include "event_poll.h"

#define NUM_FDS 31
#define NUM_TIME 31
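
/* The value 31 matches the regression described in the 2011-01
 * "event: fix event-handling allocation crash" commit in this file's
 * history: after 31 handles were created and 30 destroyed, handlesCount
 * was 1 while handlesAlloc had shrunk to 5, so the old cleanup code's
 * unsigned subtraction (size_t)(1 - 5) wrapped to a huge value and it
 * freed another 10 elements, nuking entries that were still live.
 * Registering this many handles/timers and tearing all but one down in
 * mymain() exercises that path. */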

static struct handleInfo {
    int pipeFD[2];
    int fired;
    int watch;
    int error;
    int delete;
} handles[NUM_FDS];

static struct timerInfo {
    int timeout;
    int timer;
    int fired;
    int error;
    int delete;
} timers[NUM_TIME];

enum {
    EV_ERROR_NONE,
    EV_ERROR_WATCH,
    EV_ERROR_FD,
    EV_ERROR_EVENT,
    EV_ERROR_DATA,
};
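
/* Callback registered for each pipe read end: records that the watch
 * fired, checks the watch/fd/event arguments against what was
 * registered, consumes one byte from the pipe, and optionally removes
 * another watch to exercise deletion during dispatch. */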
static void
testPipeReader(int watch, int fd, int events, void *data)
{
    struct handleInfo *info = data;
    char one;

    info->fired = 1;

    if (watch != info->watch) {
        info->error = EV_ERROR_WATCH;
        return;
    }

    if (fd != info->pipeFD[0]) {
        info->error = EV_ERROR_FD;
        return;
    }

    if (!(events & VIR_EVENT_HANDLE_READABLE)) {
        info->error = EV_ERROR_EVENT;
        return;
    }
    if (read(fd, &one, 1) != 1) {
        info->error = EV_ERROR_DATA;
        return;
    }
    info->error = EV_ERROR_NONE;

    if (info->delete != -1)
        virEventPollRemoveHandle(info->delete);
}
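
/* Timer callback: records the firing, checks the timer id, and
 * optionally removes another timeout to exercise deletion during
 * dispatch. */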
static void
testTimer(int timer, void *data)
{
    struct timerInfo *info = data;

    info->fired = 1;

    if (timer != info->timer) {
        info->error = EV_ERROR_WATCH;
        return;
    }

    info->error = EV_ERROR_NONE;

    if (info->delete != -1)
        virEventPollRemoveTimeout(info->delete);
}
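
/* Synchronisation between mymain() and the event loop thread: startJob()
 * sets eventThreadRunOnce and signals eventThreadRunCond to let the
 * thread run one iteration of virEventPollRunOnce(); the thread then
 * sets eventThreadJobDone and signals eventThreadJobCond so that
 * finishJob() can collect the result. */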
static pthread_mutex_t eventThreadMutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t eventThreadRunCond = PTHREAD_COND_INITIALIZER;
static int eventThreadRunOnce = 0;
static pthread_cond_t eventThreadJobCond = PTHREAD_COND_INITIALIZER;
static int eventThreadJobDone = 0;

ATTRIBUTE_NORETURN static void *eventThreadLoop(void *data ATTRIBUTE_UNUSED) {
    while (1) {
        pthread_mutex_lock(&eventThreadMutex);
        while (!eventThreadRunOnce) {
            pthread_cond_wait(&eventThreadRunCond, &eventThreadMutex);
        }
        eventThreadRunOnce = 0;
        pthread_mutex_unlock(&eventThreadMutex);

        virEventPollRunOnce();

        pthread_mutex_lock(&eventThreadMutex);
        eventThreadJobDone = 1;
        pthread_cond_signal(&eventThreadJobCond);
        pthread_mutex_unlock(&eventThreadMutex);
    }
}
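
/* Check that exactly the expected handle and/or timer fired (pass -1
 * for "none expected") and that no callback reported an error. */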
static int
verifyFired(const char *name, int handle, int timer)
{
    int handleFired = 0;
    int timerFired = 0;
    int i;
    for (i = 0 ; i < NUM_FDS ; i++) {
        if (handles[i].fired) {
            if (i != handle) {
                virtTestResult(name, 1,
                               "Handle %d fired, but expected %d\n", i,
                               handle);
                return EXIT_FAILURE;
            } else {
                if (handles[i].error != EV_ERROR_NONE) {
                    virtTestResult(name, 1,
                                   "Handle %d fired, but had error %d\n", i,
                                   handles[i].error);
                    return EXIT_FAILURE;
                }
                handleFired = 1;
            }
        } else {
            if (i == handle) {
                virtTestResult(name, 1,
                               "Handle %d should have fired, but didn't\n",
                               handle);
                return EXIT_FAILURE;
            }
        }
    }
    if (handleFired != 1 && handle != -1) {
        virtTestResult(name, 1,
                       "Something weird happened, expecting handle %d\n",
                       handle);
        return EXIT_FAILURE;
    }

    for (i = 0 ; i < NUM_TIME ; i++) {
        if (timers[i].fired) {
            if (i != timer) {
                virtTestResult(name, 1,
                               "Timer %d fired, but expected %d\n", i, timer);
                return EXIT_FAILURE;
            } else {
                if (timers[i].error != EV_ERROR_NONE) {
                    virtTestResult(name, 1,
                                   "Timer %d fired, but had error %d\n", i,
                                   timers[i].error);
                    return EXIT_FAILURE;
                }
                timerFired = 1;
            }
        } else {
            if (i == timer) {
                virtTestResult(name, 1,
                               "Timer %d should have fired, but didn't\n",
                               timer);
                return EXIT_FAILURE;
            }
        }
    }
    if (timerFired != 1 && timer != -1) {
        virtTestResult(name, 1,
                       "Something weird happened, expecting timer %d\n",
                       timer);
        return EXIT_FAILURE;
    }
    return EXIT_SUCCESS;
}
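
/* startJob() kicks the event thread into a single virEventPollRunOnce()
 * iteration; finishJob() waits (up to 5 seconds) for it to complete and
 * then verifies which handle/timer fired. */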
static void
startJob(void)
{
    eventThreadRunOnce = 1;
    eventThreadJobDone = 0;
    pthread_cond_signal(&eventThreadRunCond);
    pthread_mutex_unlock(&eventThreadMutex);
    sched_yield();
    pthread_mutex_lock(&eventThreadMutex);
}

static int
finishJob(const char *name, int handle, int timer)
{
    struct timespec waitTime;
    int rc;
    clock_gettime(CLOCK_REALTIME, &waitTime);
    waitTime.tv_sec += 5;
    rc = 0;
    while (!eventThreadJobDone && rc == 0)
        rc = pthread_cond_timedwait(&eventThreadJobCond, &eventThreadMutex,
                                    &waitTime);
    if (rc != 0) {
        virtTestResult(name, 1, "Timed out waiting for pipe event\n");
        return EXIT_FAILURE;
    }

    if (verifyFired(name, handle, timer) != EXIT_SUCCESS)
        return EXIT_FAILURE;

    virtTestResult(name, 0, NULL);
    return EXIT_SUCCESS;
}

static void
resetAll(void)
{
    int i;
    for (i = 0 ; i < NUM_FDS ; i++) {
        handles[i].fired = 0;
        handles[i].error = EV_ERROR_NONE;
    }
    for (i = 0 ; i < NUM_TIME ; i++) {
        timers[i].fired = 0;
        timers[i].error = EV_ERROR_NONE;
    }
}
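
/* The test driver: registers NUM_FDS pipe handles and NUM_TIME timers,
 * then runs the poll loop one iteration at a time, removing handles and
 * timers before, during and from within dispatch to check that the
 * event loop copes with each case. */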
static int
mymain(void)
{
    int i;
    pthread_t eventThread;
    char one = '1';

    for (i = 0 ; i < NUM_FDS ; i++) {
        if (pipe(handles[i].pipeFD) < 0) {
            fprintf(stderr, "Cannot create pipe: %d", errno);
            return EXIT_FAILURE;
        }
    }

    if (virThreadInitialize() < 0)
        return EXIT_FAILURE;
    char *debugEnv = getenv("LIBVIRT_DEBUG");
    if (debugEnv && *debugEnv && (virLogParseDefaultPriority(debugEnv) == -1)) {
        fprintf(stderr, "Invalid log level setting.\n");
        return EXIT_FAILURE;
    }

    virEventPollInit();

    for (i = 0 ; i < NUM_FDS ; i++) {
        handles[i].delete = -1;
        handles[i].watch =
            virEventPollAddHandle(handles[i].pipeFD[0],
                                  VIR_EVENT_HANDLE_READABLE,
                                  testPipeReader,
                                  &handles[i], NULL);
    }

    for (i = 0 ; i < NUM_TIME ; i++) {
        timers[i].delete = -1;
        timers[i].timeout = -1;
        timers[i].timer =
            virEventPollAddTimeout(timers[i].timeout,
                                   testTimer,
                                   &timers[i], NULL);
    }

    pthread_create(&eventThread, NULL, eventThreadLoop, NULL);

    pthread_mutex_lock(&eventThreadMutex);

    /* First time, is easy - just try triggering one of our
     * registered handles */
    startJob();
    if (safewrite(handles[1].pipeFD[1], &one, 1) != 1)
        return EXIT_FAILURE;
    if (finishJob("Simple write", 1, -1) != EXIT_SUCCESS)
        return EXIT_FAILURE;

    resetAll();

    /* Now lets delete one before starting poll(), and
     * try triggering another handle */
    virEventPollRemoveHandle(handles[0].watch);
    startJob();
    if (safewrite(handles[1].pipeFD[1], &one, 1) != 1)
        return EXIT_FAILURE;
    if (finishJob("Deleted before poll", 1, -1) != EXIT_SUCCESS)
        return EXIT_FAILURE;

    resetAll();

    /* Next lets delete *during* poll, which should interrupt
     * the loop with no event showing */

    /* NB: this case is subject to a bit of a race condition.
     * We yield & sleep, and pray that the other thread gets
     * scheduled before we run EventRemoveHandle */
    startJob();
    pthread_mutex_unlock(&eventThreadMutex);
    sched_yield();
    usleep(100 * 1000);
    pthread_mutex_lock(&eventThreadMutex);
    virEventPollRemoveHandle(handles[1].watch);
    if (finishJob("Interrupted during poll", -1, -1) != EXIT_SUCCESS)
        return EXIT_FAILURE;

    resetAll();

    /* Getting more fun, lets delete a later handle during dispatch */

    /* NB: this case is subject to a bit of a race condition.
     * Only 1 time in 3 does the 2nd write get triggered
     * before poll() exits for the first safewrite(). We don't
     * see a hard failure in other cases, so nothing to worry
     * about */
    startJob();
    handles[2].delete = handles[3].watch;
    if (safewrite(handles[2].pipeFD[1], &one, 1) != 1
        || safewrite(handles[3].pipeFD[1], &one, 1) != 1)
        return EXIT_FAILURE;
    if (finishJob("Deleted during dispatch", 2, -1) != EXIT_SUCCESS)
        return EXIT_FAILURE;

    resetAll();

    /* Extreme fun, lets delete ourselves during dispatch */
    startJob();
    handles[2].delete = handles[2].watch;
    if (safewrite(handles[2].pipeFD[1], &one, 1) != 1)
        return EXIT_FAILURE;
    if (finishJob("Deleted during dispatch", 2, -1) != EXIT_SUCCESS)
        return EXIT_FAILURE;

    resetAll();


    /* Run a timer on its own */
    virEventPollUpdateTimeout(timers[1].timer, 100);
    startJob();
    if (finishJob("Firing a timer", -1, 1) != EXIT_SUCCESS)
        return EXIT_FAILURE;
    virEventPollUpdateTimeout(timers[1].timer, -1);

    resetAll();

    /* Now lets delete one before starting poll(), and
     * try triggering another timer */
    virEventPollUpdateTimeout(timers[1].timer, 100);
    virEventPollRemoveTimeout(timers[0].timer);
    startJob();
    if (finishJob("Deleted before poll", -1, 1) != EXIT_SUCCESS)
        return EXIT_FAILURE;
    virEventPollUpdateTimeout(timers[1].timer, -1);

    resetAll();

    /* Next lets delete *during* poll, which should interrupt
     * the loop with no event showing */

    /* NB: this case is subject to a bit of a race condition.
     * We yield & sleep, and pray that the other thread gets
     * scheduled before we run EventRemoveTimeout */
    startJob();
    pthread_mutex_unlock(&eventThreadMutex);
    sched_yield();
    usleep(100 * 1000);
    pthread_mutex_lock(&eventThreadMutex);
    virEventPollRemoveTimeout(timers[1].timer);
    if (finishJob("Interrupted during poll", -1, -1) != EXIT_SUCCESS)
        return EXIT_FAILURE;

    resetAll();

    /* Getting more fun, lets delete a later timer during dispatch */

    /* NB: this case is subject to a bit of a race condition.
     * Only 1 time in 3 does the 2nd write get triggered
     * before poll() exits for the first safewrite(). We don't
     * see a hard failure in other cases, so nothing to worry
     * about */
    virEventPollUpdateTimeout(timers[2].timer, 100);
    virEventPollUpdateTimeout(timers[3].timer, 100);
    startJob();
    timers[2].delete = timers[3].timer;
    if (finishJob("Deleted during dispatch", -1, 2) != EXIT_SUCCESS)
        return EXIT_FAILURE;
    virEventPollUpdateTimeout(timers[2].timer, -1);

    resetAll();

    /* Extreme fun, lets delete ourselves during dispatch */
    virEventPollUpdateTimeout(timers[2].timer, 100);
    startJob();
    timers[2].delete = timers[2].timer;
    if (finishJob("Deleted during dispatch", -1, 2) != EXIT_SUCCESS)
        return EXIT_FAILURE;
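
    /* Tear down all but the last handle and timer. Per the
     * "event: fix event-handling allocation crash" commit in this
     * file's history, this create-31/destroy-30 pattern is what used to
     * make the poll impl's cleanup code free entries that were still
     * live, so the loop below checks that the surviving handle keeps
     * working. */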
    for (i = 0 ; i < NUM_FDS - 1 ; i++)
        virEventPollRemoveHandle(handles[i].watch);
    for (i = 0 ; i < NUM_TIME - 1 ; i++)
        virEventPollRemoveTimeout(timers[i].timer);

    resetAll();

    /* Make sure the last handle still works several times in a row. */
    for (i = 0; i < 4; i++) {
        startJob();
        if (safewrite(handles[NUM_FDS - 1].pipeFD[1], &one, 1) != 1)
            return EXIT_FAILURE;
        if (finishJob("Simple write", NUM_FDS - 1, -1) != EXIT_SUCCESS)
            return EXIT_FAILURE;

        resetAll();
    }

    /* Final test, register same FD twice, once with no
     * events, and make sure the right callback runs */
    handles[0].pipeFD[0] = handles[1].pipeFD[0];
    handles[0].pipeFD[1] = handles[1].pipeFD[1];

    handles[0].watch = virEventPollAddHandle(handles[0].pipeFD[0],
                                             0,
                                             testPipeReader,
                                             &handles[0], NULL);
    handles[1].watch = virEventPollAddHandle(handles[1].pipeFD[0],
                                             VIR_EVENT_HANDLE_READABLE,
                                             testPipeReader,
                                             &handles[1], NULL);
    startJob();
    if (safewrite(handles[1].pipeFD[1], &one, 1) != 1)
        return EXIT_FAILURE;
    if (finishJob("Write duplicate", 1, -1) != EXIT_SUCCESS)
        return EXIT_FAILURE;

    //pthread_kill(eventThread, SIGTERM);

    return EXIT_SUCCESS;
}

VIRT_TEST_MAIN(mymain)