diff --git a/.gitignore b/.gitignore
index 5ea281a783..cd8a85ccde 100644
--- a/.gitignore
+++ b/.gitignore
@@ -152,6 +152,7 @@
 /tests/statstest
 /tests/storagebackendsheepdogtest
 /tests/utiltest
+/tests/viratomictest
 /tests/virauthconfigtest
 /tests/virbuftest
 /tests/virdrivermoduletest
diff --git a/configure.ac b/configure.ac
index 400ac3b065..6b189dbad0 100644
--- a/configure.ac
+++ b/configure.ac
@@ -159,6 +159,63 @@ AC_CHECK_HEADERS([pwd.h paths.h regex.h sys/un.h \
   sys/un.h sys/syscall.h netinet/tcp.h ifaddrs.h libtasn1.h \
   net/if.h execinfo.h])
 
+dnl We need to decide at configure time if libvirt will use real atomic
+dnl operations ("lock free") or emulated ones with a mutex.
+
+dnl Note that the atomic ops are only available with GCC on x86 when
+dnl using -march=i486 or higher. If we detect that the atomic ops are
+dnl not available but would be available given the right flags, we want
+dnl to abort and advise the user to fix their CFLAGS. It's better to do
+dnl that than to silently fall back on emulated atomic ops just because
+dnl the user had the wrong build environment.
+
+atomic_ops=
+
+AC_MSG_CHECKING([for atomic ops implementation])
+
+AC_TRY_COMPILE([], [__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4;],[
+  atomic_ops=gcc
+],[])
+
+if test "$atomic_ops" = "" ; then
+  SAVE_CFLAGS="${CFLAGS}"
+  CFLAGS="-march=i486"
+  AC_TRY_COMPILE([],
+                 [__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4;],
+                 [AC_MSG_ERROR([Libvirt must be built with -march=i486 or later.])],
+                 [])
+  CFLAGS="${SAVE_CFLAGS}"
+
+  case "$host" in
+    *-*-mingw* | *-*-msvc* )
+      atomic_ops=win32
+      ;;
+    *)
+      if test "$ac_cv_header_pthread_h" = "yes" ; then
+        atomic_ops=pthread
+      else
+        AC_MSG_ERROR([Libvirt must be built with GCC or have pthread.h on non-Win32 platforms])
+      fi
+      ;;
+  esac
+fi
+
+case "$atomic_ops" in
+  gcc)
+    AC_DEFINE([VIR_ATOMIC_OPS_GCC],[1],[Use GCC atomic ops])
+    ;;
+  win32)
+    AC_DEFINE([VIR_ATOMIC_OPS_WIN32],[1],[Use Win32 atomic ops])
+    ;;
+  pthread)
+    AC_DEFINE([VIR_ATOMIC_OPS_PTHREAD],[1],[Use pthread atomic ops emulation])
+    ;;
+esac
+AM_CONDITIONAL([WITH_ATOMIC_OPS_PTHREAD],[test "$atomic_ops" = "pthread"])
+AC_MSG_RESULT([$atomic_ops])
+
+
+
 AC_MSG_CHECKING([for struct ifreq in net/if.h])
 AC_COMPILE_IFELSE([AC_LANG_PROGRAM(
 [[
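For reference, a minimal standalone sketch of what the GCC probe above effectively compiles (an illustration under assumptions, not part of the patch). GCC predefines __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 only when the __sync_*() builtins work on a 4-byte int for the selected target, so this translation unit builds exactly in the configurations where atomic_ops=gcc would be chosen; on 32-bit x86 it typically fails without -march=i486 or later, which is why configure retries with that flag before deciding between erroring out and falling back to the emulated implementation.

/* probe.c: hypothetical file name; mirrors the AC_TRY_COMPILE body above.
 * Compiles only when GCC's __sync atomic builtins are usable here. */
int
main(void)
{
    __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4;
    return 0;
}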
diff --git a/src/Makefile.am b/src/Makefile.am
index b48ce6581a..844e6489f5 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -79,7 +79,7 @@ UTIL_SOURCES = \
 		util/threadpool.c util/threadpool.h \
 		util/uuid.c util/uuid.h \
 		util/util.c util/util.h \
-		util/viratomic.h \
+		util/viratomic.h util/viratomic.c \
 		util/viraudit.c util/viraudit.h \
 		util/virauth.c util/virauth.h \
 		util/virauthconfig.c util/virauthconfig.h \
@@ -1317,9 +1317,14 @@ if HAVE_SASL
 USED_SYM_FILES += libvirt_sasl.syms
 endif
 
+if WITH_ATOMIC_OPS_PTHREAD
+USED_SYM_FILES += libvirt_atomic.syms
+endif
+
 EXTRA_DIST += \
   libvirt_public.syms \
   libvirt_private.syms \
+  libvirt_atomic.syms \
   libvirt_driver_modules.syms \
   libvirt_daemon.syms \
   libvirt_linux.syms \
diff --git a/src/libvirt_atomic.syms b/src/libvirt_atomic.syms
new file mode 100644
index 0000000000..afaaf70664
--- /dev/null
+++ b/src/libvirt_atomic.syms
@@ -0,0 +1,3 @@
+
+# viratomic.h
+virAtomicLock;
diff --git a/src/nwfilter/nwfilter_dhcpsnoop.c b/src/nwfilter/nwfilter_dhcpsnoop.c
index 1395bfe921..23cbdde67f 100644
--- a/src/nwfilter/nwfilter_dhcpsnoop.c
+++ b/src/nwfilter/nwfilter_dhcpsnoop.c
@@ -78,9 +78,9 @@ struct virNWFilterSnoopState {
     /* lease file */
     int leaseFD;
-    virAtomicInt nLeases; /* number of active leases */
-    virAtomicInt wLeases; /* number of written leases */
-    virAtomicInt nThreads; /* number of running threads */
+    int nLeases; /* number of active leases */
+    int wLeases; /* number of written leases */
+    int nThreads; /* number of running threads */
 
     /* thread management */
     virHashTablePtr snoopReqs;
     virHashTablePtr ifnameToKey;
@@ -126,7 +126,7 @@ struct _virNWFilterSnoopReq {
      * publicSnoopReqs hash, the refctr may only
      * be modified with the SnoopLock held */
-    virAtomicInt refctr;
+    int refctr;
 
     virNWFilterTechDriverPtr techdriver;
     char *ifname;
@@ -240,7 +240,7 @@ struct _virNWFilterDHCPDecodeJob {
     unsigned char packet[PCAP_PBUFSIZE];
     int caplen;
     bool fromVM;
-    virAtomicIntPtr qCtr;
+    int *qCtr;
 };
 
 # define DHCP_PKT_RATE 10 /* pkts/sec */
@@ -271,7 +271,7 @@ struct _virNWFilterSnoopPcapConf {
     const pcap_direction_t dir;
     const char *filter;
     virNWFilterSnoopRateLimitConf rateLimit; /* indep. rate limiters */
-    virAtomicInt qCtr; /* number of jobs in the worker's queue */
+    int qCtr; /* number of jobs in the worker's queue */
     const unsigned int maxQSize;
     unsigned long long penaltyTimeoutAbs;
 };
@@ -588,8 +588,7 @@ virNWFilterSnoopReqNew(const char *ifkey)
 
     req->threadStatus = THREAD_STATUS_NONE;
 
-    if (virAtomicIntInit(&req->refctr) < 0 ||
-        virStrcpyStatic(req->ifkey, ifkey) == NULL ||
+    if (virStrcpyStatic(req->ifkey, ifkey) == NULL ||
         virMutexInitRecursive(&req->lock) < 0)
         goto err_free_req;
 
@@ -622,7 +621,7 @@ virNWFilterSnoopReqFree(virNWFilterSnoopReqPtr req)
     if (!req)
         return;
 
-    if (virAtomicIntRead(&req->refctr) != 0)
+    if (virAtomicIntGet(&req->refctr) != 0)
         return;
 
     /* free all leases */
@@ -715,7 +714,7 @@ virNWFilterSnoopReqPut(virNWFilterSnoopReqPtr req)
 
     virNWFilterSnoopLock();
 
-    if (virAtomicIntDec(&req->refctr) == 0) {
+    if (virAtomicIntDecAndTest(&req->refctr)) {
         /*
          * delete the request:
          * - if we don't find req on the global list anymore
@@ -897,7 +896,7 @@ virNWFilterSnoopReqLeaseDel(virNWFilterSnoopReqPtr req,
 skip_instantiate:
     VIR_FREE(ipl);
 
-    virAtomicIntDec(&virNWFilterSnoopState.nLeases);
+    virAtomicIntDecAndTest(&virNWFilterSnoopState.nLeases);
 
 lease_not_found:
     VIR_FREE(ipstr);
@@ -1159,7 +1158,7 @@ static void virNWFilterDHCPDecodeWorker(void *jobdata, void *opaque)
                        _("Instantiation of rules failed on "
                          "interface '%s'"), req->ifname);
     }
-    virAtomicIntDec(job->qCtr);
+    virAtomicIntDecAndTest(job->qCtr);
     VIR_FREE(job);
 }
 
@@ -1170,7 +1169,7 @@ static int virNWFilterSnoopDHCPDecodeJobSubmit(virThreadPoolPtr pool,
                                                virNWFilterSnoopEthHdrPtr pep,
                                                int len, pcap_direction_t dir,
-                                               virAtomicIntPtr qCtr)
+                                               int *qCtr)
 {
     virNWFilterDHCPDecodeJobPtr job;
     int ret;
 
@@ -1376,8 +1375,7 @@ virNWFilterDHCPSnoopThread(void *req0)
             virNWFilterSnoopDHCPOpen(req->ifname, &req->macaddr,
                                      pcapConf[i].filter,
                                      pcapConf[i].dir);
-        if (!pcapConf[i].handle ||
-            virAtomicIntInit(&pcapConf[i].qCtr) < 0) {
+        if (!pcapConf[i].handle) {
             error = true;
             break;
         }
@@ -1488,7 +1486,7 @@ virNWFilterDHCPSnoopThread(void *req0)
                 unsigned int diff;
 
                 /* submit packet to worker thread */
-                if (virAtomicIntRead(&pcapConf[i].qCtr) >
+                if (virAtomicIntGet(&pcapConf[i].qCtr) >
                     pcapConf[i].maxQSize) {
                     if (last_displayed_queue - time(0) > 10) {
                         last_displayed_queue = time(0);
@@ -1554,7 +1552,7 @@ exit:
             pcap_close(pcapConf[i].handle);
     }
 
-    virAtomicIntDec(&virNWFilterSnoopState.nThreads);
+    virAtomicIntDecAndTest(&virNWFilterSnoopState.nThreads);
 
     return;
 }
@@ -1805,7 +1803,7 @@ virNWFilterSnoopLeaseFileSave(virNWFilterSnoopIPLeasePtr ipl)
 
     /* keep dead leases at < ~95% of file size */
     if (virAtomicIntInc(&virNWFilterSnoopState.wLeases) >=
-        virAtomicIntRead(&virNWFilterSnoopState.nLeases) * 20)
+        virAtomicIntGet(&virNWFilterSnoopState.nLeases) * 20)
         virNWFilterSnoopLeaseFileLoad(); /* load & refresh lease file */
 
 err_exit:
@@ -1836,7 +1834,7 @@ virNWFilterSnoopPruneIter(const void *payload,
     /*
      * have the entry removed if it has no leases and no one holds a ref
     */
-    del_req = ((req->start == NULL) && (virAtomicIntRead(&req->refctr) == 0));
+    del_req = ((req->start == NULL) && (virAtomicIntGet(&req->refctr) == 0));
 
     virNWFilterSnoopReqUnlock(req);
 
@@ -1994,9 +1992,9 @@ virNWFilterSnoopLeaseFileLoad(void)
 static void
 virNWFilterSnoopJoinThreads(void)
 {
-    while (virAtomicIntRead(&virNWFilterSnoopState.nThreads) != 0) {
+    while (virAtomicIntGet(&virNWFilterSnoopState.nThreads) != 0) {
         VIR_WARN("Waiting for snooping threads to terminate: %u\n",
-                 virAtomicIntRead(&virNWFilterSnoopState.nThreads));
+                 virAtomicIntGet(&virNWFilterSnoopState.nThreads));
         usleep(1000 * 1000);
     }
 }
@@ -2061,10 +2059,7 @@ virNWFilterDHCPSnoopInit(void)
     VIR_DEBUG("Initializing DHCP snooping");
 
     if (virMutexInitRecursive(&virNWFilterSnoopState.snoopLock) < 0 ||
-        virMutexInit(&virNWFilterSnoopState.activeLock) < 0 ||
-        virAtomicIntInit(&virNWFilterSnoopState.nLeases) < 0 ||
-        virAtomicIntInit(&virNWFilterSnoopState.wLeases) < 0 ||
-        virAtomicIntInit(&virNWFilterSnoopState.nThreads) < 0)
+        virMutexInit(&virNWFilterSnoopState.activeLock) < 0)
        return -1;
 
     virNWFilterSnoopState.ifnameToKey = virHashCreate(0, NULL);
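The conversion above follows one pattern throughout nwfilter_dhcpsnoop.c: a plain int replaces the old virAtomicInt struct (so the fallible virAtomicIntInit() calls disappear), virAtomicIntGet() stands in for virAtomicIntRead(), and the "did it reach zero?" comparison against virAtomicIntDec()'s return value is folded into virAtomicIntDecAndTest(). A condensed sketch of the resulting reference-counting idiom (illustrative only; the SomeObject type and helpers are hypothetical, not taken from the patch):

#include <config.h>
#include <stdlib.h>

#include "viratomic.h"

typedef struct {
    int refctr;              /* plain int now; no virAtomicIntInit() needed */
    /* ... payload ... */
} SomeObject;

static void
someObjectRef(SomeObject *obj)
{
    virAtomicIntInc(&obj->refctr);
}

static void
someObjectUnref(SomeObject *obj)
{
    /* returns true only for the caller that drops the count to zero,
     * so exactly one thread frees the object */
    if (virAtomicIntDecAndTest(&obj->refctr))
        free(obj);
}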
diff --git a/src/util/viratomic.c b/src/util/viratomic.c
new file mode 100644
index 0000000000..54c25e0f29
--- /dev/null
+++ b/src/util/viratomic.c
@@ -0,0 +1,35 @@
+/*
+ * viratomic.c: atomic integer operations
+ *
+ * Copyright (C) 2012 Red Hat, Inc.
+ *
+ * Based on code taken from GLib 2.32, under the LGPLv2+
+ *
+ * Copyright (C) 2011 Ryan Lortie
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; If not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include <config.h>
+
+#include "viratomic.h"
+
+
+#ifdef VIR_ATOMIC_OPS_PTHREAD
+
+pthread_mutex_t virAtomicLock = PTHREAD_MUTEX_INITIALIZER;
+
+#endif
diff --git a/src/util/viratomic.h b/src/util/viratomic.h
index 246fe2be46..fa0a89ad23 100644
--- a/src/util/viratomic.h
+++ b/src/util/viratomic.h
@@ -1,10 +1,11 @@
 /*
  * viratomic.h: atomic integer operations
  *
- * Copyright (C) 2012 IBM Corporation
+ * Copyright (C) 2012 Red Hat, Inc.
  *
- * Authors:
- *     Stefan Berger
+ * Based on code taken from GLib 2.32, under the LGPLv2+
+ *
+ * Copyright (C) 2011 Ryan Lortie
  *
  * This library is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Lesser General Public
@@ -25,130 +26,422 @@
 #ifndef __VIR_ATOMIC_H__
 # define __VIR_ATOMIC_H__
 
-# include "threads.h"
+# include "internal.h"
 
-typedef struct _virAtomicInt virAtomicInt;
-typedef virAtomicInt *virAtomicIntPtr;
+/**
+ * virAtomicIntGet:
+ * Gets the current value of atomic.
+ *
+ * This call acts as a full compiler and hardware memory barrier
+ * (before the get)
+ */
+int virAtomicIntGet(volatile int *atomic)
+    ATTRIBUTE_NONNULL(1);
 
-# define __VIR_ATOMIC_USES_LOCK
+/**
+ * virAtomicIntSet:
+ * Sets the value of atomic to newval.
+ *
+ * This call acts as a full compiler and hardware memory barrier
+ * (after the set)
+ */
+void virAtomicIntSet(volatile int *atomic,
+                     int newval)
+    ATTRIBUTE_NONNULL(1);
+
+/**
+ * virAtomicIntInc:
+ * Increments the value of atomic by 1.
+ *
+ * Think of this operation as an atomic version of
+ * { *atomic += 1; return *atomic; }
+ *
+ * This call acts as a full compiler and hardware memory barrier.
+ */
+int virAtomicIntInc(volatile int *atomic)
+    ATTRIBUTE_NONNULL(1);
+
+/**
+ * virAtomicIntDecAndTest:
+ * Decrements the value of atomic by 1.
+ *
+ * Think of this operation as an atomic version of
+ * { *atomic -= 1; return *atomic == 0; }
+ *
+ * This call acts as a full compiler and hardware memory barrier.
+ */
+bool virAtomicIntDecAndTest(volatile int *atomic)
+    ATTRIBUTE_NONNULL(1);
+
+/**
+ * virAtomicIntCompareExchange:
+ * Compares atomic to oldval and, if equal, sets it to newval. If
+ * atomic was not equal to oldval then no change occurs.
+ *
+ * This compare and exchange is done atomically.
+ *
+ * Think of this operation as an atomic version of
+ * { if (*atomic == oldval) { *atomic = newval; return true; }
+ *   else return false; }
+ *
+ * This call acts as a full compiler and hardware memory barrier.
+ */
+bool virAtomicIntCompareExchange(volatile int *atomic,
+                                 int oldval,
+                                 int newval)
+    ATTRIBUTE_NONNULL(1);
+
+/**
+ * virAtomicIntAdd:
+ * Atomically adds val to the value of atomic.
+ *
+ * Think of this operation as an atomic version of
+ * { tmp = *atomic; *atomic += val; return tmp; }
+ *
+ * This call acts as a full compiler and hardware memory barrier.
+ */
+int virAtomicIntAdd(volatile int *atomic,
+                    int val)
+    ATTRIBUTE_NONNULL(1);
+
+/**
+ * virAtomicIntAnd:
+ * Performs an atomic bitwise 'and' of the value of atomic
+ * and val, storing the result back in atomic.
+ *
+ * This call acts as a full compiler and hardware memory barrier.
+ *
+ * Think of this operation as an atomic version of
+ * { tmp = *atomic; *atomic &= val; return tmp; }
+ */
+unsigned int virAtomicIntAnd(volatile unsigned int *atomic,
+                             unsigned int val)
+    ATTRIBUTE_NONNULL(1);
+
+/**
+ * virAtomicIntOr:
+ * Performs an atomic bitwise 'or' of the value of atomic
+ * and val, storing the result back in atomic.
+ *
+ * Think of this operation as an atomic version of
+ * { tmp = *atomic; *atomic |= val; return tmp; }
+ *
+ * This call acts as a full compiler and hardware memory barrier.
+ */
+unsigned int virAtomicIntOr(volatile unsigned int *atomic,
+                            unsigned int val)
+    ATTRIBUTE_NONNULL(1);
+
+/**
+ * virAtomicIntXor:
+ * Performs an atomic bitwise 'xor' of the value of atomic
+ * and val, storing the result back in atomic.
+ *
+ * Think of this operation as an atomic version of
+ * { tmp = *atomic; *atomic ^= val; return tmp; }
+ *
+ * This call acts as a full compiler and hardware memory barrier.
+ */
+unsigned int virAtomicIntXor(volatile unsigned int *atomic,
+                             unsigned int val)
+    ATTRIBUTE_NONNULL(1);
+
+
+# ifdef VIR_ATOMIC_OPS_GCC
+
+#  define virAtomicIntGet(atomic) \
+    (__extension__ ({ \
+        (void)verify_true(sizeof(*(atomic)) == sizeof(int)); \
+        (void)(0 ? *(atomic) ^ *(atomic) : 0); \
+        __sync_synchronize(); \
+        (int)*(atomic); \
+    }))
+#  define virAtomicIntSet(atomic, newval) \
+    (__extension__ ({ \
+        (void)verify_true(sizeof(*(atomic)) == sizeof(int)); \
+        (void)(0 ? *(atomic) ^ (newval) : 0); \
+        *(atomic) = (newval); \
+        __sync_synchronize(); \
+    }))
+#  define virAtomicIntInc(atomic) \
+    (__extension__ ({ \
+        (void)verify_true(sizeof(*(atomic)) == sizeof(int)); \
+        (void)(0 ? *(atomic) ^ *(atomic) : 0); \
+        __sync_add_and_fetch((atomic), 1); \
+    }))
+#  define virAtomicIntDecAndTest(atomic) \
+    (__extension__ ({ \
+        (void)verify_true(sizeof(*(atomic)) == sizeof(int)); \
+        (void)(0 ? *(atomic) ^ *(atomic) : 0); \
+        __sync_fetch_and_sub((atomic), 1) == 1; \
+    }))
+#  define virAtomicIntCompareExchange(atomic, oldval, newval) \
+    (__extension__ ({ \
+        (void)verify_true(sizeof(*(atomic)) == sizeof(int)); \
+        (void)(0 ? *(atomic) ^ (newval) ^ (oldval) : 0); \
+        (bool)__sync_bool_compare_and_swap((atomic), \
+                                           (oldval), (newval)); \
+    }))
+#  define virAtomicIntAdd(atomic, val) \
+    (__extension__ ({ \
+        (void)verify_true(sizeof(*(atomic)) == sizeof(int)); \
+        (void)(0 ? *(atomic) ^ (val) : 0); \
+        (int) __sync_fetch_and_add((atomic), (val)); \
+    }))
+#  define virAtomicIntAnd(atomic, val) \
+    (__extension__ ({ \
+        (void)verify_true(sizeof(*(atomic)) == sizeof(int)); \
+        (void) (0 ? *(atomic) ^ (val) : 0); \
+        (unsigned int) __sync_fetch_and_and((atomic), (val)); \
+    }))
+#  define virAtomicIntOr(atomic, val) \
+    (__extension__ ({ \
+        (void)verify_true(sizeof(*(atomic)) == sizeof(int)); \
+        (void) (0 ? *(atomic) ^ (val) : 0); \
+        (unsigned int) __sync_fetch_and_or((atomic), (val)); \
+    }))
+#  define virAtomicIntXor(atomic, val) \
+    (__extension__ ({ \
+        (void)verify_true(sizeof(*(atomic)) == sizeof(int)); \
+        (void) (0 ? *(atomic) ^ (val) : 0); \
+        (unsigned int) __sync_fetch_and_xor((atomic), (val)); \
+    }))
+
+
+# else
+
+#  ifdef VIR_ATOMIC_OPS_WIN32
+
+#   include <winsock2.h>
+#   include <windows.h>
+#   include <intrin.h>
+#   if !defined(_M_AMD64) && !defined (_M_IA64) && !defined(_M_X64)
+#    define InterlockedAnd _InterlockedAnd
+#    define InterlockedOr _InterlockedOr
+#    define InterlockedXor _InterlockedXor
+#   endif
+
+/*
+ * http://msdn.microsoft.com/en-us/library/ms684122(v=vs.85).aspx
+ */
+inline int
+virAtomicIntGet(volatile int *atomic)
+{
+    MemoryBarrier();
+    return *atomic;
+}
+
+inline void
+virAtomicIntSet(volatile int *atomic,
+                int newval)
+{
+    *atomic = newval;
+    MemoryBarrier();
+}
+
+inline int
+virAtomicIntInc(volatile int *atomic)
+{
+    return InterlockedIncrement((volatile LONG *)atomic);
+}
+
+inline bool
+virAtomicIntDecAndTest(volatile int *atomic)
+{
+    return InterlockedDecrement((volatile LONG *)atomic) == 0;
+}
+
+inline bool
+virAtomicIntCompareExchange(volatile int *atomic,
+                            int oldval,
+                            int newval)
+{
+    return InterlockedCompareExchange((volatile LONG *)atomic, newval, oldval) == oldval;
+}
+
+inline int
+virAtomicIntAdd(volatile int *atomic,
+                int val)
+{
+    return InterlockedExchangeAdd((volatile LONG *)atomic, val);
+}
+
+inline unsigned int
+virAtomicIntAnd(volatile unsigned int *atomic,
+                unsigned int val)
+{
+    return InterlockedAnd((volatile LONG *)atomic, val);
+}
+
+inline unsigned int
+virAtomicIntOr(volatile unsigned int *atomic,
+               unsigned int val)
+{
+    return InterlockedOr((volatile LONG *)atomic, val);
+}
+
+inline unsigned int
+virAtomicIntXor(volatile unsigned int *atomic,
+                unsigned int val)
+{
+    return InterlockedXor((volatile LONG *)atomic, val);
+}
+
+
+#  else
+#   ifdef VIR_ATOMIC_OPS_PTHREAD
+#    include <pthread.h>
+
+extern pthread_mutex_t virAtomicLock;
+
+inline int
+virAtomicIntGet(volatile int *atomic)
+{
+    int value;
+
+    pthread_mutex_lock(&virAtomicLock);
+    value = *atomic;
+    pthread_mutex_unlock(&virAtomicLock);
+
+    return value;
+}
+
+inline void
+virAtomicIntSet(volatile int *atomic,
+                int value)
+{
+    pthread_mutex_lock(&virAtomicLock);
+    *atomic = value;
+    pthread_mutex_unlock(&virAtomicLock);
+}
+
+inline int
+virAtomicIntInc(volatile int *atomic)
+{
+    int value;
+
+    pthread_mutex_lock(&virAtomicLock);
+    value = ++(*atomic);
+    pthread_mutex_unlock(&virAtomicLock);
+
+    return value;
+}
+
+inline bool
+virAtomicIntDecAndTest(volatile int *atomic)
+{
+    bool is_zero;
+
+    pthread_mutex_lock(&virAtomicLock);
+    is_zero = --(*atomic) == 0;
+    pthread_mutex_unlock(&virAtomicLock);
+
+    return is_zero;
+}
+
+inline bool
+virAtomicIntCompareExchange(volatile int *atomic,
+                            int oldval,
+                            int newval)
+{
+    bool success;
+
+    pthread_mutex_lock(&virAtomicLock);
+
+    if ((success = (*atomic == oldval)))
+        *atomic = newval;
+
+    pthread_mutex_unlock(&virAtomicLock);
+
+    return success;
+}
+
+inline int
+virAtomicIntAdd(volatile int *atomic,
+                int val)
+{
+    int oldval;
+
+    pthread_mutex_lock(&virAtomicLock);
+    oldval = *atomic;
+    *atomic = oldval + val;
+    pthread_mutex_unlock(&virAtomicLock);
+
+    return oldval;
+}
+
+inline unsigned int
+virAtomicIntAnd(volatile unsigned int *atomic,
+                unsigned int val)
+{
+    unsigned int oldval;
+
+    pthread_mutex_lock(&virAtomicLock);
+    oldval = *atomic;
+    *atomic = oldval & val;
+    pthread_mutex_unlock(&virAtomicLock);
+
+    return oldval;
+}
+
+inline unsigned int
+virAtomicIntOr(volatile unsigned int *atomic,
+               unsigned int val)
+{
+    unsigned int oldval;
+
+    pthread_mutex_lock(&virAtomicLock);
+    oldval = *atomic;
+    *atomic = oldval | val;
+    pthread_mutex_unlock(&virAtomicLock);
+
+    return oldval;
+}
+
+inline unsigned int
+virAtomicIntXor(volatile unsigned int *atomic,
+                unsigned int val)
+{
+    unsigned int oldval;
+
+    pthread_mutex_lock(&virAtomicLock);
+    oldval = *atomic;
+    *atomic = oldval ^ val;
+    pthread_mutex_unlock(&virAtomicLock);
+
+    return oldval;
+}
+
+
+#   else
+#    error "No atomic integer impl for this platform"
+#   endif
+#  endif
+
+/* The int/unsigned int casts here ensure that you can
+ * pass either an int or unsigned int to all atomic op
+ * functions, in the same way that we can with GCC
+ * atomic op helpers.
+ */
+#  define virAtomicIntGet(atomic) \
+    virAtomicIntGet((int *)atomic)
+#  define virAtomicIntSet(atomic, val) \
+    virAtomicIntSet((int *)atomic, val)
+#  define virAtomicIntInc(atomic) \
+    virAtomicIntInc((int *)atomic)
+#  define virAtomicIntDecAndTest(atomic) \
+    virAtomicIntDecAndTest((int *)atomic)
+#  define virAtomicIntCompareExchange(atomic, oldval, newval) \
+    virAtomicIntCompareExchange((int *)atomic, oldval, newval)
+#  define virAtomicIntAdd(atomic, val) \
+    virAtomicIntAdd((int *)atomic, val)
+#  define virAtomicIntAnd(atomic, val) \
+    virAtomicIntAnd((unsigned int *)atomic, val)
+#  define virAtomicIntOr(atomic, val) \
+    virAtomicIntOr((unsigned int *)atomic, val)
+#  define virAtomicIntXor(atomic, val) \
+    virAtomicIntXor((unsigned int *)atomic, val)
 
-# if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 1)) || (__GNUC__ > 4)
-#  undef __VIR_ATOMIC_USES_LOCK
 # endif
 
-static inline int virAtomicIntInit(virAtomicIntPtr vaip)
-    ATTRIBUTE_NONNULL(1) ATTRIBUTE_RETURN_CHECK;
-static inline int virAtomicIntRead(virAtomicIntPtr vaip)
-    ATTRIBUTE_NONNULL(1);
-static inline void virAtomicIntSet(virAtomicIntPtr vaip, int val)
-    ATTRIBUTE_NONNULL(1);
-static inline int virAtomicIntAdd(virAtomicIntPtr vaip, int add)
-    ATTRIBUTE_NONNULL(1);
-static inline int virAtomicIntSub(virAtomicIntPtr vaip, int add)
-    ATTRIBUTE_NONNULL(1);
-static inline int virAtomicIntInc(virAtomicIntPtr vaip)
-    ATTRIBUTE_NONNULL(1);
-static inline int virAtomicIntDec(virAtomicIntPtr vaip)
-    ATTRIBUTE_NONNULL(1);
-
-# ifdef __VIR_ATOMIC_USES_LOCK
-
-struct _virAtomicInt {
-    virMutex lock;
-    int value;
-};
-
-static inline int
-virAtomicIntInit(virAtomicIntPtr vaip)
-{
-    vaip->value = 0;
-    return virMutexInit(&vaip->lock);
-}
-
-static inline int
-virAtomicIntAdd(virAtomicIntPtr vaip, int add)
-{
-    int ret;
-
-    virMutexLock(&vaip->lock);
-
-    vaip->value += add;
-    ret = vaip->value;
-
-    virMutexUnlock(&vaip->lock);
-
-    return ret;
-}
-
-static inline int
-virAtomicIntSub(virAtomicIntPtr vaip, int sub)
-{
-    int ret;
-
-    virMutexLock(&vaip->lock);
-
-    vaip->value -= sub;
-    ret = vaip->value;
-
-    virMutexUnlock(&vaip->lock);
-
-    return ret;
-}
-
-# else /* __VIR_ATOMIC_USES_LOCK */
-
-struct _virAtomicInt {
-    int value;
-};
-
-static inline int
-virAtomicIntInit(virAtomicIntPtr vaip)
-{
-    vaip->value = 0;
-    return 0;
-}
-
-static inline int
-virAtomicIntAdd(virAtomicIntPtr vaip, int add)
-{
-    return __sync_add_and_fetch(&vaip->value, add);
-}
-
-static inline int
-virAtomicIntSub(virAtomicIntPtr vaip, int sub)
-{
-    return __sync_sub_and_fetch(&vaip->value, sub);
-}
-
-# endif /* __VIR_ATOMIC_USES_LOCK */
-
-
-
-/* common operations that need no locking or build on others */
-
-
-static inline void
-virAtomicIntSet(virAtomicIntPtr vaip, int value)
-{
-    vaip->value = value;
-}
-
-static inline int
-virAtomicIntRead(virAtomicIntPtr vaip)
-{
-    return *(volatile int *)&vaip->value;
-}
-
-static inline int
-virAtomicIntInc(virAtomicIntPtr vaip)
-{
-    return virAtomicIntAdd(vaip, 1);
-}
-
-static inline int
-virAtomicIntDec(virAtomicIntPtr vaip)
-{
-    return virAtomicIntSub(vaip, 1);
-}
-
 #endif /* __VIR_ATOMIC_H */
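With the full header in place, here is a short usage sketch (not part of the patch; the capped-counter helper below is hypothetical). It leans on the documented return conventions: virAtomicIntGet() is a plain read behind a memory barrier, and virAtomicIntCompareExchange() stores newval only while the current value still equals oldval, which is what makes the retry loop safe against concurrent updates regardless of whether the GCC, Win32 or pthread backend was selected at configure time.

#include <config.h>
#include <stdbool.h>

#include "viratomic.h"

static volatile int counter;

/* Increment 'counter' but never let it exceed 'limit'.
 * Returns true if the increment was applied. */
static bool
incrementUpTo(int limit)
{
    int cur;

    do {
        cur = virAtomicIntGet(&counter);
        if (cur >= limit)
            return false;                  /* already saturated */
    } while (!virAtomicIntCompareExchange(&counter, cur, cur + 1));

    return true;
}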
diff --git a/src/util/virfile.c b/src/util/virfile.c
index 5c99976ed3..6a4386974e 100644
--- a/src/util/virfile.c
+++ b/src/util/virfile.c
@@ -618,7 +618,7 @@ cleanup:
 #else /* __linux__ */
 
 int virFileLoopDeviceAssociate(const char *file,
-                               char **dev)
+                               char **dev ATTRIBUTE_UNUSED)
 {
     virReportSystemError(ENOSYS,
                          _("Unable to associate file %s with loop device"),
diff --git a/tests/Makefile.am b/tests/Makefile.am
index b931cea54e..6a1b18b5d6 100644
--- a/tests/Makefile.am
+++ b/tests/Makefile.am
@@ -89,6 +89,7 @@ test_programs = virshtest sockettest \
 	nodeinfotest virbuftest \
 	commandtest seclabeltest \
 	virhashtest virnetmessagetest virnetsockettest \
+	viratomictest \
 	utiltest virnettlscontexttest shunloadtest \
 	virtimetest viruritest virkeyfiletest \
 	virauthconfigtest
@@ -530,6 +531,10 @@ virhashtest_SOURCES = \
 	virhashtest.c virhashdata.h testutils.h testutils.c
 virhashtest_LDADD = $(LDADDS)
 
+viratomictest_SOURCES = \
+	viratomictest.c testutils.h testutils.c
+viratomictest_LDADD = $(LDADDS)
+
 jsontest_SOURCES = \
 	jsontest.c testutils.h testutils.c
 jsontest_LDADD = $(LDADDS)
diff --git a/tests/viratomictest.c b/tests/viratomictest.c
new file mode 100644
index 0000000000..48f0d24df2
--- /dev/null
+++ b/tests/viratomictest.c
@@ -0,0 +1,181 @@
+/*
+ * Copyright (C) 2011-2012 Red Hat, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; If not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include <config.h>
+
+#include <time.h>
+#include <sched.h>
+
+#include "testutils.h"
+
+#include "viratomic.h"
+#include "virrandom.h"
+#include "threads.h"
+
+static int
+testTypes(const void *data ATTRIBUTE_UNUSED)
+{
+    unsigned int u, u2;
+    int s, s2;
+    bool res;
+
+#define virAssertCmpInt(a, op, b) \
+    if (!((a) op (b))) \
+        return -1;
+
+    virAtomicIntSet(&u, 5);
+    u2 = virAtomicIntGet(&u);
+    virAssertCmpInt(u2, ==, 5);
+
+    res = virAtomicIntCompareExchange(&u, 6, 7);
+    if (res)
+        return -1;
+    virAssertCmpInt(u, ==, 5);
+
+    virAssertCmpInt(virAtomicIntAdd(&u, 1), ==, 5);
+    virAssertCmpInt(u, ==, 6);
+
+    virAssertCmpInt(virAtomicIntInc(&u), ==, 7);
+    virAssertCmpInt(u, ==, 7);
+
+    res = virAtomicIntDecAndTest(&u);
+    if (res)
+        return -1;
+    virAssertCmpInt(u, ==, 6);
+
+    u2 = virAtomicIntAnd(&u, 5);
+    virAssertCmpInt(u2, ==, 6);
+    virAssertCmpInt(u, ==, 4);
+
+    u2 = virAtomicIntOr(&u, 8);
+    virAssertCmpInt(u2, ==, 4);
+    virAssertCmpInt(u, ==, 12);
+
+    u2 = virAtomicIntXor(&u, 4);
+    virAssertCmpInt(u2, ==, 12);
+    virAssertCmpInt(u, ==, 8);
+
+    virAtomicIntSet(&s, 5);
+    s2 = virAtomicIntGet(&s);
+    virAssertCmpInt(s2, ==, 5);
+
+    res = virAtomicIntCompareExchange(&s, 6, 7);
+    if (res)
+        return -1;
+    virAssertCmpInt(s, ==, 5);
+
+    virAtomicIntAdd(&s, 1);
+    virAssertCmpInt(s, ==, 6);
+
+    virAtomicIntInc(&s);
+    virAssertCmpInt(s, ==, 7);
+
+    res = virAtomicIntDecAndTest(&s);
+    if (res)
+        return -1;
+    virAssertCmpInt(s, ==, 6);
+
+    s2 = virAtomicIntAnd(&s, 5);
+    virAssertCmpInt(s2, ==, 6);
+    virAssertCmpInt(s, ==, 4);
+
+    s2 = virAtomicIntOr(&s, 8);
+    virAssertCmpInt(s2, ==, 4);
+    virAssertCmpInt(s, ==, 12);
+
+    s2 = virAtomicIntXor(&s, 4);
+    virAssertCmpInt(s2, ==, 12);
+    virAssertCmpInt(s, ==, 8);
+
+    return 0;
+}
+
+#define THREADS 10
+#define ROUNDS 10000
+
+volatile int bucket[THREADS];
+volatile int atomic;
+
+static void
+thread_func(void *data)
+{
+    int idx = (int)(long)data;
+    int i;
+    int d;
+
+    for (i = 0; i < ROUNDS; i++) {
+        d = virRandomBits(7);
+        bucket[idx] += d;
+        virAtomicIntAdd(&atomic, d);
+#ifdef WIN32
+        SleepEx(0, 0);
+#else
+        sched_yield();
+#endif
+    }
+}
+
+static int
+testThreads(const void *data ATTRIBUTE_UNUSED)
+{
+    int sum;
+    int i;
+    virThread threads[THREADS];
+
+    atomic = 0;
+    for (i = 0; i < THREADS; i++)
+        bucket[i] = 0;
+
+    for (i = 0; i < THREADS; i++) {
+        if (virThreadCreate(&(threads[i]), true, thread_func, (void*)(long)i) < 0)
+            return -1;
+    }
+
+    for (i = 0; i < THREADS; i++)
+        virThreadJoin(&threads[i]);
+
+    sum = 0;
+    for (i = 0; i < THREADS; i++)
+        sum += bucket[i];
+
+    if (sum != atomic)
+        return -1;
+
+    return 0;
+}
+
+static int
+mymain(void)
+{
+    int ret = 0;
+
+    if (virRandomInitialize(time(NULL)) < 0)
+        return -1;
+    if (virThreadInitialize() < 0)
+        return -1;
+
+    if (virtTestRun("types", 1, testTypes, NULL) < 0)
+        ret = -1;
+    if (virtTestRun("threads", 1, testThreads, NULL) < 0)
+        ret = -1;
+
+    return ret;
+}
+
+VIRT_TEST_MAIN(mymain)
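As a footnote to the "types" test above (an illustration, not part of the patch), the return-value conventions it exercises can be summarised in one place: virAtomicIntAdd() reports the value before the addition, while virAtomicIntInc() reports the value after the increment, mirroring __sync_fetch_and_add() versus __sync_add_and_fetch() on the GCC path.

#include <config.h>

#include "viratomic.h"

static void
demoReturnValues(void)
{
    volatile int v;
    int before, after;

    virAtomicIntSet(&v, 5);

    before = virAtomicIntAdd(&v, 1);   /* before == 5, v is now 6 */
    after = virAtomicIntInc(&v);       /* after == 7, v is now 7 */

    (void)before;
    (void)after;
}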