From 8024199176f5092a9248acf99b88bbe890406bb9 Mon Sep 17 00:00:00 2001
From: Jonathan Wakely
Date: Tue, 9 Oct 2012 08:16:13 +0000
Subject: [PATCH] re PR libstdc++/54754 ([parallel mode] 'make check-parallel'
 only works on x86-64)

	PR libstdc++/54754
	* include/parallel/compatibility.h: Use atomic built-ins when they
	are lock-free.

From-SVN: r192240
---
 libstdc++-v3/ChangeLog                        |   6 +
 libstdc++-v3/include/parallel/compatibility.h | 172 +++++-------------
 2 files changed, 50 insertions(+), 128 deletions(-)

diff --git a/libstdc++-v3/ChangeLog b/libstdc++-v3/ChangeLog
index 0b03bacd894..b9f3533798c 100644
--- a/libstdc++-v3/ChangeLog
+++ b/libstdc++-v3/ChangeLog
@@ -1,3 +1,9 @@
+2012-10-09  Jonathan Wakely
+
+	PR libstdc++/54754
+	* include/parallel/compatibility.h: Use atomic built-ins when they are
+	lock-free.
+
 2012-10-09  Uros Bizjak
 
 	* testsuite/util/testsuite_abi.cc (check_version): Add CXXABI_1.3.7.
diff --git a/libstdc++-v3/include/parallel/compatibility.h b/libstdc++-v3/include/parallel/compatibility.h
index 03506d84de3..a58e65fe60c 100644
--- a/libstdc++-v3/include/parallel/compatibility.h
+++ b/libstdc++-v3/include/parallel/compatibility.h
@@ -51,154 +51,70 @@ __attribute((dllimport)) void __attribute__((stdcall)) Sleep (unsigned long);
 namespace __gnu_parallel
 {
-  // These atomic functions only work on integers
-
-  /** @brief Add a value to a variable, atomically.
-   *
-   *  Implementation is heavily platform-dependent.
-   *  @param __ptr Pointer to a 32-bit signed integer.
-   *  @param __addend Value to add.
-   */
-  inline int32_t
-  __fetch_and_add_32(volatile int32_t* __ptr, int32_t __addend)
-  {
-    return __atomic_fetch_add(__ptr, __addend, __ATOMIC_ACQ_REL);
-  }
-
-  /** @brief Add a value to a variable, atomically.
-   *
-   *  Implementation is heavily platform-dependent.
-   *  @param __ptr Pointer to a 64-bit signed integer.
-   *  @param __addend Value to add.
-   */
-  inline int64_t
-  __fetch_and_add_64(volatile int64_t* __ptr, int64_t __addend)
-  {
-#if defined(__x86_64)
-    return __atomic_fetch_add(__ptr, __addend, __ATOMIC_ACQ_REL);
-#elif defined(__i386) &&			\
-  (defined(__i686) || defined(__pentium4) || defined(__athlon)	\
-   || defined(__k8) || defined(__core2))
-    return __atomic_fetch_add(__ptr, __addend, __ATOMIC_ACQ_REL);
-#else	//fallback, slow
-#if defined(__i386)
-    // XXX doesn'__t work with -march=native
-    //#warning "please compile with -march=i686 or better"
-#endif
-#pragma message("slow __fetch_and_add_64")
-    int64_t __res;
-#pragma omp critical
+  template<typename _Tp>
+    inline _Tp
+    __add_omp(volatile _Tp* __ptr, _Tp __addend)
     {
-      __res = *__ptr;
-      *(__ptr) += __addend;
+      int64_t __res;
+#pragma omp critical
+      {
+	__res = *__ptr;
+	*(__ptr) += __addend;
+      }
+      return __res;
     }
-    return __res;
-#endif
-  }
 
   /** @brief Add a value to a variable, atomically.
    *
-   *  Implementation is heavily platform-dependent.
    *  @param __ptr Pointer to a signed integer.
    *  @param __addend Value to add.
    */
   template<typename _Tp>
-  inline _Tp
-  __fetch_and_add(volatile _Tp* __ptr, _Tp __addend)
-  {
-    if (sizeof(_Tp) == sizeof(int32_t))
-      return
-	(_Tp)__fetch_and_add_32((volatile int32_t*) __ptr, (int32_t)__addend);
-    else if (sizeof(_Tp) == sizeof(int64_t))
-      return
-	(_Tp)__fetch_and_add_64((volatile int64_t*) __ptr, (int64_t)__addend);
-    else
-      _GLIBCXX_PARALLEL_ASSERT(false);
-  }
-
-  /** @brief Compare @c *__ptr and @c __comparand. If equal, let @c
-   *  *__ptr=__replacement and return @c true, return @c false otherwise.
-   *
-   *  Implementation is heavily platform-dependent.
-   *  @param __ptr Pointer to 32-bit signed integer.
-   *  @param __comparand Compare value.
-   *  @param __replacement Replacement value.
-   */
-  inline bool
-  __compare_and_swap_32(volatile int32_t* __ptr, int32_t __comparand,
-			int32_t __replacement)
-  {
-    return __atomic_compare_exchange_n(__ptr, &__comparand, __replacement,
-				       false, __ATOMIC_ACQ_REL,
-				       __ATOMIC_RELAXED);
-  }
-
-  /** @brief Compare @c *__ptr and @c __comparand. If equal, let @c
-   *  *__ptr=__replacement and return @c true, return @c false otherwise.
-   *
-   *  Implementation is heavily platform-dependent.
-   *  @param __ptr Pointer to 64-bit signed integer.
-   *  @param __comparand Compare value.
-   *  @param __replacement Replacement value.
-   */
-  inline bool
-  __compare_and_swap_64(volatile int64_t* __ptr, int64_t __comparand,
-			int64_t __replacement)
-  {
-#if defined(__x86_64)
-    return __atomic_compare_exchange_n(__ptr, &__comparand, __replacement,
-				       false, __ATOMIC_ACQ_REL,
-				       __ATOMIC_RELAXED);
-#elif defined(__i386) &&			\
-  (defined(__i686) || defined(__pentium4) || defined(__athlon)	\
-   || defined(__k8) || defined(__core2))
-    return __atomic_compare_exchange_n(__ptr, &__comparand, __replacement,
-				       false, __ATOMIC_ACQ_REL,
-				       __ATOMIC_RELAXED);
-#else
-#if defined(__i386)
-    // XXX -march=native
-    //#warning "please compile with -march=i686 or better"
-#endif
-#pragma message("slow __compare_and_swap_64")
-    bool __res = false;
-#pragma omp critical
+    inline _Tp
+    __fetch_and_add(volatile _Tp* __ptr, _Tp __addend)
     {
-      if (*__ptr == __comparand)
-	{
-	  *__ptr = __replacement;
-	  __res = true;
-	}
+      if (__atomic_always_lock_free(sizeof(_Tp), __ptr))
+	return __atomic_fetch_add(__ptr, __addend, __ATOMIC_ACQ_REL);
+      return __add_omp(__ptr, __addend);
     }
-    return __res;
-#endif
-  }
 
-  /** @brief Compare @c *__ptr and @c __comparand. If equal, let @c
+  template<typename _Tp>
+    inline bool
+    __cas_omp(volatile _Tp* __ptr, _Tp __comparand, _Tp __replacement)
+    {
+      bool __res = false;
+#pragma omp critical
+      {
+	if (*__ptr == __comparand)
+	  {
+	    *__ptr = __replacement;
+	    __res = true;
+	  }
+      }
+      return __res;
+    }
+
+  /** @brief Compare-and-swap
+   *
+   * Compare @c *__ptr and @c __comparand. If equal, let @c
    *  *__ptr=__replacement and return @c true, return @c false otherwise.
    *
-   *  Implementation is heavily platform-dependent.
    *  @param __ptr Pointer to signed integer.
    *  @param __comparand Compare value.
    *  @param __replacement Replacement value.
    */
   template<typename _Tp>
-  inline bool
-  __compare_and_swap(volatile _Tp* __ptr, _Tp __comparand, _Tp __replacement)
-  {
-    if (sizeof(_Tp) == sizeof(int32_t))
-      return __compare_and_swap_32((volatile int32_t*) __ptr,
-				   (int32_t)__comparand,
-				   (int32_t)__replacement);
-    else if (sizeof(_Tp) == sizeof(int64_t))
-      return __compare_and_swap_64((volatile int64_t*) __ptr,
-				   (int64_t)__comparand,
-				   (int64_t)__replacement);
-    else
-      _GLIBCXX_PARALLEL_ASSERT(false);
-  }
+    inline bool
+    __compare_and_swap(volatile _Tp* __ptr, _Tp __comparand, _Tp __replacement)
+    {
+      if (__atomic_always_lock_free(sizeof(_Tp), __ptr))
+	return __atomic_compare_exchange_n(__ptr, &__comparand, __replacement,
+					   false, __ATOMIC_ACQ_REL,
+					   __ATOMIC_RELAXED);
+      return __cas_omp(__ptr, __comparand, __replacement);
+    }
 
-  /** @brief Yield the control to another thread, without waiting for
+  /** @brief Yield control to another thread, without waiting for
    *  the end of the time slice.
    */
   inline void
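
--
Note (not part of the commit): the core of this change is that
__fetch_and_add and __compare_and_swap now dispatch on
__atomic_always_lock_free, a GCC built-in that folds to a compile-time
constant. That makes the old per-architecture #if ladders (__x86_64,
__i686, __pentium4, __athlon, __k8, __core2) unnecessary: where atomics
of the given size are always lock-free on the target, the __atomic_*
built-in is used directly, and the #pragma omp critical fallback is
only reached otherwise. The standalone sketch below shows the same
dispatch pattern outside libstdc++; the demo_* names are illustrative
only and do not appear in the patch. It should build with:
g++ -fopenmp demo.cc

// demo.cc - minimal sketch of the lock-free-or-OpenMP-fallback
// dispatch introduced by the patch; not part of libstdc++ itself.
#include <cstdint>
#include <cstdio>

template<typename Tp>
  Tp
  demo_add_omp(volatile Tp* ptr, Tp addend)
  {
    Tp res;
    // Mutual exclusion via OpenMP; the pragma is ignored (and the code
    // runs single-threaded) if compiled without -fopenmp.
#pragma omp critical
    {
      res = *ptr;
      *ptr += addend;
    }
    return res;
  }

template<typename Tp>
  Tp
  demo_fetch_and_add(volatile Tp* ptr, Tp addend)
  {
    // Compile-time query: true only if atomic operations on objects of
    // this size are always lock-free on the target, so the untaken
    // branch can be optimized away entirely.
    if (__atomic_always_lock_free(sizeof(Tp), ptr))
      return __atomic_fetch_add(ptr, addend, __ATOMIC_ACQ_REL);
    // Slow path, e.g. for a 64-bit counter on a plain i386 target.
    return demo_add_omp(ptr, addend);
  }

int main()
{
  volatile std::int64_t counter = 0;
  demo_fetch_and_add(&counter, std::int64_t(42));
  std::printf("%lld\n", static_cast<long long>(counter));  // prints 42
}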