summary | refs | log | tree | commit | diff | stats
path: root/libstdc++-v3/include/std/mutex
diff options
context:
space:
mode:
author	bkoz <bkoz@138bc75d-0d04-0410-961f-82ee72b054a4>	2009-02-10 08:29:57 +0000
committer	bkoz <bkoz@138bc75d-0d04-0410-961f-82ee72b054a4>	2009-02-10 08:29:57 +0000
commit	20deb9d4d2f2a37450bef8c4ec4e2b94e2dc56a4 (patch)
tree	19f878843cba38e4bd49fe8d61f7521c7fad4342 /libstdc++-v3/include/std/mutex
parent	1bcbf21afd2e410c18eaae217fac3d536b2fe241 (diff)
download	ppe42-gcc-20deb9d4d2f2a37450bef8c4ec4e2b94e2dc56a4.tar.gz
download	ppe42-gcc-20deb9d4d2f2a37450bef8c4ec4e2b94e2dc56a4.zip
2009-02-09 Benjamin Kosnik <bkoz@redhat.com>
* include/std/condition_variable (condition_variable): Remove _M_internal_mutex. Add private __native_type typedef. * src/condition_variable.cc (condition_variable::notify_one): Remove _M_internal_mutex use. Use typedef. (condition_variable::notify_all): Same. * include/std/mutex (mutex): Add private __native_type typedef. Use it. (recursive_mutex): Same. (timed_mutex): Same. (recursive_timed_mutex): Same. (once_flag): Make __native_type typedef private. * include/std/thread (this_thread): Add minimal markup. * testsuite/30_threads/condition_variable_any/cons/assign_neg.cc: Adjust line numbers. * testsuite/30_threads/condition_variable_any/cons/copy_neg.cc: Same. * testsuite/30_threads/mutex/cons/assign_neg.cc: Same. * testsuite/30_threads/mutex/cons/copy_neg.cc: Same. * testsuite/30_threads/timed_mutex/cons/assign_neg.cc: Same. * testsuite/30_threads/timed_mutex/cons/copy_neg.cc: Same. * testsuite/30_threads/thread/cons/assign_neg.cc: Same. * testsuite/30_threads/thread/cons/copy_neg.cc: Same. * testsuite/30_threads/recursive_mutex/cons/assign_neg.cc: Same. * testsuite/30_threads/recursive_mutex/cons/copy_neg.cc: Same. * testsuite/30_threads/condition_variable/cons/assign_neg.cc: Same. * testsuite/30_threads/condition_variable/cons/copy_neg.cc: Same. * testsuite/30_threads/recursive_timed_mutex/cons/assign_neg.cc: Same. * testsuite/30_threads/recursive_timed_mutex/cons/copy_neg.cc: Same. * testsuite/util/thread/all.h: Testsuite utilities for testing thread. * testsuite/30_threads/condition_variable_any/native_handle/ typesizes.cc: New. * testsuite/30_threads/mutex/native_handle/typesizes.cc: Same. * testsuite/30_threads/timed_mutex/native_handle/typesizes.cc: Same. * testsuite/30_threads/thread/native_handle/typesizes.cc: Same. * testsuite/30_threads/recursive_mutex/native_handle/typesizes.cc: Same. * testsuite/30_threads/condition_variable/native_handle/ typesizes.cc: Same. * testsuite/30_threads/recursive_timed_mutex/native_handle/ typesizes.cc: Same. 
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@144053 138bc75d-0d04-0410-961f-82ee72b054a4
Diffstat (limited to 'libstdc++-v3/include/std/mutex')
-rw-r--r--	libstdc++-v3/include/std/mutex	224
1 file changed, 114 insertions, 110 deletions
diff --git a/libstdc++-v3/include/std/mutex b/libstdc++-v3/include/std/mutex
index 4964bb3bcbc..22aff881335 100644
--- a/libstdc++-v3/include/std/mutex
+++ b/libstdc++-v3/include/std/mutex
@@ -59,14 +59,17 @@ namespace std
/// mutex
class mutex
{
+ typedef __gthread_mutex_t __native_type;
+ __native_type _M_mutex;
+
public:
- typedef __gthread_mutex_t* native_handle_type;
+ typedef __native_type* native_handle_type;
mutex()
{
// XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
#ifdef __GTHREAD_MUTEX_INIT
- __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
+ __native_type __tmp = __GTHREAD_MUTEX_INIT;
_M_mutex = __tmp;
#else
__GTHREAD_MUTEX_INIT_FUNCTION(&_M_mutex);
@@ -103,22 +106,22 @@ namespace std
native_handle_type
native_handle()
{ return &_M_mutex; }
-
- private:
- __gthread_mutex_t _M_mutex;
};
/// recursive_mutex
class recursive_mutex
{
+ typedef __gthread_recursive_mutex_t __native_type;
+ __native_type _M_mutex;
+
public:
- typedef __gthread_recursive_mutex_t* native_handle_type;
+ typedef __native_type* native_handle_type;
recursive_mutex()
{
// XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
#ifdef __GTHREAD_RECURSIVE_MUTEX_INIT
- __gthread_recursive_mutex_t __tmp = __GTHREAD_RECURSIVE_MUTEX_INIT;
+ __native_type __tmp = __GTHREAD_RECURSIVE_MUTEX_INIT;
_M_mutex = __tmp;
#else
__GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION(&_M_mutex);
@@ -155,21 +158,28 @@ namespace std
native_handle_type
native_handle()
{ return &_M_mutex; }
-
- private:
- __gthread_recursive_mutex_t _M_mutex;
};
/// timed_mutex
class timed_mutex
- {
+ {
+ typedef __gthread_mutex_t __native_type;
+
+#ifdef _GLIBCXX_USE_CLOCK_MONOTONIC
+ typedef chrono::monotonic_clock __clock_t;
+#else
+ typedef chrono::high_resolution_clock __clock_t;
+#endif
+
+ __native_type _M_mutex;
+
public:
- typedef __gthread_mutex_t* native_handle_type;
+ typedef __native_type* native_handle_type;
timed_mutex()
{
#ifdef __GTHREAD_MUTEX_INIT
- __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
+ __native_type __tmp = __GTHREAD_MUTEX_INIT;
_M_mutex = __tmp;
#else
__GTHREAD_MUTEX_INIT_FUNCTION(&_M_mutex);
@@ -204,19 +214,19 @@ namespace std
template <class _Clock, class _Duration>
bool
try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
- {
+ {
chrono::time_point<_Clock, chrono::seconds> __s =
- chrono::time_point_cast<chrono::seconds>(__atime);
+ chrono::time_point_cast<chrono::seconds>(__atime);
chrono::nanoseconds __ns =
- chrono::duration_cast<chrono::nanoseconds>(__atime - __s);
+ chrono::duration_cast<chrono::nanoseconds>(__atime - __s);
- __gthread_time_t __ts = {
- static_cast<std::time_t>(__s.time_since_epoch().count()),
- static_cast<long>(__ns.count())
- };
+ __gthread_time_t __ts = {
+ static_cast<std::time_t>(__s.time_since_epoch().count()),
+ static_cast<long>(__ns.count())
+ };
- return !__gthread_mutex_timedlock(&_M_mutex, &__ts);
+ return !__gthread_mutex_timedlock(&_M_mutex, &__ts);
}
void
@@ -229,50 +239,52 @@ namespace std
native_handle_type
native_handle()
{ return &_M_mutex; }
-
- private:
- __gthread_mutex_t _M_mutex;
-
-#ifdef _GLIBCXX_USE_CLOCK_MONOTONIC
- typedef chrono::monotonic_clock __clock_t;
-#else
- typedef chrono::high_resolution_clock __clock_t;
-#endif
+ private:
template<typename _Rep, typename _Period>
typename enable_if<
- ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
+ ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
__try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
{
- __clock_t::time_point __atime = __clock_t::now()
- + chrono::duration_cast<__clock_t::duration>(__rtime);
+ __clock_t::time_point __atime = __clock_t::now()
+ + chrono::duration_cast<__clock_t::duration>(__rtime);
- return try_lock_until(__atime);
+ return try_lock_until(__atime);
}
template <typename _Rep, typename _Period>
typename enable_if<
- !ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
+ !ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
__try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
{
- __clock_t::time_point __atime = __clock_t::now()
+ __clock_t::time_point __atime = __clock_t::now()
+ ++chrono::duration_cast<__clock_t::duration>(__rtime);
- return try_lock_until(__atime);
+ return try_lock_until(__atime);
}
};
/// recursive_timed_mutex
class recursive_timed_mutex
{
+ typedef __gthread_recursive_mutex_t __native_type;
+
+#ifdef _GLIBCXX_USE_CLOCK_MONOTONIC
+ typedef chrono::monotonic_clock __clock_t;
+#else
+ typedef chrono::high_resolution_clock __clock_t;
+#endif
+
+ __native_type _M_mutex;
+
public:
- typedef __gthread_recursive_mutex_t* native_handle_type;
+ typedef __native_type* native_handle_type;
recursive_timed_mutex()
{
// XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
#ifdef __GTHREAD_RECURSIVE_MUTEX_INIT
- __gthread_recursive_mutex_t __tmp = __GTHREAD_RECURSIVE_MUTEX_INIT;
+ __native_type __tmp = __GTHREAD_RECURSIVE_MUTEX_INIT;
_M_mutex = __tmp;
#else
__GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION(&_M_mutex);
@@ -307,19 +319,19 @@ namespace std
template <class _Clock, class _Duration>
bool
try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
- {
+ {
chrono::time_point<_Clock, chrono::seconds> __s =
- chrono::time_point_cast<chrono::seconds>(__atime);
+ chrono::time_point_cast<chrono::seconds>(__atime);
chrono::nanoseconds __ns =
- chrono::duration_cast<chrono::nanoseconds>(__atime - __s);
+ chrono::duration_cast<chrono::nanoseconds>(__atime - __s);
- __gthread_time_t __ts = {
- static_cast<std::time_t>(__s.time_since_epoch().count()),
- static_cast<long>(__ns.count())
- };
+ __gthread_time_t __ts = {
+ static_cast<std::time_t>(__s.time_since_epoch().count()),
+ static_cast<long>(__ns.count())
+ };
- return !__gthread_recursive_mutex_timedlock(&_M_mutex, &__ts);
+ return !__gthread_recursive_mutex_timedlock(&_M_mutex, &__ts);
}
void
@@ -334,34 +346,26 @@ namespace std
{ return &_M_mutex; }
private:
- __gthread_recursive_mutex_t _M_mutex;
-
-#ifdef _GLIBCXX_USE_CLOCK_MONOTONIC
- typedef chrono::monotonic_clock __clock_t;
-#else
- typedef chrono::high_resolution_clock __clock_t;
-#endif
-
template<typename _Rep, typename _Period>
typename enable_if<
- ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
+ ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
__try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
{
- __clock_t::time_point __atime = __clock_t::now()
- + chrono::duration_cast<__clock_t::duration>(__rtime);
+ __clock_t::time_point __atime = __clock_t::now()
+ + chrono::duration_cast<__clock_t::duration>(__rtime);
- return try_lock_until(__atime);
+ return try_lock_until(__atime);
}
template <typename _Rep, typename _Period>
typename enable_if<
- !ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
+ !ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
__try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
{
- __clock_t::time_point __atime = __clock_t::now()
+ __clock_t::time_point __atime = __clock_t::now()
+ ++chrono::duration_cast<__clock_t::duration>(__rtime);
- return try_lock_until(__atime);
+ return try_lock_until(__atime);
}
};
@@ -418,7 +422,7 @@ namespace std
{
public:
typedef _Mutex mutex_type;
-
+
unique_lock()
: _M_device(0), _M_owns(false)
{ }
@@ -445,13 +449,13 @@ namespace std
}
template<typename _Clock, typename _Duration>
- unique_lock(mutex_type& __m,
+ unique_lock(mutex_type& __m,
const chrono::time_point<_Clock, _Duration>& __atime)
: _M_device(&__m), _M_owns(_M_device->try_lock_until(__atime))
{ }
template<typename _Rep, typename _Period>
- unique_lock(mutex_type& __m,
+ unique_lock(mutex_type& __m,
const chrono::duration<_Rep, _Period>& __rtime)
: _M_device(&__m), _M_owns(_M_device->try_lock_for(__rtime))
{ }
@@ -474,14 +478,14 @@ namespace std
unique_lock& operator=(unique_lock&& __u)
{
- if(_M_owns)
+ if(_M_owns)
unlock();
-
+
unique_lock(std::move(__u)).swap(*this);
__u._M_device = 0;
__u._M_owns = false;
-
+
return *this;
}
@@ -503,20 +507,20 @@ namespace std
try_lock()
{
if (!_M_device)
- __throw_system_error((int)errc::operation_not_permitted);
- else if (_M_owns)
- __throw_system_error((int)errc::resource_deadlock_would_occur);
- else
+ __throw_system_error((int)errc::operation_not_permitted);
+ else if (_M_owns)
+ __throw_system_error((int)errc::resource_deadlock_would_occur);
+ else
{
- _M_owns = _M_device->try_lock();
+ _M_owns = _M_device->try_lock();
return _M_owns;
}
}
template<typename _Clock, typename _Duration>
- bool
- try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
- {
+ bool
+ try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
+ {
if (!_M_device)
__throw_system_error((int)errc::operation_not_permitted);
else if (_M_owns)
@@ -527,11 +531,11 @@ namespace std
return _M_owns;
}
}
-
+
template<typename _Rep, typename _Period>
bool
try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
- {
+ {
if (!_M_device)
__throw_system_error((int)errc::operation_not_permitted);
else if (_M_owns)
@@ -546,15 +550,15 @@ namespace std
void
unlock()
{
- if (!_M_owns)
- __throw_system_error((int)errc::operation_not_permitted);
- else if (_M_device)
- {
- _M_device->unlock();
- _M_owns = false;
- }
+ if (!_M_owns)
+ __throw_system_error((int)errc::operation_not_permitted);
+ else if (_M_device)
+ {
+ _M_device->unlock();
+ _M_owns = false;
+ }
}
-
+
void
swap(unique_lock&& __u)
{
@@ -606,30 +610,30 @@ namespace std
struct __unlock_impl
{
template<typename... _Lock>
- static void
- __do_unlock(tuple<_Lock&...>& __locks)
- {
+ static void
+ __do_unlock(tuple<_Lock&...>& __locks)
+ {
std::get<_Idx>(__locks).unlock();
__unlock_impl<_Idx - 1>::__do_unlock(__locks);
}
};
-
+
template<>
struct __unlock_impl<-1>
{
template<typename... _Lock>
- static void
- __do_unlock(tuple<_Lock&...>&)
- { }
+ static void
+ __do_unlock(tuple<_Lock&...>&)
+ { }
};
template<int _Idx, bool _Continue = true>
struct __try_lock_impl
{
template<typename... _Lock>
- static int
- __do_try_lock(tuple<_Lock&...>& __locks)
- {
+ static int
+ __do_try_lock(tuple<_Lock&...>& __locks)
+ {
if(std::get<_Idx>(__locks).try_lock())
{
return __try_lock_impl<_Idx + 1,
@@ -642,14 +646,14 @@ namespace std
}
}
};
-
+
template<int _Idx>
struct __try_lock_impl<_Idx, false>
{
template<typename... _Lock>
- static int
- __do_try_lock(tuple<_Lock&...>& __locks)
- {
+ static int
+ __do_try_lock(tuple<_Lock&...>& __locks)
+ {
if(std::get<_Idx>(__locks).try_lock())
return -1;
else
@@ -659,12 +663,12 @@ namespace std
}
}
};
-
+
/** @brief Generic try_lock.
* @param __l1 Meets Mutex requirements (try_lock() may throw).
* @param __l2 Meets Mutex requirements (try_lock() may throw).
* @param __l3 Meets Mutex requirements (try_lock() may throw).
- * @return Returns -1 if all try_lock() calls return true. Otherwise returns
+ * @return Returns -1 if all try_lock() calls return true. Otherwise returns
* a 0-based index corresponding to the argument that returned false.
* @post Either all arguments are locked, or none will be.
*
@@ -685,23 +689,23 @@ namespace std
/// once_flag
struct once_flag
{
+ private:
typedef __gthread_once_t __native_type;
+ __native_type _M_once;
+ public:
once_flag()
{
- __gthread_once_t __tmp = __GTHREAD_ONCE_INIT;
+ __native_type __tmp = __GTHREAD_ONCE_INIT;
_M_once = __tmp;
}
-
+
once_flag(const once_flag&) = delete;
once_flag& operator=(const once_flag&) = delete;
template<typename _Callable, typename... _Args>
friend void
call_once(once_flag& __once, _Callable __f, _Args&&... __args);
-
- private:
- __native_type _M_once;
};
#ifdef _GLIBCXX_HAVE_TLS
@@ -709,7 +713,7 @@ namespace std
extern __thread void (*__once_call)();
template<typename _Callable>
- inline void
+ inline void
__once_call_impl()
{
(*(_Callable*)__once_callable)();
@@ -736,10 +740,10 @@ namespace std
__functor_lock.lock();
__once_functor = bind(__f, __args...);
#endif
-
+
int __e = __gthread_once(&(__once._M_once), &__once_proxy);
-#ifndef _GLIBCXX_HAVE_TLS
+#ifndef _GLIBCXX_HAVE_TLS
if (__functor_lock)
__functor_lock.unlock();
#endif
OpenPOWER on IntegriCloud