#ifdef _GLIBCXX_USE_C99_STDINT_TR1
#ifdef _GLIBCXX_HAS_GTHREADS
+#if __cplusplus > 201402L
+// TODO: #define __cpp_lib_shared_mutex 201505
+ class shared_mutex;
+#endif
+
#define __cpp_lib_shared_timed_mutex 201402
+ class shared_timed_mutex;
- /// shared_timed_mutex
- class shared_timed_mutex
+#if _GLIBCXX_USE_PTHREAD_RWLOCK_T
+ /// A shared mutex type implemented using pthread_rwlock_t.
+ class __shared_mutex_pthread
{
-#if _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK
- typedef chrono::system_clock __clock_t;
+ friend class shared_timed_mutex;
#ifdef PTHREAD_RWLOCK_INITIALIZER
pthread_rwlock_t _M_rwlock = PTHREAD_RWLOCK_INITIALIZER;
public:
- shared_timed_mutex() = default;
- ~shared_timed_mutex() = default;
+ __shared_mutex_pthread() = default;
+ ~__shared_mutex_pthread() = default;
#else
pthread_rwlock_t _M_rwlock;
public:
- shared_timed_mutex()
+ __shared_mutex_pthread()
{
int __ret = pthread_rwlock_init(&_M_rwlock, NULL);
	  if (__ret == ENOMEM)
	    __throw_bad_alloc();
	  else if (__ret == EAGAIN)
	    __throw_system_error(int(errc::resource_unavailable_try_again));
	  else if (__ret == EPERM)
	    __throw_system_error(int(errc::operation_not_permitted));
	  // Errors not handled: EBUSY, EINVAL
	  _GLIBCXX_DEBUG_ASSERT(__ret == 0);
}
- ~shared_timed_mutex()
+ ~__shared_mutex_pthread()
{
int __ret __attribute((__unused__)) = pthread_rwlock_destroy(&_M_rwlock);
	  // Errors not handled: EBUSY, EINVAL
	  _GLIBCXX_DEBUG_ASSERT(__ret == 0);
	}
#endif
- shared_timed_mutex(const shared_timed_mutex&) = delete;
- shared_timed_mutex& operator=(const shared_timed_mutex&) = delete;
-
- // Exclusive ownership
+ __shared_mutex_pthread(const __shared_mutex_pthread&) = delete;
+ __shared_mutex_pthread& operator=(const __shared_mutex_pthread&) = delete;

      void
      lock()
      {
	int __ret = pthread_rwlock_wrlock(&_M_rwlock);
	if (__ret == EDEADLK)
	  __throw_system_error(int(errc::resource_deadlock_would_occur));
	// Errors not handled: EINVAL
	_GLIBCXX_DEBUG_ASSERT(__ret == 0);
      }

      bool
      try_lock()
      {
	int __ret = pthread_rwlock_trywrlock(&_M_rwlock);
	if (__ret == EBUSY) return false;
	// Errors not handled: EINVAL
	_GLIBCXX_DEBUG_ASSERT(__ret == 0);
	return true;
      }
- template<typename _Rep, typename _Period>
- bool
- try_lock_for(const chrono::duration<_Rep, _Period>& __rel_time)
- {
- return try_lock_until(__clock_t::now() + __rel_time);
- }
-
- template<typename _Duration>
- bool
- try_lock_until(const chrono::time_point<__clock_t, _Duration>& __atime)
- {
- auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
- auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);
-
- __gthread_time_t __ts =
- {
- static_cast<std::time_t>(__s.time_since_epoch().count()),
- static_cast<long>(__ns.count())
- };
-
- int __ret = pthread_rwlock_timedwrlock(&_M_rwlock, &__ts);
- // On self-deadlock, we just fail to acquire the lock. Technically,
- // the program violated the precondition.
- if (__ret == ETIMEDOUT || __ret == EDEADLK)
- return false;
- // Errors not handled: EINVAL
- _GLIBCXX_DEBUG_ASSERT(__ret == 0);
- return true;
- }
-
- template<typename _Clock, typename _Duration>
- bool
- try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
- {
- // DR 887 - Sync unknown clock to known clock.
- const typename _Clock::time_point __c_entry = _Clock::now();
- const __clock_t::time_point __s_entry = __clock_t::now();
- const auto __delta = __abs_time - __c_entry;
- const auto __s_atime = __s_entry + __delta;
- return try_lock_until(__s_atime);
- }
-
      void
      unlock()
      {
	int __ret __attribute((__unused__)) = pthread_rwlock_unlock(&_M_rwlock);
	// Errors not handled: EPERM, EBUSY, EINVAL
	_GLIBCXX_DEBUG_ASSERT(__ret == 0);
      }

      // Shared ownership

      void
      lock_shared()
      {
	int __ret;
	// We retry if we exceeded the maximum number of read locks supported
	// by the POSIX implementation; this can result in busy-waiting, but
	// this is okay based on the current specification of forward
	// progress guarantees by the standard.
	do
	  __ret = pthread_rwlock_rdlock(&_M_rwlock);
	while (__ret == EAGAIN);
	if (__ret == EDEADLK)
	  __throw_system_error(int(errc::resource_deadlock_would_occur));
	_GLIBCXX_DEBUG_ASSERT(__ret == 0);
      }

      bool
      try_lock_shared()
      {
	int __ret = pthread_rwlock_tryrdlock(&_M_rwlock);
	// If the maximum number of read locks has been exceeded, we just
	// fail to acquire the lock.  Unlike for lock(), we are not allowed
	// to throw an exception.
	if (__ret == EBUSY || __ret == EAGAIN) return false;
	_GLIBCXX_DEBUG_ASSERT(__ret == 0);
	return true;
      }
- template<typename _Rep, typename _Period>
- bool
- try_lock_shared_for(const chrono::duration<_Rep, _Period>& __rel_time)
- {
- return try_lock_shared_until(__clock_t::now() + __rel_time);
- }
-
- template<typename _Duration>
- bool
- try_lock_shared_until(const chrono::time_point<__clock_t,
- _Duration>& __atime)
- {
- auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
- auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);
-
- __gthread_time_t __ts =
- {
- static_cast<std::time_t>(__s.time_since_epoch().count()),
- static_cast<long>(__ns.count())
- };
-
- int __ret;
- // Unlike for lock(), we are not allowed to throw an exception so if
- // the maximum number of read locks has been exceeded, or we would
- // deadlock, we just try to acquire the lock again (and will time out
- // eventually).
- // In cases where we would exceed the maximum number of read locks
- // throughout the whole time until the timeout, we will fail to
- // acquire the lock even if it would be logically free; however, this
- // is allowed by the standard, and we made a "strong effort"
- // (see C++14 30.4.1.4p26).
- // For cases where the implementation detects a deadlock we
- // intentionally block and timeout so that an early return isn't
- // mistaken for a spurious failure, which might help users realise
- // there is a deadlock.
- do
- __ret = pthread_rwlock_timedrdlock(&_M_rwlock, &__ts);
- while (__ret == EAGAIN || __ret == EDEADLK);
- if (__ret == ETIMEDOUT)
- return false;
- // Errors not handled: EINVAL
- _GLIBCXX_DEBUG_ASSERT(__ret == 0);
- return true;
- }
-
- template<typename _Clock, typename _Duration>
- bool
- try_lock_shared_until(const chrono::time_point<_Clock,
- _Duration>& __abs_time)
- {
- // DR 887 - Sync unknown clock to known clock.
- const typename _Clock::time_point __c_entry = _Clock::now();
- const __clock_t::time_point __s_entry = __clock_t::now();
- const auto __delta = __abs_time - __c_entry;
- const auto __s_atime = __s_entry + __delta;
- return try_lock_shared_until(__s_atime);
- }
-
void
unlock_shared()
{
unlock();
}
-#else // ! (_GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK)
+ void* native_handle() { return &_M_rwlock; }
+ };
+#endif
- // Must use the same clock as condition_variable
- typedef chrono::system_clock __clock_t;
+#if ! (_GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK)
+ /// A shared mutex type implemented using std::condition_variable.
+ class __shared_mutex_cv
+ {
+ friend class shared_timed_mutex;
// Based on Howard Hinnant's reference implementation from N2406.
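	// The high bit of _M_state is the write-entered flag: it is set when
	// a writer holds the lock or is queued waiting to take it.  The
	// remaining bits (masked by _S_max_readers) count the readers
	// currently holding shared locks.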
unsigned _M_readers() const { return _M_state & _S_max_readers; }
public:
- shared_timed_mutex() : _M_state(0) {}
+ __shared_mutex_cv() : _M_state(0) {}
- ~shared_timed_mutex()
+ ~__shared_mutex_cv()
{
_GLIBCXX_DEBUG_ASSERT( _M_state == 0 );
}
- shared_timed_mutex(const shared_timed_mutex&) = delete;
- shared_timed_mutex& operator=(const shared_timed_mutex&) = delete;
+ __shared_mutex_cv(const __shared_mutex_cv&) = delete;
+ __shared_mutex_cv& operator=(const __shared_mutex_cv&) = delete;
      // Exclusive ownership

      void
      lock()
      {
	unique_lock<mutex> __lk(_M_mut);
	// Wait until we can set the write-entered flag.
	_M_gate1.wait(__lk, [=]{ return !_M_write_entered(); });
	_M_state |= _S_write_entered;
	// Then wait until there are no more readers.
	_M_gate2.wait(__lk, [=]{ return _M_readers() == 0; });
      }

      bool
      try_lock()
      {
	unique_lock<mutex> __lk(_M_mut, try_to_lock);
	if (__lk.owns_lock() && _M_state == 0)
	  {
	    _M_state = _S_write_entered;
	    return true;
	  }
	return false;
      }
- template<typename _Rep, typename _Period>
- bool
- try_lock_for(const chrono::duration<_Rep, _Period>& __rel_time)
- {
- return try_lock_until(__clock_t::now() + __rel_time);
- }
-
- template<typename _Clock, typename _Duration>
- bool
- try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
- {
- unique_lock<mutex> __lk(_M_mut);
- if (!_M_gate1.wait_until(__lk, __abs_time,
- [=]{ return !_M_write_entered(); }))
- {
- return false;
- }
- _M_state |= _S_write_entered;
- if (!_M_gate2.wait_until(__lk, __abs_time,
- [=]{ return _M_readers() == 0; }))
- {
- _M_state ^= _S_write_entered;
- // Wake all threads blocked while the write-entered flag was set.
- _M_gate1.notify_all();
- return false;
- }
- return true;
- }
-
      void
      unlock()
      {
	lock_guard<mutex> __lk(_M_mut);
	_M_state = 0;
	// Wake all threads blocked while the write-entered flag was set.
	_M_gate1.notify_all();
      }

      // Shared ownership

      void
      lock_shared()
      {
	unique_lock<mutex> __lk(_M_mut);
	_M_gate1.wait(__lk, [=]{ return _M_state < _S_max_readers; });
	++_M_state;
      }

      bool
      try_lock_shared()
      {
	unique_lock<mutex> __lk(_M_mut, try_to_lock);
	if (!__lk.owns_lock())
	  return false;
	if (_M_state < _S_max_readers)
	  {
	    ++_M_state;
	    return true;
	  }
	return false;
      }
- template<typename _Rep, typename _Period>
- bool
- try_lock_shared_for(const chrono::duration<_Rep, _Period>& __rel_time)
- {
- return try_lock_shared_until(__clock_t::now() + __rel_time);
- }
-
- template <typename _Clock, typename _Duration>
- bool
- try_lock_shared_until(const chrono::time_point<_Clock,
- _Duration>& __abs_time)
- {
- unique_lock<mutex> __lk(_M_mut);
- if (!_M_gate1.wait_until(__lk, __abs_time,
- [=]{ return _M_state < _S_max_readers; }))
- {
- return false;
- }
- ++_M_state;
- return true;
- }
-
      void
      unlock_shared()
      {
	lock_guard<mutex> __lk(_M_mut);
	auto __prev = _M_state--;
	if (_M_write_entered())
	  {
	    // Wake the queued writer if there are no more readers.
	    if (_M_readers() == 0)
	      _M_gate2.notify_one();
	  }
	else
	  {
	    // Wake any thread blocked on reader overflow.
	    if (__prev == _S_max_readers)
	      _M_gate1.notify_one();
	  }
      }
+ };
+#endif
+
+#if __cplusplus > 201402L
+ /// The standard shared mutex type.
+ class shared_mutex
+ {
+ public:
+ shared_mutex() = default;
+ ~shared_mutex() = default;
+
+ shared_mutex(const shared_mutex&) = delete;
+ shared_mutex& operator=(const shared_mutex&) = delete;
+
+ // Exclusive ownership
+
+ void lock() { _M_impl.lock(); }
+ bool try_lock() { return _M_impl.try_lock(); }
+    void unlock() { _M_impl.unlock(); }
+
+ // Shared ownership
+
+ void lock_shared() { _M_impl.lock_shared(); }
+ bool try_lock_shared() { return _M_impl.try_lock_shared(); }
+ void unlock_shared() { _M_impl.unlock_shared(); }
+
+#if _GLIBCXX_USE_PTHREAD_RWLOCK_T
+ typedef void* native_handle_type;
+ native_handle_type native_handle() { return _M_impl.native_handle(); }
+
+ private:
+ __shared_mutex_pthread _M_impl;
+#else
+ private:
+ __shared_mutex_cv _M_impl;
+#endif
+ };
+#endif // C++17
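+
+  // Example (illustrative only; the names `m', `data', `write' and `read'
+  // are arbitrary): writers take exclusive ownership via std::lock_guard
+  // from <mutex>, readers share ownership via std::shared_lock, defined
+  // later in this header.
+  //
+  //   std::shared_mutex m;   // C++17 only
+  //   int data = 0;
+  //
+  //   void write(int v)
+  //   {
+  //     std::lock_guard<std::shared_mutex> lk(m);  // m.lock() ... m.unlock()
+  //     data = v;
+  //   }
+  //
+  //   int read()
+  //   {
+  //     std::shared_lock<std::shared_mutex> lk(m); // m.lock_shared() ...
+  //     return data;
+  //   }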
+
+#if _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK
+ using __shared_timed_mutex_base = __shared_mutex_pthread;
+#else
+ using __shared_timed_mutex_base = __shared_mutex_cv;
+#endif
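+
+  // The pthread-based implementation is usable for shared_timed_mutex only
+  // when the timed operations (pthread_rwlock_timedwrlock and friends) are
+  // also available; otherwise fall back to the condition-variable form.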
+
+ /// The standard shared timed mutex type.
+ class shared_timed_mutex
+ : private __shared_timed_mutex_base
+ {
+ using _Base = __shared_timed_mutex_base;
+
+ // Must use the same clock as condition_variable for __shared_mutex_cv.
+ typedef chrono::system_clock __clock_t;
+
+ public:
+ shared_timed_mutex() = default;
+ ~shared_timed_mutex() = default;
+
+ shared_timed_mutex(const shared_timed_mutex&) = delete;
+ shared_timed_mutex& operator=(const shared_timed_mutex&) = delete;
+
+ // Exclusive ownership
+
+ void lock() { _Base::lock(); }
+ bool try_lock() { return _Base::try_lock(); }
+ void unlock() { _Base::unlock(); }
+
+ template<typename _Rep, typename _Period>
+ bool
+ try_lock_for(const chrono::duration<_Rep, _Period>& __rel_time)
+ {
+ return try_lock_until(__clock_t::now() + __rel_time);
+ }
+
+ // Shared ownership
+
+ void lock_shared() { _Base::lock_shared(); }
+ bool try_lock_shared() { return _Base::try_lock_shared(); }
+ void unlock_shared() { _Base::unlock_shared(); }
+
+ template<typename _Rep, typename _Period>
+ bool
+ try_lock_shared_for(const chrono::duration<_Rep, _Period>& __rel_time)
+ {
+ return try_lock_shared_until(__clock_t::now() + __rel_time);
+ }
+
+#if _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK
+
+ // Exclusive ownership
+
+ template<typename _Duration>
+ bool
+ try_lock_until(const chrono::time_point<__clock_t, _Duration>& __atime)
+ {
+ auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
+ auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);
+
+ __gthread_time_t __ts =
+ {
+ static_cast<std::time_t>(__s.time_since_epoch().count()),
+ static_cast<long>(__ns.count())
+ };
+
+ int __ret = pthread_rwlock_timedwrlock(&_M_rwlock, &__ts);
+ // On self-deadlock, we just fail to acquire the lock. Technically,
+ // the program violated the precondition.
+ if (__ret == ETIMEDOUT || __ret == EDEADLK)
+ return false;
+ // Errors not handled: EINVAL
+ _GLIBCXX_DEBUG_ASSERT(__ret == 0);
+ return true;
+ }
+
+ template<typename _Clock, typename _Duration>
+ bool
+ try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
+ {
+ // DR 887 - Sync unknown clock to known clock.
+ const typename _Clock::time_point __c_entry = _Clock::now();
+ const __clock_t::time_point __s_entry = __clock_t::now();
+ const auto __delta = __abs_time - __c_entry;
+ const auto __s_atime = __s_entry + __delta;
+ return try_lock_until(__s_atime);
+ }
+
+ // Shared ownership
+
+ template<typename _Duration>
+ bool
+ try_lock_shared_until(const chrono::time_point<__clock_t,
+ _Duration>& __atime)
+ {
+ auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
+ auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);
+
+ __gthread_time_t __ts =
+ {
+ static_cast<std::time_t>(__s.time_since_epoch().count()),
+ static_cast<long>(__ns.count())
+ };
+
+ int __ret;
+ // Unlike for lock(), we are not allowed to throw an exception so if
+ // the maximum number of read locks has been exceeded, or we would
+ // deadlock, we just try to acquire the lock again (and will time out
+ // eventually).
+ // In cases where we would exceed the maximum number of read locks
+ // throughout the whole time until the timeout, we will fail to
+ // acquire the lock even if it would be logically free; however, this
+ // is allowed by the standard, and we made a "strong effort"
+ // (see C++14 30.4.1.4p26).
+ // For cases where the implementation detects a deadlock we
+ // intentionally block and timeout so that an early return isn't
+ // mistaken for a spurious failure, which might help users realise
+ // there is a deadlock.
+ do
+ __ret = pthread_rwlock_timedrdlock(&_M_rwlock, &__ts);
+ while (__ret == EAGAIN || __ret == EDEADLK);
+ if (__ret == ETIMEDOUT)
+ return false;
+ // Errors not handled: EINVAL
+ _GLIBCXX_DEBUG_ASSERT(__ret == 0);
+ return true;
+ }
+
+ template<typename _Clock, typename _Duration>
+ bool
+ try_lock_shared_until(const chrono::time_point<_Clock,
+ _Duration>& __abs_time)
+ {
+ // DR 887 - Sync unknown clock to known clock.
+ const typename _Clock::time_point __c_entry = _Clock::now();
+ const __clock_t::time_point __s_entry = __clock_t::now();
+ const auto __delta = __abs_time - __c_entry;
+ const auto __s_atime = __s_entry + __delta;
+ return try_lock_shared_until(__s_atime);
+ }
+
+#else // ! (_GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK)
+
+ // Exclusive ownership
+
+ template<typename _Clock, typename _Duration>
+ bool
+ try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
+ {
+ unique_lock<mutex> __lk(_M_mut);
+ if (!_M_gate1.wait_until(__lk, __abs_time,
+ [=]{ return !_M_write_entered(); }))
+ {
+ return false;
+ }
+ _M_state |= _S_write_entered;
+ if (!_M_gate2.wait_until(__lk, __abs_time,
+ [=]{ return _M_readers() == 0; }))
+ {
+ _M_state ^= _S_write_entered;
+ // Wake all threads blocked while the write-entered flag was set.
+ _M_gate1.notify_all();
+ return false;
+ }
+ return true;
+ }
+
+ // Shared ownership
+
+ template <typename _Clock, typename _Duration>
+ bool
+ try_lock_shared_until(const chrono::time_point<_Clock,
+ _Duration>& __abs_time)
+ {
+ unique_lock<mutex> __lk(_M_mut);
+ if (!_M_gate1.wait_until(__lk, __abs_time,
+ [=]{ return _M_state < _S_max_readers; }))
+ {
+ return false;
+ }
+ ++_M_state;
+ return true;
+ }
+
#endif // _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK
};
#endif // _GLIBCXX_HAS_GTHREADS
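
// Example (illustrative only; `mtx' and the function names are arbitrary):
// the timed members accept any clock/duration; relative waits are converted
// to absolute waits on __clock_t first, as try_lock_for above shows.
//
//   #include <shared_mutex>
//   #include <chrono>
//
//   std::shared_timed_mutex mtx;
//
//   bool try_write()
//   {
//     if (!mtx.try_lock_for(std::chrono::milliseconds(100)))
//       return false;      // timed out (or deadlock detected)
//     // ... modify shared state ...
//     mtx.unlock();
//     return true;
//   }
//
//   bool try_read()
//   {
//     // shared_lock's timed constructor calls try_lock_shared_for.
//     std::shared_lock<std::shared_timed_mutex> lk(mtx,
//       std::chrono::milliseconds(100));
//     return lk.owns_lock();
//   }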