// Source: gcc.git — libstdc++-v3 / include / std / mutex
// (gitweb page header; commit subject: "system_error (system_category): To system_category().")
1 // <mutex> -*- C++ -*-
2
3 // Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009
4 // Free Software Foundation, Inc.
5 //
6 // This file is part of the GNU ISO C++ Library. This library is free
7 // software; you can redistribute it and/or modify it under the
8 // terms of the GNU General Public License as published by the
9 // Free Software Foundation; either version 2, or (at your option)
10 // any later version.
11
12 // This library is distributed in the hope that it will be useful,
13 // but WITHOUT ANY WARRANTY; without even the implied warranty of
14 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 // GNU General Public License for more details.
16
17 // You should have received a copy of the GNU General Public License
18 // along with this library; see the file COPYING. If not, write to
19 // the Free Software Foundation, 51 Franklin Street, Fifth Floor,
20 // Boston, MA 02110-1301, USA.
21
22 // As a special exception, you may use this file as part of a free software
23 // library without restriction. Specifically, if other files instantiate
24 // templates or use macros or inline functions from this file, or you compile
25 // this file and link it with other files to produce an executable, this
26 // file does not by itself cause the resulting executable to be covered by
27 // the GNU General Public License. This exception does not however
28 // invalidate any other reasons why the executable file might be covered by
29 // the GNU General Public License.
30
31 /** @file mutex
32 * This is a Standard C++ Library header.
33 */
34
35 #ifndef _GLIBCXX_MUTEX
36 #define _GLIBCXX_MUTEX 1
37
38 #pragma GCC system_header
39
40 #ifndef __GXX_EXPERIMENTAL_CXX0X__
41 # include <c++0x_warning.h>
42 #else
43
44 #include <tuple>
45 #include <cstddef>
46 #include <chrono>
47 #include <exception>
48 #include <type_traits>
49 #include <functional>
50 #include <system_error>
51 #include <bits/functexcept.h>
52 #include <bits/gthr.h>
53 #include <bits/move.h> // for std::swap
54
55 #if defined(_GLIBCXX_HAS_GTHREADS) && defined(_GLIBCXX_USE_C99_STDINT_TR1)
56
57 namespace std
58 {
59 /// mutex
60 class mutex
61 {
62 typedef __gthread_mutex_t __native_type;
63 __native_type _M_mutex;
64
65 public:
66 typedef __native_type* native_handle_type;
67
68 mutex()
69 {
70 // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
71 #ifdef __GTHREAD_MUTEX_INIT
72 __native_type __tmp = __GTHREAD_MUTEX_INIT;
73 _M_mutex = __tmp;
74 #else
75 __GTHREAD_MUTEX_INIT_FUNCTION(&_M_mutex);
76 #endif
77 }
78
79 mutex(const mutex&) = delete;
80 mutex& operator=(const mutex&) = delete;
81
82 void
83 lock()
84 {
85 int __e = __gthread_mutex_lock(&_M_mutex);
86
87 // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
88 if (__e)
89 __throw_system_error(__e);
90 }
91
92 bool
93 try_lock()
94 {
95 // XXX EINVAL, EAGAIN, EBUSY
96 return !__gthread_mutex_trylock(&_M_mutex);
97 }
98
99 void
100 unlock()
101 {
102 // XXX EINVAL, EAGAIN, EPERM
103 __gthread_mutex_unlock(&_M_mutex);
104 }
105
106 native_handle_type
107 native_handle()
108 { return &_M_mutex; }
109 };
110
111 /// recursive_mutex
112 class recursive_mutex
113 {
114 typedef __gthread_recursive_mutex_t __native_type;
115 __native_type _M_mutex;
116
117 public:
118 typedef __native_type* native_handle_type;
119
120 recursive_mutex()
121 {
122 // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
123 #ifdef __GTHREAD_RECURSIVE_MUTEX_INIT
124 __native_type __tmp = __GTHREAD_RECURSIVE_MUTEX_INIT;
125 _M_mutex = __tmp;
126 #else
127 __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION(&_M_mutex);
128 #endif
129 }
130
131 recursive_mutex(const recursive_mutex&) = delete;
132 recursive_mutex& operator=(const recursive_mutex&) = delete;
133
134 void
135 lock()
136 {
137 int __e = __gthread_recursive_mutex_lock(&_M_mutex);
138
139 // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
140 if (__e)
141 __throw_system_error(__e);
142 }
143
144 bool
145 try_lock()
146 {
147 // XXX EINVAL, EAGAIN, EBUSY
148 return !__gthread_recursive_mutex_trylock(&_M_mutex);
149 }
150
151 void
152 unlock()
153 {
154 // XXX EINVAL, EAGAIN, EBUSY
155 __gthread_recursive_mutex_unlock(&_M_mutex);
156 }
157
158 native_handle_type
159 native_handle()
160 { return &_M_mutex; }
161 };
162
163 /// timed_mutex
164 class timed_mutex
165 {
166 typedef __gthread_mutex_t __native_type;
167
168 #ifdef _GLIBCXX_USE_CLOCK_MONOTONIC
169 typedef chrono::monotonic_clock __clock_t;
170 #else
171 typedef chrono::high_resolution_clock __clock_t;
172 #endif
173
174 __native_type _M_mutex;
175
176 public:
177 typedef __native_type* native_handle_type;
178
179 timed_mutex()
180 {
181 #ifdef __GTHREAD_MUTEX_INIT
182 __native_type __tmp = __GTHREAD_MUTEX_INIT;
183 _M_mutex = __tmp;
184 #else
185 __GTHREAD_MUTEX_INIT_FUNCTION(&_M_mutex);
186 #endif
187 }
188
189 timed_mutex(const timed_mutex&) = delete;
190 timed_mutex& operator=(const timed_mutex&) = delete;
191
192 void
193 lock()
194 {
195 int __e = __gthread_mutex_lock(&_M_mutex);
196
197 // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
198 if (__e)
199 __throw_system_error(__e);
200 }
201
202 bool
203 try_lock()
204 {
205 // XXX EINVAL, EAGAIN, EBUSY
206 return !__gthread_mutex_trylock(&_M_mutex);
207 }
208
209 template <class _Rep, class _Period>
210 bool
211 try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
212 { return __try_lock_for_impl(__rtime); }
213
214 template <class _Clock, class _Duration>
215 bool
216 try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
217 {
218 chrono::time_point<_Clock, chrono::seconds> __s =
219 chrono::time_point_cast<chrono::seconds>(__atime);
220
221 chrono::nanoseconds __ns =
222 chrono::duration_cast<chrono::nanoseconds>(__atime - __s);
223
224 __gthread_time_t __ts = {
225 static_cast<std::time_t>(__s.time_since_epoch().count()),
226 static_cast<long>(__ns.count())
227 };
228
229 return !__gthread_mutex_timedlock(&_M_mutex, &__ts);
230 }
231
232 void
233 unlock()
234 {
235 // XXX EINVAL, EAGAIN, EBUSY
236 __gthread_mutex_unlock(&_M_mutex);
237 }
238
239 native_handle_type
240 native_handle()
241 { return &_M_mutex; }
242
243 private:
244 template<typename _Rep, typename _Period>
245 typename enable_if<
246 ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
247 __try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
248 {
249 __clock_t::time_point __atime = __clock_t::now()
250 + chrono::duration_cast<__clock_t::duration>(__rtime);
251
252 return try_lock_until(__atime);
253 }
254
255 template <typename _Rep, typename _Period>
256 typename enable_if<
257 !ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
258 __try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
259 {
260 __clock_t::time_point __atime = __clock_t::now()
261 + ++chrono::duration_cast<__clock_t::duration>(__rtime);
262
263 return try_lock_until(__atime);
264 }
265 };
266
267 /// recursive_timed_mutex
268 class recursive_timed_mutex
269 {
270 typedef __gthread_recursive_mutex_t __native_type;
271
272 #ifdef _GLIBCXX_USE_CLOCK_MONOTONIC
273 typedef chrono::monotonic_clock __clock_t;
274 #else
275 typedef chrono::high_resolution_clock __clock_t;
276 #endif
277
278 __native_type _M_mutex;
279
280 public:
281 typedef __native_type* native_handle_type;
282
283 recursive_timed_mutex()
284 {
285 // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
286 #ifdef __GTHREAD_RECURSIVE_MUTEX_INIT
287 __native_type __tmp = __GTHREAD_RECURSIVE_MUTEX_INIT;
288 _M_mutex = __tmp;
289 #else
290 __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION(&_M_mutex);
291 #endif
292 }
293
294 recursive_timed_mutex(const recursive_timed_mutex&) = delete;
295 recursive_timed_mutex& operator=(const recursive_timed_mutex&) = delete;
296
297 void
298 lock()
299 {
300 int __e = __gthread_recursive_mutex_lock(&_M_mutex);
301
302 // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
303 if (__e)
304 __throw_system_error(__e);
305 }
306
307 bool
308 try_lock()
309 {
310 // XXX EINVAL, EAGAIN, EBUSY
311 return !__gthread_recursive_mutex_trylock(&_M_mutex);
312 }
313
314 template <class _Rep, class _Period>
315 bool
316 try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
317 { return __try_lock_for_impl(__rtime); }
318
319 template <class _Clock, class _Duration>
320 bool
321 try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
322 {
323 chrono::time_point<_Clock, chrono::seconds> __s =
324 chrono::time_point_cast<chrono::seconds>(__atime);
325
326 chrono::nanoseconds __ns =
327 chrono::duration_cast<chrono::nanoseconds>(__atime - __s);
328
329 __gthread_time_t __ts = {
330 static_cast<std::time_t>(__s.time_since_epoch().count()),
331 static_cast<long>(__ns.count())
332 };
333
334 return !__gthread_recursive_mutex_timedlock(&_M_mutex, &__ts);
335 }
336
337 void
338 unlock()
339 {
340 // XXX EINVAL, EAGAIN, EBUSY
341 __gthread_recursive_mutex_unlock(&_M_mutex);
342 }
343
344 native_handle_type
345 native_handle()
346 { return &_M_mutex; }
347
348 private:
349 template<typename _Rep, typename _Period>
350 typename enable_if<
351 ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
352 __try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
353 {
354 __clock_t::time_point __atime = __clock_t::now()
355 + chrono::duration_cast<__clock_t::duration>(__rtime);
356
357 return try_lock_until(__atime);
358 }
359
360 template <typename _Rep, typename _Period>
361 typename enable_if<
362 !ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
363 __try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
364 {
365 __clock_t::time_point __atime = __clock_t::now()
366 + ++chrono::duration_cast<__clock_t::duration>(__rtime);
367
368 return try_lock_until(__atime);
369 }
370 };
371
372 /// Do not acquire ownership of the mutex.
373 struct defer_lock_t { };
374
375 /// Try to acquire ownership of the mutex without blocking.
376 struct try_to_lock_t { };
377
378 /// Assume the calling thread has already obtained mutex ownership
379 /// and manage it.
380 struct adopt_lock_t { };
381
382 extern const defer_lock_t defer_lock;
383 extern const try_to_lock_t try_to_lock;
384 extern const adopt_lock_t adopt_lock;
385
386 /// Thrown to indicate errors with lock operations.
387 class lock_error : public exception
388 {
389 public:
390 virtual const char*
391 what() const throw();
392 };
393
394 /// @brief Scoped lock idiom.
395 // Acquire the mutex here with a constructor call, then release with
396 // the destructor call in accordance with RAII style.
397 template<typename _Mutex>
398 class lock_guard
399 {
400 public:
401 typedef _Mutex mutex_type;
402
403 explicit lock_guard(mutex_type& __m) : _M_device(__m)
404 { _M_device.lock(); }
405
406 lock_guard(mutex_type& __m, adopt_lock_t __a) : _M_device(__m)
407 { _M_device.lock(); }
408
409 ~lock_guard()
410 { _M_device.unlock(); }
411
412 lock_guard(const lock_guard&) = delete;
413 lock_guard& operator=(const lock_guard&) = delete;
414
415 private:
416 mutex_type& _M_device;
417 };
418
419 /// unique_lock
420 template<typename _Mutex>
421 class unique_lock
422 {
423 public:
424 typedef _Mutex mutex_type;
425
426 unique_lock()
427 : _M_device(0), _M_owns(false)
428 { }
429
430 explicit unique_lock(mutex_type& __m)
431 : _M_device(&__m), _M_owns(false)
432 {
433 lock();
434 _M_owns = true;
435 }
436
437 unique_lock(mutex_type& __m, defer_lock_t)
438 : _M_device(&__m), _M_owns(false)
439 { }
440
441 unique_lock(mutex_type& __m, try_to_lock_t)
442 : _M_device(&__m), _M_owns(_M_device->try_lock())
443 { }
444
445 unique_lock(mutex_type& __m, adopt_lock_t)
446 : _M_device(&__m), _M_owns(true)
447 {
448 // XXX calling thread owns mutex
449 }
450
451 template<typename _Clock, typename _Duration>
452 unique_lock(mutex_type& __m,
453 const chrono::time_point<_Clock, _Duration>& __atime)
454 : _M_device(&__m), _M_owns(_M_device->try_lock_until(__atime))
455 { }
456
457 template<typename _Rep, typename _Period>
458 unique_lock(mutex_type& __m,
459 const chrono::duration<_Rep, _Period>& __rtime)
460 : _M_device(&__m), _M_owns(_M_device->try_lock_for(__rtime))
461 { }
462
463 ~unique_lock()
464 {
465 if (_M_owns)
466 unlock();
467 }
468
469 unique_lock(const unique_lock&) = delete;
470 unique_lock& operator=(const unique_lock&) = delete;
471
472 unique_lock(unique_lock&& __u)
473 : _M_device(__u._M_device), _M_owns(__u._M_owns)
474 {
475 __u._M_device = 0;
476 __u._M_owns = false;
477 }
478
479 unique_lock& operator=(unique_lock&& __u)
480 {
481 if(_M_owns)
482 unlock();
483
484 unique_lock(std::move(__u)).swap(*this);
485
486 __u._M_device = 0;
487 __u._M_owns = false;
488
489 return *this;
490 }
491
492 void
493 lock()
494 {
495 if (!_M_device)
496 __throw_system_error(int(errc::operation_not_permitted));
497 else if (_M_owns)
498 __throw_system_error(int(errc::resource_deadlock_would_occur));
499 else
500 {
501 _M_device->lock();
502 _M_owns = true;
503 }
504 }
505
506 bool
507 try_lock()
508 {
509 if (!_M_device)
510 __throw_system_error(int(errc::operation_not_permitted));
511 else if (_M_owns)
512 __throw_system_error(int(errc::resource_deadlock_would_occur));
513 else
514 {
515 _M_owns = _M_device->try_lock();
516 return _M_owns;
517 }
518 }
519
520 template<typename _Clock, typename _Duration>
521 bool
522 try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
523 {
524 if (!_M_device)
525 __throw_system_error(int(errc::operation_not_permitted));
526 else if (_M_owns)
527 __throw_system_error(int(errc::resource_deadlock_would_occur));
528 else
529 {
530 _M_owns = _M_device->try_lock_until(__atime);
531 return _M_owns;
532 }
533 }
534
535 template<typename _Rep, typename _Period>
536 bool
537 try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
538 {
539 if (!_M_device)
540 __throw_system_error(int(errc::operation_not_permitted));
541 else if (_M_owns)
542 __throw_system_error(int(errc::resource_deadlock_would_occur));
543 else
544 {
545 _M_owns = _M_device->try_lock_for(__rtime);
546 return _M_owns;
547 }
548 }
549
550 void
551 unlock()
552 {
553 if (!_M_owns)
554 __throw_system_error(int(errc::operation_not_permitted));
555 else if (_M_device)
556 {
557 _M_device->unlock();
558 _M_owns = false;
559 }
560 }
561
562 void
563 swap(unique_lock&& __u)
564 {
565 std::swap(_M_device, __u._M_device);
566 std::swap(_M_owns, __u._M_owns);
567 }
568
569 mutex_type*
570 release()
571 {
572 mutex_type* __ret = _M_device;
573 _M_device = 0;
574 _M_owns = false;
575 return __ret;
576 }
577
578 bool
579 owns_lock() const
580 { return _M_owns; }
581
582 /* explicit */ operator bool () const
583 { return owns_lock(); }
584
585 mutex_type*
586 mutex() const
587 { return _M_device; }
588
589 private:
590 mutex_type* _M_device;
591 bool _M_owns; // XXX use atomic_bool
592 };
593
594 template<typename _Mutex>
595 inline void
596 swap(unique_lock<_Mutex>& __x, unique_lock<_Mutex>& __y)
597 { __x.swap(__y); }
598
599 template<typename _Mutex>
600 inline void
601 swap(unique_lock<_Mutex>&& __x, unique_lock<_Mutex>& __y)
602 { __x.swap(__y); }
603
604 template<typename _Mutex>
605 inline void
606 swap(unique_lock<_Mutex>& __x, unique_lock<_Mutex>&& __y)
607 { __x.swap(__y); }
608
609 template<int _Idx>
610 struct __unlock_impl
611 {
612 template<typename... _Lock>
613 static void
614 __do_unlock(tuple<_Lock&...>& __locks)
615 {
616 std::get<_Idx>(__locks).unlock();
617 __unlock_impl<_Idx - 1>::__do_unlock(__locks);
618 }
619 };
620
621 template<>
622 struct __unlock_impl<-1>
623 {
624 template<typename... _Lock>
625 static void
626 __do_unlock(tuple<_Lock&...>&)
627 { }
628 };
629
630 template<int _Idx, bool _Continue = true>
631 struct __try_lock_impl
632 {
633 template<typename... _Lock>
634 static int
635 __do_try_lock(tuple<_Lock&...>& __locks)
636 {
637 if(std::get<_Idx>(__locks).try_lock())
638 {
639 return __try_lock_impl<_Idx + 1,
640 _Idx + 2 < sizeof...(_Lock)>::__do_try_lock(__locks);
641 }
642 else
643 {
644 __unlock_impl<_Idx>::__do_unlock(__locks);
645 return _Idx;
646 }
647 }
648 };
649
650 template<int _Idx>
651 struct __try_lock_impl<_Idx, false>
652 {
653 template<typename... _Lock>
654 static int
655 __do_try_lock(tuple<_Lock&...>& __locks)
656 {
657 if(std::get<_Idx>(__locks).try_lock())
658 return -1;
659 else
660 {
661 __unlock_impl<_Idx>::__do_unlock(__locks);
662 return _Idx;
663 }
664 }
665 };
666
667 /** @brief Generic try_lock.
668 * @param __l1 Meets Mutex requirements (try_lock() may throw).
669 * @param __l2 Meets Mutex requirements (try_lock() may throw).
670 * @param __l3 Meets Mutex requirements (try_lock() may throw).
671 * @return Returns -1 if all try_lock() calls return true. Otherwise returns
672 * a 0-based index corresponding to the argument that returned false.
673 * @post Either all arguments are locked, or none will be.
674 *
675 * Sequentially calls try_lock() on each argument.
676 */
677 template<typename _Lock1, typename _Lock2, typename... _Lock3>
678 int
679 try_lock(_Lock1& __l1, _Lock2& __l2, _Lock3&... __l3)
680 {
681 tuple<_Lock1&, _Lock2&, _Lock3&...> __locks(__l1, __l2, __l3...);
682 return __try_lock_impl<0>::__do_try_lock(__locks);
683 }
684
685 template<typename _L1, typename _L2, typename ..._L3>
686 void
687 lock(_L1&, _L2&, _L3&...);
688
689 /// once_flag
690 struct once_flag
691 {
692 private:
693 typedef __gthread_once_t __native_type;
694 __native_type _M_once;
695
696 public:
697 once_flag()
698 {
699 __native_type __tmp = __GTHREAD_ONCE_INIT;
700 _M_once = __tmp;
701 }
702
703 once_flag(const once_flag&) = delete;
704 once_flag& operator=(const once_flag&) = delete;
705
706 template<typename _Callable, typename... _Args>
707 friend void
708 call_once(once_flag& __once, _Callable __f, _Args&&... __args);
709 };
710
711 #ifdef _GLIBCXX_HAVE_TLS
712 extern __thread void* __once_callable;
713 extern __thread void (*__once_call)();
714
715 template<typename _Callable>
716 inline void
717 __once_call_impl()
718 {
719 (*(_Callable*)__once_callable)();
720 }
721 #else
722 extern function<void()> __once_functor;
723
724 extern unique_lock<mutex>&
725 __get_once_functor_lock();
726 #endif
727
728 extern "C" void __once_proxy();
729
730 template<typename _Callable, typename... _Args>
731 void
732 call_once(once_flag& __once, _Callable __f, _Args&&... __args)
733 {
734 #ifdef _GLIBCXX_HAVE_TLS
735 auto __bound_functor = bind(__f, __args...);
736 __once_callable = &__bound_functor;
737 __once_call = &__once_call_impl<decltype(__bound_functor)>;
738 #else
739 unique_lock<mutex>& __functor_lock = __get_once_functor_lock();
740 __functor_lock.lock();
741 __once_functor = bind(__f, __args...);
742 #endif
743
744 int __e = __gthread_once(&(__once._M_once), &__once_proxy);
745
746 #ifndef _GLIBCXX_HAVE_TLS
747 if (__functor_lock)
748 __functor_lock.unlock();
749 #endif
750
751 if (__e)
752 __throw_system_error(__e);
753 }
754 }
755
756 #endif // _GLIBCXX_HAS_GTHREADS && _GLIBCXX_USE_C99_STDINT_TR1
757
758 #endif // __GXX_EXPERIMENTAL_CXX0X__
759
760 #endif // _GLIBCXX_MUTEX