]> git.ipfire.org Git - thirdparty/gcc.git/blob - libstdc++-v3/include/std/mutex
c++config (std::size_t, [...]): Provide typedefs.
[thirdparty/gcc.git] / libstdc++-v3 / include / std / mutex
1 // <mutex> -*- C++ -*-
2
3 // Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 // Free Software Foundation, Inc.
5 //
6 // This file is part of the GNU ISO C++ Library. This library is free
7 // software; you can redistribute it and/or modify it under the
8 // terms of the GNU General Public License as published by the
9 // Free Software Foundation; either version 3, or (at your option)
10 // any later version.
11
12 // This library is distributed in the hope that it will be useful,
13 // but WITHOUT ANY WARRANTY; without even the implied warranty of
14 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 // GNU General Public License for more details.
16
17 // Under Section 7 of GPL version 3, you are granted additional
18 // permissions described in the GCC Runtime Library Exception, version
19 // 3.1, as published by the Free Software Foundation.
20
21 // You should have received a copy of the GNU General Public License and
22 // a copy of the GCC Runtime Library Exception along with this program;
23 // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
24 // <http://www.gnu.org/licenses/>.
25
26 /** @file mutex
27 * This is a Standard C++ Library header.
28 */
29
30 #ifndef _GLIBCXX_MUTEX
31 #define _GLIBCXX_MUTEX 1
32
33 #pragma GCC system_header
34
35 #ifndef __GXX_EXPERIMENTAL_CXX0X__
36 # include <bits/c++0x_warning.h>
37 #else
38
39 #include <tuple>
40 #include <chrono>
41 #include <exception>
42 #include <type_traits>
43 #include <functional>
44 #include <system_error>
45 #include <bits/functexcept.h>
46 #include <bits/gthr.h>
47 #include <bits/move.h> // for std::swap
48
49 #if defined(_GLIBCXX_HAS_GTHREADS) && defined(_GLIBCXX_USE_C99_STDINT_TR1)
50
51 namespace std
52 {
53 /**
54 * @defgroup mutexes Mutexes
55 * @ingroup concurrency
56 *
57 * Classes for mutex support.
58 * @{
59 */
60
  /// mutex
  //
  // The standard non-recursive mutex: exclusive, non-recursive
  // ownership.  A thin wrapper over the gthreads mutex; lock()
  // reports failure via std::system_error, while try_lock() and
  // unlock() discard any error codes from the underlying calls.
  class mutex
  {
    typedef __gthread_mutex_t __native_type;
    __native_type  _M_mutex;

  public:
    typedef __native_type* native_handle_type;

    mutex()
    {
      // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
#ifdef __GTHREAD_MUTEX_INIT
      // A static initializer exists: go through a temporary because
      // the aggregate initializer cannot be applied to the member
      // directly.
      __native_type __tmp = __GTHREAD_MUTEX_INIT;
      _M_mutex = __tmp;
#else
      // No static initializer; initialize at run time.
      __GTHREAD_MUTEX_INIT_FUNCTION(&_M_mutex);
#endif
    }

    mutex(const mutex&) = delete;
    mutex& operator=(const mutex&) = delete;

    // Block until the mutex is acquired; throws system_error on
    // failure (e.g. deadlock detected by the implementation).
    void
    lock()
    {
      int __e = __gthread_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
      if (__e)
        __throw_system_error(__e);
    }

    // Non-blocking acquire; true on success.  Error codes from the
    // underlying call are folded into "false".
    bool
    try_lock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_mutex_trylock(&_M_mutex);
    }

    // Release the mutex.  Errors are deliberately ignored: unlock()
    // is called from lock-type destructors and must not throw.
    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EPERM
      __gthread_mutex_unlock(&_M_mutex);
    }

    // Access the underlying implementation-defined mutex object.
    native_handle_type
    native_handle()
    { return &_M_mutex; }
  };
112
  /// recursive_mutex
  //
  // Like mutex, but the owning thread may lock() or try_lock()
  // again without deadlocking; each successful acquisition must be
  // matched by a call to unlock().
  class recursive_mutex
  {
    typedef __gthread_recursive_mutex_t __native_type;
    __native_type  _M_mutex;

  public:
    typedef __native_type* native_handle_type;

    recursive_mutex()
    {
      // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
#ifdef __GTHREAD_RECURSIVE_MUTEX_INIT
      // Static initializer available: copy it via a temporary.
      __native_type __tmp = __GTHREAD_RECURSIVE_MUTEX_INIT;
      _M_mutex = __tmp;
#else
      // Otherwise initialize at run time.
      __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION(&_M_mutex);
#endif
    }

    recursive_mutex(const recursive_mutex&) = delete;
    recursive_mutex& operator=(const recursive_mutex&) = delete;

    // Acquire (or re-acquire) the mutex; throws system_error on
    // failure.
    void
    lock()
    {
      int __e = __gthread_recursive_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
      if (__e)
        __throw_system_error(__e);
    }

    // Non-blocking (re-)acquire; true on success.
    bool
    try_lock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_recursive_mutex_trylock(&_M_mutex);
    }

    // Release one level of ownership; errors are ignored so that
    // this never throws from a destructor context.
    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_recursive_mutex_unlock(&_M_mutex);
    }

    native_handle_type
    native_handle()
    { return &_M_mutex; }
  };
164
  /// timed_mutex
  //
  // A non-recursive mutex that additionally supports timed
  // acquisition via try_lock_for() and try_lock_until().
  class timed_mutex
  {
    typedef __gthread_mutex_t __native_type;

#ifdef _GLIBCXX_USE_CLOCK_MONOTONIC
    // Reference clock used to turn relative timeouts into absolute
    // deadlines.
    typedef chrono::monotonic_clock __clock_t;
#else
    typedef chrono::high_resolution_clock __clock_t;
#endif

    __native_type  _M_mutex;

  public:
    typedef __native_type* native_handle_type;

    timed_mutex()
    {
#ifdef __GTHREAD_MUTEX_INIT
      // Static initializer available: copy it via a temporary.
      __native_type __tmp = __GTHREAD_MUTEX_INIT;
      _M_mutex = __tmp;
#else
      __GTHREAD_MUTEX_INIT_FUNCTION(&_M_mutex);
#endif
    }

    timed_mutex(const timed_mutex&) = delete;
    timed_mutex& operator=(const timed_mutex&) = delete;

    // Block until the mutex is acquired; throws system_error on
    // failure.
    void
    lock()
    {
      int __e = __gthread_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
      if (__e)
        __throw_system_error(__e);
    }

    // Non-blocking acquire; true on success.
    bool
    try_lock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_mutex_trylock(&_M_mutex);
    }

    // Try to lock, giving up after the relative timeout __rtime.
    // Dispatches on whether __rtime's period needs rounding up when
    // converted to the reference clock's duration (see the private
    // __try_lock_for_impl overloads).
    template <class _Rep, class _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      { return __try_lock_for_impl(__rtime); }

    // Try to lock, giving up at the absolute time __atime.
    // NOTE(review): __atime is handed to __gthread_mutex_timedlock
    // without any clock conversion — assumes _Clock's epoch matches
    // the one the gthreads implementation expects; confirm per port.
    template <class _Clock, class _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      {
        // Split the deadline into whole seconds ...
        chrono::time_point<_Clock, chrono::seconds> __s =
          chrono::time_point_cast<chrono::seconds>(__atime);

        // ... plus the remaining sub-second part in nanoseconds.
        chrono::nanoseconds __ns =
          chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

        __gthread_time_t __ts = {
          static_cast<std::time_t>(__s.time_since_epoch().count()),
          static_cast<long>(__ns.count())
        };

        return !__gthread_mutex_timedlock(&_M_mutex, &__ts);
      }

    // Release the mutex; errors are ignored.
    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_mutex_unlock(&_M_mutex);
    }

    native_handle_type
    native_handle()
    { return &_M_mutex; }

  private:
    // Case 1: the clock's tick period is no coarser than _Period,
    // so the duration_cast does not truncate away requested time.
    template<typename _Rep, typename _Period>
      typename enable_if<
        ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
      __try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
      {
        __clock_t::time_point __atime = __clock_t::now()
          + chrono::duration_cast<__clock_t::duration>(__rtime);

        return try_lock_until(__atime);
      }

    // Case 2: the clock's tick period is coarser than _Period, so
    // duration_cast truncates toward zero.  The pre-increment adds
    // one clock tick, rounding the wait up rather than returning
    // early.
    template <typename _Rep, typename _Period>
      typename enable_if<
        !ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
      __try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
      {
        __clock_t::time_point __atime = __clock_t::now()
          + ++chrono::duration_cast<__clock_t::duration>(__rtime);

        return try_lock_until(__atime);
      }
  };
268
  /// recursive_timed_mutex
  //
  // Recursive counterpart of timed_mutex: the owning thread may
  // acquire it repeatedly, and acquisition may be bounded by a
  // relative or absolute timeout.
  class recursive_timed_mutex
  {
    typedef __gthread_recursive_mutex_t __native_type;

#ifdef _GLIBCXX_USE_CLOCK_MONOTONIC
    // Reference clock used to turn relative timeouts into absolute
    // deadlines.
    typedef chrono::monotonic_clock __clock_t;
#else
    typedef chrono::high_resolution_clock __clock_t;
#endif

    __native_type  _M_mutex;

  public:
    typedef __native_type* native_handle_type;

    recursive_timed_mutex()
    {
      // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
#ifdef __GTHREAD_RECURSIVE_MUTEX_INIT
      // Static initializer available: copy it via a temporary.
      __native_type __tmp = __GTHREAD_RECURSIVE_MUTEX_INIT;
      _M_mutex = __tmp;
#else
      __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION(&_M_mutex);
#endif
    }

    recursive_timed_mutex(const recursive_timed_mutex&) = delete;
    recursive_timed_mutex& operator=(const recursive_timed_mutex&) = delete;

    // Acquire (or re-acquire) the mutex; throws system_error on
    // failure.
    void
    lock()
    {
      int __e = __gthread_recursive_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
      if (__e)
        __throw_system_error(__e);
    }

    // Non-blocking (re-)acquire; true on success.
    bool
    try_lock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_recursive_mutex_trylock(&_M_mutex);
    }

    // Try to lock, giving up after the relative timeout __rtime.
    template <class _Rep, class _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      { return __try_lock_for_impl(__rtime); }

    // Try to lock, giving up at the absolute time __atime.
    // NOTE(review): as in timed_mutex, __atime is passed through
    // without clock conversion — assumes _Clock matches the clock
    // used by the gthreads timedlock; confirm per port.
    template <class _Clock, class _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      {
        // Split the deadline into whole seconds ...
        chrono::time_point<_Clock, chrono::seconds> __s =
          chrono::time_point_cast<chrono::seconds>(__atime);

        // ... plus the remaining sub-second part in nanoseconds.
        chrono::nanoseconds __ns =
          chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

        __gthread_time_t __ts = {
          static_cast<std::time_t>(__s.time_since_epoch().count()),
          static_cast<long>(__ns.count())
        };

        return !__gthread_recursive_mutex_timedlock(&_M_mutex, &__ts);
      }

    // Release one level of ownership; errors are ignored.
    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_recursive_mutex_unlock(&_M_mutex);
    }

    native_handle_type
    native_handle()
    { return &_M_mutex; }

  private:
    // Case 1: the clock's tick period is no coarser than _Period;
    // the conversion does not truncate away requested time.
    template<typename _Rep, typename _Period>
      typename enable_if<
        ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
      __try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
      {
        __clock_t::time_point __atime = __clock_t::now()
          + chrono::duration_cast<__clock_t::duration>(__rtime);

        return try_lock_until(__atime);
      }

    // Case 2: the clock's tick period is coarser than _Period, so
    // duration_cast truncates toward zero; the pre-increment adds
    // one clock tick to round the wait up instead of down.
    template <typename _Rep, typename _Period>
      typename enable_if<
        !ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
      __try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
      {
        __clock_t::time_point __atime = __clock_t::now()
          + ++chrono::duration_cast<__clock_t::duration>(__rtime);

        return try_lock_until(__atime);
      }
  };
373
  /// Do not acquire ownership of the mutex.
  struct defer_lock_t { };

  /// Try to acquire ownership of the mutex without blocking.
  struct try_to_lock_t { };

  /// Assume the calling thread has already obtained mutex ownership
  /// and manage it.
  struct adopt_lock_t { };

  // Tag objects passed to lock-class constructors to select the
  // locking policy above; the definitions live in the library.
  extern const defer_lock_t defer_lock;
  extern const try_to_lock_t try_to_lock;
  extern const adopt_lock_t adopt_lock;
387
388 /// @brief Scoped lock idiom.
389 // Acquire the mutex here with a constructor call, then release with
390 // the destructor call in accordance with RAII style.
391 template<typename _Mutex>
392 class lock_guard
393 {
394 public:
395 typedef _Mutex mutex_type;
396
397 explicit lock_guard(mutex_type& __m) : _M_device(__m)
398 { _M_device.lock(); }
399
400 lock_guard(mutex_type& __m, adopt_lock_t) : _M_device(__m)
401 { } // calling thread owns mutex
402
403 ~lock_guard()
404 { _M_device.unlock(); }
405
406 lock_guard(const lock_guard&) = delete;
407 lock_guard& operator=(const lock_guard&) = delete;
408
409 private:
410 mutex_type& _M_device;
411 };
412
413 /// unique_lock
414 template<typename _Mutex>
415 class unique_lock
416 {
417 public:
418 typedef _Mutex mutex_type;
419
420 unique_lock()
421 : _M_device(0), _M_owns(false)
422 { }
423
424 explicit unique_lock(mutex_type& __m)
425 : _M_device(&__m), _M_owns(false)
426 {
427 lock();
428 _M_owns = true;
429 }
430
431 unique_lock(mutex_type& __m, defer_lock_t)
432 : _M_device(&__m), _M_owns(false)
433 { }
434
435 unique_lock(mutex_type& __m, try_to_lock_t)
436 : _M_device(&__m), _M_owns(_M_device->try_lock())
437 { }
438
439 unique_lock(mutex_type& __m, adopt_lock_t)
440 : _M_device(&__m), _M_owns(true)
441 {
442 // XXX calling thread owns mutex
443 }
444
445 template<typename _Clock, typename _Duration>
446 unique_lock(mutex_type& __m,
447 const chrono::time_point<_Clock, _Duration>& __atime)
448 : _M_device(&__m), _M_owns(_M_device->try_lock_until(__atime))
449 { }
450
451 template<typename _Rep, typename _Period>
452 unique_lock(mutex_type& __m,
453 const chrono::duration<_Rep, _Period>& __rtime)
454 : _M_device(&__m), _M_owns(_M_device->try_lock_for(__rtime))
455 { }
456
457 ~unique_lock()
458 {
459 if (_M_owns)
460 unlock();
461 }
462
463 unique_lock(const unique_lock&) = delete;
464 unique_lock& operator=(const unique_lock&) = delete;
465
466 unique_lock(unique_lock&& __u)
467 : _M_device(__u._M_device), _M_owns(__u._M_owns)
468 {
469 __u._M_device = 0;
470 __u._M_owns = false;
471 }
472
473 unique_lock& operator=(unique_lock&& __u)
474 {
475 if(_M_owns)
476 unlock();
477
478 unique_lock(std::move(__u)).swap(*this);
479
480 __u._M_device = 0;
481 __u._M_owns = false;
482
483 return *this;
484 }
485
486 void
487 lock()
488 {
489 if (!_M_device)
490 __throw_system_error(int(errc::operation_not_permitted));
491 else if (_M_owns)
492 __throw_system_error(int(errc::resource_deadlock_would_occur));
493 else
494 {
495 _M_device->lock();
496 _M_owns = true;
497 }
498 }
499
500 bool
501 try_lock()
502 {
503 if (!_M_device)
504 __throw_system_error(int(errc::operation_not_permitted));
505 else if (_M_owns)
506 __throw_system_error(int(errc::resource_deadlock_would_occur));
507 else
508 {
509 _M_owns = _M_device->try_lock();
510 return _M_owns;
511 }
512 }
513
514 template<typename _Clock, typename _Duration>
515 bool
516 try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
517 {
518 if (!_M_device)
519 __throw_system_error(int(errc::operation_not_permitted));
520 else if (_M_owns)
521 __throw_system_error(int(errc::resource_deadlock_would_occur));
522 else
523 {
524 _M_owns = _M_device->try_lock_until(__atime);
525 return _M_owns;
526 }
527 }
528
529 template<typename _Rep, typename _Period>
530 bool
531 try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
532 {
533 if (!_M_device)
534 __throw_system_error(int(errc::operation_not_permitted));
535 else if (_M_owns)
536 __throw_system_error(int(errc::resource_deadlock_would_occur));
537 else
538 {
539 _M_owns = _M_device->try_lock_for(__rtime);
540 return _M_owns;
541 }
542 }
543
544 void
545 unlock()
546 {
547 if (!_M_owns)
548 __throw_system_error(int(errc::operation_not_permitted));
549 else if (_M_device)
550 {
551 _M_device->unlock();
552 _M_owns = false;
553 }
554 }
555
556 void
557 swap(unique_lock& __u)
558 {
559 std::swap(_M_device, __u._M_device);
560 std::swap(_M_owns, __u._M_owns);
561 }
562
563 mutex_type*
564 release()
565 {
566 mutex_type* __ret = _M_device;
567 _M_device = 0;
568 _M_owns = false;
569 return __ret;
570 }
571
572 bool
573 owns_lock() const
574 { return _M_owns; }
575
576 explicit operator bool() const
577 { return owns_lock(); }
578
579 mutex_type*
580 mutex() const
581 { return _M_device; }
582
583 private:
584 mutex_type* _M_device;
585 bool _M_owns; // XXX use atomic_bool
586 };
587
588 template<typename _Mutex>
589 inline void
590 swap(unique_lock<_Mutex>& __x, unique_lock<_Mutex>& __y)
591 { __x.swap(__y); }
592
593 template<int _Idx>
594 struct __unlock_impl
595 {
596 template<typename... _Lock>
597 static void
598 __do_unlock(tuple<_Lock&...>& __locks)
599 {
600 std::get<_Idx>(__locks).unlock();
601 __unlock_impl<_Idx - 1>::__do_unlock(__locks);
602 }
603 };
604
605 template<>
606 struct __unlock_impl<-1>
607 {
608 template<typename... _Lock>
609 static void
610 __do_unlock(tuple<_Lock&...>&)
611 { }
612 };
613
614 template<int _Idx, bool _Continue = true>
615 struct __try_lock_impl
616 {
617 template<typename... _Lock>
618 static int
619 __do_try_lock(tuple<_Lock&...>& __locks)
620 {
621 if(std::get<_Idx>(__locks).try_lock())
622 {
623 return __try_lock_impl<_Idx + 1,
624 _Idx + 2 < sizeof...(_Lock)>::__do_try_lock(__locks);
625 }
626 else
627 {
628 __unlock_impl<_Idx>::__do_unlock(__locks);
629 return _Idx;
630 }
631 }
632 };
633
634 template<int _Idx>
635 struct __try_lock_impl<_Idx, false>
636 {
637 template<typename... _Lock>
638 static int
639 __do_try_lock(tuple<_Lock&...>& __locks)
640 {
641 if(std::get<_Idx>(__locks).try_lock())
642 return -1;
643 else
644 {
645 __unlock_impl<_Idx>::__do_unlock(__locks);
646 return _Idx;
647 }
648 }
649 };
650
651 /** @brief Generic try_lock.
652 * @param __l1 Meets Mutex requirements (try_lock() may throw).
653 * @param __l2 Meets Mutex requirements (try_lock() may throw).
654 * @param __l3 Meets Mutex requirements (try_lock() may throw).
655 * @return Returns -1 if all try_lock() calls return true. Otherwise returns
656 * a 0-based index corresponding to the argument that returned false.
657 * @post Either all arguments are locked, or none will be.
658 *
659 * Sequentially calls try_lock() on each argument.
660 */
661 template<typename _Lock1, typename _Lock2, typename... _Lock3>
662 int
663 try_lock(_Lock1& __l1, _Lock2& __l2, _Lock3&... __l3)
664 {
665 tuple<_Lock1&, _Lock2&, _Lock3&...> __locks(__l1, __l2, __l3...);
666 return __try_lock_impl<0>::__do_try_lock(__locks);
667 }
668
  /// lock
  //
  // Lock all of the given lockables, avoiding deadlock.  Only the
  // declaration appears here; no definition is present in this
  // file.
  template<typename _L1, typename _L2, typename ..._L3>
    void
    lock(_L1&, _L2&, _L3&...);
673
  /// once_flag
  //
  // Opaque flag passed to call_once so that an initializer runs
  // exactly once.  Wraps a gthreads once-control object; not
  // copyable.
  struct once_flag
  {
  private:
    typedef __gthread_once_t __native_type;
    __native_type  _M_once;

  public:
    once_flag()
    {
      // Copy the static initializer through a temporary.
      __native_type __tmp = __GTHREAD_ONCE_INIT;
      _M_once = __tmp;
    }

    once_flag(const once_flag&) = delete;
    once_flag& operator=(const once_flag&) = delete;

    // call_once needs direct access to _M_once.
    template<typename _Callable, typename... _Args>
      friend void
      call_once(once_flag& __once, _Callable __f, _Args&&... __args);
  };
695
#ifdef _GLIBCXX_HAVE_TLS
  // With TLS, call_once publishes the pending callable to the once
  // proxy through these thread-local globals (defined in the
  // library).
  extern __thread void* __once_callable;
  extern __thread void (*__once_call)();

  // Trampoline stored in __once_call: invokes the functor that
  // __once_callable currently points at.
  template<typename _Callable>
    inline void
    __once_call_impl()
    {
      (*(_Callable*)__once_callable)();
    }
#else
  // Without TLS the pending functor is a single global, serialized
  // via a library-internal mutex.
  extern function<void()> __once_functor;

  extern void
  __set_once_functor_lock_ptr(unique_lock<mutex>*);

  extern mutex&
  __get_once_mutex();
#endif

  // C-linkage trampoline handed to __gthread_once.
  extern "C" void __once_proxy();
717
  /// call_once
  //
  // Invoke __f(__args...) exactly once for the given once_flag,
  // even when invoked concurrently from several threads.  Throws
  // system_error if the underlying __gthread_once call fails.
  // Note the arguments are copied into the bound functor rather
  // than perfectly forwarded.
  template<typename _Callable, typename... _Args>
    void
    call_once(once_flag& __once, _Callable __f, _Args&&... __args)
    {
#ifdef _GLIBCXX_HAVE_TLS
      // Bind the callable plus arguments and publish it through the
      // thread-local pointers read by __once_proxy.
      // NOTE(review): __once_callable keeps pointing at the local
      // __bound_functor after this function returns — relies on the
      // proxy only using it during __gthread_once; confirm.
      auto __bound_functor = std::bind<void>(__f, __args...);
      __once_callable = &__bound_functor;
      __once_call = &__once_call_impl<decltype(__bound_functor)>;
#else
      // No TLS: serialize through the library's once mutex and hand
      // the functor over via the global __once_functor.
      unique_lock<mutex> __functor_lock(__get_once_mutex());
      __once_functor = std::bind<void>(__f, __args...);
      __set_once_functor_lock_ptr(&__functor_lock);
#endif

      int __e = __gthread_once(&(__once._M_once), &__once_proxy);

#ifndef _GLIBCXX_HAVE_TLS
      // If we still own the lock, clear the library's pointer to it
      // before the lock is destroyed.
      if (__functor_lock)
	__set_once_functor_lock_ptr(0);
#endif

      if (__e)
	__throw_system_error(__e);
    }
743
744 // @} group mutexes
745 }
746
747 #endif // _GLIBCXX_HAS_GTHREADS && _GLIBCXX_USE_C99_STDINT_TR1
748
749 #endif // __GXX_EXPERIMENTAL_CXX0X__
750
751 #endif // _GLIBCXX_MUTEX