]> git.ipfire.org Git - thirdparty/gcc.git/blob - libstdc++-v3/include/std/mutex
re PR c++/59378 (Internal compiler error when using __builtin_shuffle in a template...
[thirdparty/gcc.git] / libstdc++-v3 / include / std / mutex
1 // <mutex> -*- C++ -*-
2
3 // Copyright (C) 2003-2013 Free Software Foundation, Inc.
4 //
5 // This file is part of the GNU ISO C++ Library. This library is free
6 // software; you can redistribute it and/or modify it under the
7 // terms of the GNU General Public License as published by the
8 // Free Software Foundation; either version 3, or (at your option)
9 // any later version.
10
11 // This library is distributed in the hope that it will be useful,
12 // but WITHOUT ANY WARRANTY; without even the implied warranty of
13 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 // GNU General Public License for more details.
15
16 // Under Section 7 of GPL version 3, you are granted additional
17 // permissions described in the GCC Runtime Library Exception, version
18 // 3.1, as published by the Free Software Foundation.
19
20 // You should have received a copy of the GNU General Public License and
21 // a copy of the GCC Runtime Library Exception along with this program;
22 // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23 // <http://www.gnu.org/licenses/>.
24
25 /** @file include/mutex
26 * This is a Standard C++ Library header.
27 */
28
29 #ifndef _GLIBCXX_MUTEX
30 #define _GLIBCXX_MUTEX 1
31
32 #pragma GCC system_header
33
34 #if __cplusplus < 201103L
35 # include <bits/c++0x_warning.h>
36 #else
37
38 #include <tuple>
39 #include <chrono>
40 #include <exception>
41 #include <type_traits>
42 #include <functional>
43 #include <system_error>
44 #include <bits/functexcept.h>
45 #include <bits/gthr.h>
46 #include <bits/move.h> // for std::swap
47
48 #ifdef _GLIBCXX_USE_C99_STDINT_TR1
49
50 namespace std _GLIBCXX_VISIBILITY(default)
51 {
52 _GLIBCXX_BEGIN_NAMESPACE_VERSION
53
54 #ifdef _GLIBCXX_HAS_GTHREADS
  // Common base class for std::mutex and std::timed_mutex.
  // Owns a single native gthreads mutex; non-copyable.
  class __mutex_base
  {
  protected:
    typedef __gthread_mutex_t __native_type;

#ifdef __GTHREAD_MUTEX_INIT
    // The native type has a static initializer, so no init/destroy calls
    // are needed and the default constructor can be constexpr.
    __native_type _M_mutex = __GTHREAD_MUTEX_INIT;

    constexpr __mutex_base() noexcept = default;
#else
    __native_type _M_mutex;

    __mutex_base() noexcept
    {
      // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
      __GTHREAD_MUTEX_INIT_FUNCTION(&_M_mutex);
    }

    // Destroy the native mutex only when we had to initialize it dynamically.
    ~__mutex_base() noexcept { __gthread_mutex_destroy(&_M_mutex); }
#endif

    __mutex_base(const __mutex_base&) = delete;
    __mutex_base& operator=(const __mutex_base&) = delete;
  };
80
  // Common base class for std::recursive_mutex and std::recursive_timed_mutex.
  // Owns a single native recursive gthreads mutex; non-copyable.
  class __recursive_mutex_base
  {
  protected:
    typedef __gthread_recursive_mutex_t __native_type;

    __recursive_mutex_base(const __recursive_mutex_base&) = delete;
    __recursive_mutex_base& operator=(const __recursive_mutex_base&) = delete;

#ifdef __GTHREAD_RECURSIVE_MUTEX_INIT
    // Static initializer available: no init/destroy calls needed.
    __native_type _M_mutex = __GTHREAD_RECURSIVE_MUTEX_INIT;

    __recursive_mutex_base() = default;
#else
    __native_type _M_mutex;

    __recursive_mutex_base()
    {
      // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
      __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION(&_M_mutex);
    }

    ~__recursive_mutex_base()
    { __gthread_recursive_mutex_destroy(&_M_mutex); }
#endif
  };
107
108 /**
109 * @defgroup mutexes Mutexes
110 * @ingroup concurrency
111 *
112 * Classes for mutex support.
113 * @{
114 */
115
  /// mutex — non-recursive mutual exclusion lock.
  class mutex : private __mutex_base
  {
  public:
    typedef __native_type* native_handle_type;

#ifdef __GTHREAD_MUTEX_INIT
    // constexpr only when the native mutex is statically initializable.
    constexpr
#endif
    mutex() noexcept = default;
    ~mutex() = default;

    mutex(const mutex&) = delete;
    mutex& operator=(const mutex&) = delete;

    /// Block until the mutex is acquired; throws system_error on failure.
    void
    lock()
    {
      int __e = __gthread_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
      if (__e)
	__throw_system_error(__e);
    }

    /// Attempt to acquire without blocking; returns true on success.
    bool
    try_lock() noexcept
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_mutex_trylock(&_M_mutex);
    }

    /// Release the mutex.  Errors from the native call are ignored
    /// (unlocking a mutex you own cannot fail in a well-formed program).
    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EPERM
      __gthread_mutex_unlock(&_M_mutex);
    }

    /// Access to the underlying native mutex object.
    native_handle_type
    native_handle()
    { return &_M_mutex; }
  };
159
  /// recursive_mutex — may be locked repeatedly by the owning thread.
  class recursive_mutex : private __recursive_mutex_base
  {
  public:
    typedef __native_type* native_handle_type;

    recursive_mutex() = default;
    ~recursive_mutex() = default;

    recursive_mutex(const recursive_mutex&) = delete;
    recursive_mutex& operator=(const recursive_mutex&) = delete;

    /// Block until the mutex is acquired (or its recursion count
    /// incremented for the owner); throws system_error on failure.
    void
    lock()
    {
      int __e = __gthread_recursive_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
      if (__e)
	__throw_system_error(__e);
    }

    /// Attempt to acquire without blocking; returns true on success.
    bool
    try_lock() noexcept
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_recursive_mutex_trylock(&_M_mutex);
    }

    /// Decrement the recursion count, releasing the mutex when it
    /// reaches zero.  Errors from the native call are ignored.
    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_recursive_mutex_unlock(&_M_mutex);
    }

    /// Access to the underlying native mutex object.
    native_handle_type
    native_handle()
    { return &_M_mutex; }
  };
200
201 #if _GTHREAD_USE_MUTEX_TIMEDLOCK
  // CRTP helper providing the timed-lock operations shared by
  // timed_mutex and recursive_timed_mutex.  _Derived must supply
  // native_handle() returning a pointer usable with
  // __gthread_mutex_timedlock.
  template<typename _Derived>
    class __timed_mutex_impl
    {
    protected:
      // Clock to which foreign-clock absolute times are converted.
      typedef chrono::high_resolution_clock __clock_t;

      // Convert a relative timeout into an absolute steady_clock time
      // and defer to _M_try_lock_until.
      template<typename _Rep, typename _Period>
	bool
	_M_try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
	{
	  using chrono::steady_clock;
	  auto __rt = chrono::duration_cast<steady_clock::duration>(__rtime);
	  // duration_cast truncates; round up a coarser result so we
	  // never wake before the full relative time has elapsed.
	  if (ratio_greater<steady_clock::period, _Period>())
	    ++__rt;
	  return _M_try_lock_until(steady_clock::now() + __rt);
	}

      // Absolute timeout already expressed against __clock_t: split into
      // seconds/nanoseconds and call the native timed lock.
      // NOTE(review): this assumes __clock_t shares its epoch with the
      // clock the native timedlock measures against (presumably
      // CLOCK_REALTIME on POSIX targets) — verify per target.
      template<typename _Duration>
	bool
	_M_try_lock_until(const chrono::time_point<__clock_t,
						   _Duration>& __atime)
	{
	  auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
	  auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

	  __gthread_time_t __ts = {
	    static_cast<std::time_t>(__s.time_since_epoch().count()),
	    static_cast<long>(__ns.count())
	  };

	  auto __mutex = static_cast<_Derived*>(this)->native_handle();
	  return !__gthread_mutex_timedlock(__mutex, &__ts);
	}

      // Absolute timeout on an arbitrary clock: re-express the remaining
      // time against __clock_t.  NOTE(review): if _Clock advances at a
      // different rate than __clock_t the wait may end early or late.
      template<typename _Clock, typename _Duration>
	bool
	_M_try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
	{
	  auto __rtime = __atime - _Clock::now();
	  return _M_try_lock_until(__clock_t::now() + __rtime);
	}
    };
244
  /// timed_mutex — mutex supporting bounded-wait acquisition.
  class timed_mutex
  : private __mutex_base, public __timed_mutex_impl<timed_mutex>
  {
  public:
    typedef __native_type* native_handle_type;

    timed_mutex() = default;
    ~timed_mutex() = default;

    timed_mutex(const timed_mutex&) = delete;
    timed_mutex& operator=(const timed_mutex&) = delete;

    /// Block until the mutex is acquired; throws system_error on failure.
    void
    lock()
    {
      int __e = __gthread_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
      if (__e)
	__throw_system_error(__e);
    }

    /// Attempt to acquire without blocking; returns true on success.
    bool
    try_lock() noexcept
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_mutex_trylock(&_M_mutex);
    }

    /// Try to acquire for at most the relative time @p __rtime.
    template <class _Rep, class _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      { return _M_try_lock_for(__rtime); }

    /// Try to acquire until the absolute time @p __atime.
    template <class _Clock, class _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      { return _M_try_lock_until(__atime); }

    /// Release the mutex.  Errors from the native call are ignored.
    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_mutex_unlock(&_M_mutex);
    }

    /// Access to the underlying native mutex object.
    native_handle_type
    native_handle()
    { return &_M_mutex; }
  };
296
  /// recursive_timed_mutex — recursive mutex with bounded-wait acquisition.
  class recursive_timed_mutex
  : private __recursive_mutex_base,
    public __timed_mutex_impl<recursive_timed_mutex>
  {
  public:
    typedef __native_type* native_handle_type;

    recursive_timed_mutex() = default;
    ~recursive_timed_mutex() = default;

    recursive_timed_mutex(const recursive_timed_mutex&) = delete;
    recursive_timed_mutex& operator=(const recursive_timed_mutex&) = delete;

    /// Block until the mutex is acquired (or its recursion count
    /// incremented for the owner); throws system_error on failure.
    void
    lock()
    {
      int __e = __gthread_recursive_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
      if (__e)
	__throw_system_error(__e);
    }

    /// Attempt to acquire without blocking; returns true on success.
    bool
    try_lock() noexcept
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_recursive_mutex_trylock(&_M_mutex);
    }

    /// Try to acquire for at most the relative time @p __rtime.
    template <class _Rep, class _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      { return _M_try_lock_for(__rtime); }

    /// Try to acquire until the absolute time @p __atime.
    template <class _Clock, class _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      { return _M_try_lock_until(__atime); }

    /// Decrement the recursion count, releasing the mutex when it
    /// reaches zero.  Errors from the native call are ignored.
    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_recursive_mutex_unlock(&_M_mutex);
    }

    /// Access to the underlying native mutex object.
    native_handle_type
    native_handle()
    { return &_M_mutex; }
  };
349 #endif
350 #endif // _GLIBCXX_HAS_GTHREADS
351
  /// Tag type: do not acquire ownership of the mutex.
  struct defer_lock_t { };

  /// Tag type: try to acquire ownership of the mutex without blocking.
  struct try_to_lock_t { };

  /// Tag type: assume the calling thread has already obtained mutex
  /// ownership and manage it.
  struct adopt_lock_t { };

  // Tag objects passed to lock constructors to select a locking strategy.
  constexpr defer_lock_t defer_lock { };
  constexpr try_to_lock_t try_to_lock { };
  constexpr adopt_lock_t adopt_lock { };
365
366 /// @brief Scoped lock idiom.
367 // Acquire the mutex here with a constructor call, then release with
368 // the destructor call in accordance with RAII style.
369 template<typename _Mutex>
370 class lock_guard
371 {
372 public:
373 typedef _Mutex mutex_type;
374
375 explicit lock_guard(mutex_type& __m) : _M_device(__m)
376 { _M_device.lock(); }
377
378 lock_guard(mutex_type& __m, adopt_lock_t) : _M_device(__m)
379 { } // calling thread owns mutex
380
381 ~lock_guard()
382 { _M_device.unlock(); }
383
384 lock_guard(const lock_guard&) = delete;
385 lock_guard& operator=(const lock_guard&) = delete;
386
387 private:
388 mutex_type& _M_device;
389 };
390
  /// unique_lock — movable mutex ownership wrapper.
  ///
  /// Tracks the referenced mutex (_M_device) separately from whether
  /// this object owns its lock (_M_owns), so acquisition can be
  /// deferred, adopted, tried, timed, released or transferred.
  template<typename _Mutex>
    class unique_lock
    {
    public:
      typedef _Mutex mutex_type;

      /// Default: refers to no mutex, owns nothing.
      unique_lock() noexcept
      : _M_device(0), _M_owns(false)
      { }

      /// Acquire @p __m, blocking until locked.
      explicit unique_lock(mutex_type& __m)
      : _M_device(&__m), _M_owns(false)
      {
	lock();
	_M_owns = true;
      }

      /// Refer to @p __m without locking it.
      unique_lock(mutex_type& __m, defer_lock_t) noexcept
      : _M_device(&__m), _M_owns(false)
      { }

      /// Attempt a non-blocking lock of @p __m; check owns_lock() after.
      unique_lock(mutex_type& __m, try_to_lock_t)
      : _M_device(&__m), _M_owns(_M_device->try_lock())
      { }

      /// Adopt @p __m, which the calling thread must already have locked.
      unique_lock(mutex_type& __m, adopt_lock_t)
      : _M_device(&__m), _M_owns(true)
      {
	// XXX calling thread owns mutex
      }

      /// Try to lock @p __m until the absolute time @p __atime.
      template<typename _Clock, typename _Duration>
	unique_lock(mutex_type& __m,
		    const chrono::time_point<_Clock, _Duration>& __atime)
	: _M_device(&__m), _M_owns(_M_device->try_lock_until(__atime))
	{ }

      /// Try to lock @p __m for at most the relative time @p __rtime.
      template<typename _Rep, typename _Period>
	unique_lock(mutex_type& __m,
		    const chrono::duration<_Rep, _Period>& __rtime)
	: _M_device(&__m), _M_owns(_M_device->try_lock_for(__rtime))
	{ }

      /// Unlock only if this object currently owns the lock.
      ~unique_lock()
      {
	if (_M_owns)
	  unlock();
      }

      unique_lock(const unique_lock&) = delete;
      unique_lock& operator=(const unique_lock&) = delete;

      /// Move constructor: takes over __u's mutex and ownership,
      /// leaving __u empty.
      unique_lock(unique_lock&& __u) noexcept
      : _M_device(__u._M_device), _M_owns(__u._M_owns)
      {
	__u._M_device = 0;
	__u._M_owns = false;
      }

      /// Move assignment: releases any currently held lock, then takes
      /// over __u's state.
      unique_lock& operator=(unique_lock&& __u) noexcept
      {
	if(_M_owns)
	  unlock();

	unique_lock(std::move(__u)).swap(*this);

	// The move construction above already emptied __u; these
	// assignments are redundant but harmless.
	__u._M_device = 0;
	__u._M_owns = false;

	return *this;
      }

      /// Block until the referenced mutex is locked.
      /// Throws system_error(operation_not_permitted) if there is no
      /// mutex, or system_error(resource_deadlock_would_occur) if this
      /// object already owns its lock.
      void
      lock()
      {
	if (!_M_device)
	  __throw_system_error(int(errc::operation_not_permitted));
	else if (_M_owns)
	  __throw_system_error(int(errc::resource_deadlock_would_occur));
	else
	  {
	    _M_device->lock();
	    _M_owns = true;
	  }
      }

      /// Non-blocking lock attempt; returns true on success.
      /// Throws the same errors as lock() for a missing or already
      /// owned mutex.
      bool
      try_lock()
      {
	if (!_M_device)
	  __throw_system_error(int(errc::operation_not_permitted));
	else if (_M_owns)
	  __throw_system_error(int(errc::resource_deadlock_would_occur));
	else
	  {
	    _M_owns = _M_device->try_lock();
	    return _M_owns;
	  }
      }

      /// Timed lock attempt until the absolute time @p __atime.
      /// Throws the same errors as lock().
      template<typename _Clock, typename _Duration>
	bool
	try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
	{
	  if (!_M_device)
	    __throw_system_error(int(errc::operation_not_permitted));
	  else if (_M_owns)
	    __throw_system_error(int(errc::resource_deadlock_would_occur));
	  else
	    {
	      _M_owns = _M_device->try_lock_until(__atime);
	      return _M_owns;
	    }
	}

      /// Timed lock attempt for at most the relative time @p __rtime.
      /// Throws the same errors as lock().
      template<typename _Rep, typename _Period>
	bool
	try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
	{
	  if (!_M_device)
	    __throw_system_error(int(errc::operation_not_permitted));
	  else if (_M_owns)
	    __throw_system_error(int(errc::resource_deadlock_would_occur));
	  else
	    {
	      _M_owns = _M_device->try_lock_for(__rtime);
	      return _M_owns;
	    }
	}

      /// Unlock the referenced mutex.
      /// Throws system_error(operation_not_permitted) if the lock is
      /// not currently owned.
      void
      unlock()
      {
	if (!_M_owns)
	  __throw_system_error(int(errc::operation_not_permitted));
	else if (_M_device)
	  {
	    _M_device->unlock();
	    _M_owns = false;
	  }
      }

      /// Exchange state with @p __u.
      void
      swap(unique_lock& __u) noexcept
      {
	std::swap(_M_device, __u._M_device);
	std::swap(_M_owns, __u._M_owns);
      }

      /// Disassociate from the mutex WITHOUT unlocking it; returns the
      /// mutex pointer.  The caller becomes responsible for unlocking.
      mutex_type*
      release() noexcept
      {
	mutex_type* __ret = _M_device;
	_M_device = 0;
	_M_owns = false;
	return __ret;
      }

      /// True if this object owns its mutex's lock.
      bool
      owns_lock() const noexcept
      { return _M_owns; }

      /// Same as owns_lock().
      explicit operator bool() const noexcept
      { return owns_lock(); }

      /// The referenced mutex, or null if none.
      mutex_type*
      mutex() const noexcept
      { return _M_device; }

    private:
      mutex_type* _M_device; // referenced mutex (may be null)
      bool _M_owns; // XXX use atomic_bool
    };
565
566 /// Partial specialization for unique_lock objects.
567 template<typename _Mutex>
568 inline void
569 swap(unique_lock<_Mutex>& __x, unique_lock<_Mutex>& __y) noexcept
570 { __x.swap(__y); }
571
  // Recursively unlocks tuple elements _Idx, _Idx-1, ..., 0.
  // NOTE(review): appears unused within this header; presumably kept
  // for compatibility with other internal code — verify before removing.
  template<int _Idx>
    struct __unlock_impl
    {
      template<typename... _Lock>
	static void
	__do_unlock(tuple<_Lock&...>& __locks)
	{
	  std::get<_Idx>(__locks).unlock();
	  __unlock_impl<_Idx - 1>::__do_unlock(__locks);
	}
    };

  // Base case: index -1 means nothing left to unlock.
  template<>
    struct __unlock_impl<-1>
    {
      template<typename... _Lock>
	static void
	__do_unlock(tuple<_Lock&...>&)
	{ }
    };
592
593 template<typename _Lock>
594 unique_lock<_Lock>
595 __try_to_lock(_Lock& __l)
596 { return unique_lock<_Lock>(__l, try_to_lock); }
597
  // Recursive helper for try_lock()/lock(): attempts to lock tuple
  // element _Idx and then all later elements.  _Continue is true while
  // _Idx is not the last element.  On total success __idx is set to -1
  // and each level release()s its unique_lock so the mutexes stay
  // locked; on failure __idx holds the first failing index and the
  // still-owned unique_locks unwind, unlocking everything acquired.
  template<int _Idx, bool _Continue = true>
    struct __try_lock_impl
    {
      template<typename... _Lock>
	static void
	__do_try_lock(tuple<_Lock&...>& __locks, int& __idx)
	{
	  // Record this index as the failure point until proven otherwise.
	  __idx = _Idx;
	  auto __lock = __try_to_lock(std::get<_Idx>(__locks));
	  if (__lock.owns_lock())
	    {
	      // _Idx + 2 < sizeof... selects the terminal specialization
	      // when the next element is the last one.
	      __try_lock_impl<_Idx + 1, _Idx + 2 < sizeof...(_Lock)>::
		__do_try_lock(__locks, __idx);
	      if (__idx == -1)
		__lock.release(); // success: keep this element locked
	    }
	}
    };

  // Terminal case: _Idx is the last tuple element.
  template<int _Idx>
    struct __try_lock_impl<_Idx, false>
    {
      template<typename... _Lock>
	static void
	__do_try_lock(tuple<_Lock&...>& __locks, int& __idx)
	{
	  __idx = _Idx;
	  auto __lock = __try_to_lock(std::get<_Idx>(__locks));
	  if (__lock.owns_lock())
	    {
	      __idx = -1;          // every element locked
	      __lock.release();    // keep the final lock held
	    }
	}
    };
633
634 /** @brief Generic try_lock.
635 * @param __l1 Meets Mutex requirements (try_lock() may throw).
636 * @param __l2 Meets Mutex requirements (try_lock() may throw).
637 * @param __l3 Meets Mutex requirements (try_lock() may throw).
638 * @return Returns -1 if all try_lock() calls return true. Otherwise returns
639 * a 0-based index corresponding to the argument that returned false.
640 * @post Either all arguments are locked, or none will be.
641 *
642 * Sequentially calls try_lock() on each argument.
643 */
644 template<typename _Lock1, typename _Lock2, typename... _Lock3>
645 int
646 try_lock(_Lock1& __l1, _Lock2& __l2, _Lock3&... __l3)
647 {
648 int __idx;
649 auto __locks = std::tie(__l1, __l2, __l3...);
650 __try
651 { __try_lock_impl<0>::__do_try_lock(__locks, __idx); }
652 __catch(...)
653 { }
654 return __idx;
655 }
656
  /** @brief Generic lock.
   *  @param __l1 Meets Mutex requirements (try_lock() may throw).
   *  @param __l2 Meets Mutex requirements (try_lock() may throw).
   *  @param __l3 Meets Mutex requirements (try_lock() may throw).
   *  @throw An exception thrown by an argument's lock() or try_lock() member.
   *  @post All arguments are locked.
   *
   *  All arguments are locked via a sequence of calls to lock(), try_lock()
   *  and unlock().  If the call exits via an exception any locks that were
   *  obtained will be released.
   */
  template<typename _L1, typename _L2, typename ..._L3>
    void
    lock(_L1& __l1, _L2& __l2, _L3&... __l3)
    {
      // Deadlock avoidance: block on __l1, then try to take the rest
      // without blocking.  On any failure all locks are dropped (RAII
      // via __first and the unique_locks in __try_lock_impl) and the
      // whole sequence is retried.  NOTE(review): each retry starts
      // with __l1 again rather than the lock that failed, so competing
      // callers with different argument orders may spin briefly; they
      // cannot deadlock because nothing is held between iterations.
      while (true)
	{
	  unique_lock<_L1> __first(__l1);
	  int __idx;
	  auto __locks = std::tie(__l2, __l3...);
	  // sizeof...(_L3) as the bool argument selects the terminal
	  // specialization directly when only __l2 remains.
	  __try_lock_impl<0, sizeof...(_L3)>::__do_try_lock(__locks, __idx);
	  if (__idx == -1)
	    {
	      // Everything locked: detach __first so nothing unlocks.
	      __first.release();
	      return;
	    }
	}
    }
685
686 #ifdef _GLIBCXX_HAS_GTHREADS
  /// once_flag — per-initialization state object used with call_once.
  struct once_flag
  {
  private:
    typedef __gthread_once_t __native_type;
    // Native once-control object; statically initialized so the
    // constructor can be constexpr.
    __native_type _M_once = __GTHREAD_ONCE_INIT;

  public:
    /// Constructor
    constexpr once_flag() noexcept = default;

    /// Deleted copy constructor
    once_flag(const once_flag&) = delete;
    /// Deleted assignment operator
    once_flag& operator=(const once_flag&) = delete;

    // call_once needs direct access to _M_once.
    template<typename _Callable, typename... _Args>
      friend void
      call_once(once_flag& __once, _Callable&& __f, _Args&&... __args);
  };
707
708 #ifdef _GLIBCXX_HAVE_TLS
709 extern __thread void* __once_callable;
710 extern __thread void (*__once_call)();
711
  // Trampoline with the plain signature required for __once_call:
  // recovers the callable from the thread-local __once_callable pointer
  // and invokes it.
  template<typename _Callable>
    inline void
    __once_call_impl()
    {
      (*(_Callable*)__once_callable)();
    }
718 #else
719 extern function<void()> __once_functor;
720
721 extern void
722 __set_once_functor_lock_ptr(unique_lock<mutex>*);
723
724 extern mutex&
725 __get_once_mutex();
726 #endif
727
728 extern "C" void __once_proxy(void);
729
  /// call_once — invoke @p __f with @p __args exactly once per @p __once
  /// flag, across all threads.  Throws system_error if the native once
  /// primitive reports an error.
  template<typename _Callable, typename... _Args>
    void
    call_once(once_flag& __once, _Callable&& __f, _Args&&... __args)
    {
#ifdef _GLIBCXX_HAVE_TLS
      // Publish the bound call through thread-local pointers read back
      // by __once_proxy.  __bound_functor is a local, so the pointer is
      // only valid for the duration of the __gthread_once call below.
      auto __bound_functor = std::__bind_simple(std::forward<_Callable>(__f),
          std::forward<_Args>(__args)...);
      __once_callable = &__bound_functor;
      __once_call = &__once_call_impl<decltype(__bound_functor)>;
#else
      // No TLS: hand the call to __once_proxy through a global functor,
      // serialized by the mutex from __get_once_mutex().  The lock
      // pointer is presumably cleared by __once_proxy when it runs —
      // verify against the definition in src/c++11/mutex.cc.
      unique_lock<mutex> __functor_lock(__get_once_mutex());
      auto __callable = std::__bind_simple(std::forward<_Callable>(__f),
          std::forward<_Args>(__args)...);
      __once_functor = [&]() { __callable(); };
      __set_once_functor_lock_ptr(&__functor_lock);
#endif

      int __e = __gthread_once(&__once._M_once, &__once_proxy);

#ifndef _GLIBCXX_HAVE_TLS
      // If the proxy never ran (another thread won the race) we still
      // hold the functor lock: detach it from the global pointer.
      if (__functor_lock)
	__set_once_functor_lock_ptr(0);
#endif

      if (__e)
	__throw_system_error(__e);
    }
758 #endif // _GLIBCXX_HAS_GTHREADS
759
760 // @} group mutexes
761 _GLIBCXX_END_NAMESPACE_VERSION
762 } // namespace
763 #endif // _GLIBCXX_USE_C99_STDINT_TR1
764
765 #endif // C++11
766
767 #endif // _GLIBCXX_MUTEX