]> git.ipfire.org Git - thirdparty/gcc.git/blob - libstdc++-v3/include/std/mutex
*: Use headername alias to associate private includes to public includes.
[thirdparty/gcc.git] / libstdc++-v3 / include / std / mutex
1 // <mutex> -*- C++ -*-
2
3 // Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 // Free Software Foundation, Inc.
5 //
6 // This file is part of the GNU ISO C++ Library. This library is free
7 // software; you can redistribute it and/or modify it under the
8 // terms of the GNU General Public License as published by the
9 // Free Software Foundation; either version 3, or (at your option)
10 // any later version.
11
12 // This library is distributed in the hope that it will be useful,
13 // but WITHOUT ANY WARRANTY; without even the implied warranty of
14 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 // GNU General Public License for more details.
16
17 // Under Section 7 of GPL version 3, you are granted additional
18 // permissions described in the GCC Runtime Library Exception, version
19 // 3.1, as published by the Free Software Foundation.
20
21 // You should have received a copy of the GNU General Public License and
22 // a copy of the GCC Runtime Library Exception along with this program;
23 // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
24 // <http://www.gnu.org/licenses/>.
25
26 /** @file include/mutex
27 * This is a Standard C++ Library header.
28 */
29
30 #ifndef _GLIBCXX_MUTEX
31 #define _GLIBCXX_MUTEX 1
32
33 #pragma GCC system_header
34
35 #ifndef __GXX_EXPERIMENTAL_CXX0X__
36 # include <bits/c++0x_warning.h>
37 #else
38
39 #include <tuple>
40 #include <chrono>
41 #include <exception>
42 #include <type_traits>
43 #include <functional>
44 #include <system_error>
45 #include <bits/functexcept.h>
46 #include <bits/gthr.h>
47 #include <bits/move.h> // for std::swap
48
49 #if defined(_GLIBCXX_HAS_GTHREADS) && defined(_GLIBCXX_USE_C99_STDINT_TR1)
50
51 _GLIBCXX_BEGIN_NAMESPACE(std)
52
53 /**
54 * @defgroup mutexes Mutexes
55 * @ingroup concurrency
56 *
57 * Classes for mutex support.
58 * @{
59 */
60
  /// mutex
  //
  // The standard non-recursive, exclusive-ownership mutex.  A thin
  // wrapper over the gthreads abstraction layer (__gthread_mutex_t) so
  // the same header works on every target GCC supports.
  class mutex
  {
    typedef __gthread_mutex_t __native_type;
    __native_type  _M_mutex;

  public:
    typedef __native_type* native_handle_type;

#ifdef __GTHREAD_MUTEX_INIT
    // A static initializer exists: the mutex can be constant-initialized
    // and is trivially destructible, so no destructor is declared here.
    constexpr mutex() : _M_mutex(__GTHREAD_MUTEX_INIT) { }
#else
    mutex()
    {
      // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
      __GTHREAD_MUTEX_INIT_FUNCTION(&_M_mutex);
    }

    ~mutex() { __gthread_mutex_destroy(&_M_mutex); }
#endif

    // Mutexes are neither copyable nor movable.
    mutex(const mutex&) = delete;
    mutex& operator=(const mutex&) = delete;

    // Block until ownership is acquired; throws system_error on failure.
    void
    lock()
    {
      int __e = __gthread_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
      if (__e)
	__throw_system_error(__e);
    }

    // Non-blocking acquire: returns true iff ownership was obtained
    // (the gthreads call returns 0 on success).
    bool
    try_lock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_mutex_trylock(&_M_mutex);
    }

    // Release ownership.  The error code is deliberately ignored:
    // unlock runs from destructors and must not throw.
    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EPERM
      __gthread_mutex_unlock(&_M_mutex);
    }

    // Access to the underlying implementation-defined handle.
    native_handle_type
    native_handle()
    { return &_M_mutex; }
  };
113
114 #ifndef __GTHREAD_RECURSIVE_MUTEX_INIT
115 // FIXME: gthreads doesn't define __gthread_recursive_mutex_destroy
116 // so we need to obtain a __gthread_mutex_t to destroy
  // Helper used by the recursive mutex destructors below.  The three
  // _S_destroy overloads use SFINAE (sizeof over a member's address) to
  // match whichever recursive-mutex layout the target's gthr header
  // provides; exactly one overload is viable for a given _Rm.
  class __destroy_recursive_mutex
  {
    // Copy the two fields of a gthr-win32.h recursive mutex into a
    // plain __gthread_mutex_t so the non-recursive destroy primitive
    // can be applied to it.
    template<typename _Mx, typename _Rm>
      static void
      _S_destroy_win32(_Mx* __mx, _Rm const* __rmx)
      {
	__mx->counter = __rmx->counter;
	__mx->sema = __rmx->sema;
	__gthread_mutex_destroy(__mx);
      }

  public:
    // matches a gthr-win32.h recursive mutex (has a 'sema' member)
    template<typename _Rm>
      static typename enable_if<sizeof(&_Rm::sema), void>::type
      _S_destroy(_Rm* __mx)
      {
	__gthread_mutex_t __tmp;
	_S_destroy_win32(&__tmp, __mx);
      }

    // matches a recursive mutex with a member 'actual'
    template<typename _Rm>
      static typename enable_if<sizeof(&_Rm::actual), void>::type
      _S_destroy(_Rm* __mx)
      { __gthread_mutex_destroy(&__mx->actual); }

    // matches when recursive and plain mutexes share one type
    template<typename _Rm>
      static
      typename enable_if<is_same<_Rm, __gthread_mutex_t>::value, void>::type
      _S_destroy(_Rm* __mx)
      { __gthread_mutex_destroy(__mx); }
  };
151 #endif
152
  /// recursive_mutex
  //
  // A mutex the owning thread may lock repeatedly; it is released after
  // a matching number of unlock() calls.
  class recursive_mutex
  {
    typedef __gthread_recursive_mutex_t __native_type;
    __native_type _M_mutex;

  public:
    typedef __native_type* native_handle_type;

#ifdef __GTHREAD_RECURSIVE_MUTEX_INIT
    recursive_mutex() : _M_mutex(__GTHREAD_RECURSIVE_MUTEX_INIT) { }
#else
    recursive_mutex()
    {
      // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
      __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION(&_M_mutex);
    }

    // gthreads provides no __gthread_recursive_mutex_destroy, so use
    // the layout-dispatching workaround class.
    ~recursive_mutex()
    { __destroy_recursive_mutex::_S_destroy(&_M_mutex); }
#endif

    recursive_mutex(const recursive_mutex&) = delete;
    recursive_mutex& operator=(const recursive_mutex&) = delete;

    // Block until ownership is acquired (or the recursion count is
    // incremented); throws system_error on failure.
    void
    lock()
    {
      int __e = __gthread_recursive_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
      if (__e)
	__throw_system_error(__e);
    }

    // Non-blocking acquire: true iff the lock (or another recursion
    // level) was obtained.
    bool
    try_lock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_recursive_mutex_trylock(&_M_mutex);
    }

    // Undo one level of locking; errors ignored (must not throw).
    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_recursive_mutex_unlock(&_M_mutex);
    }

    native_handle_type
    native_handle()
    { return &_M_mutex; }
  };
206
  /// timed_mutex
  //
  // Non-recursive mutex that additionally supports timed acquisition
  // (try_lock_for / try_lock_until) via __gthread_mutex_timedlock.
  class timed_mutex
  {
    typedef __gthread_mutex_t __native_type;

    // Clock used to convert relative waits into absolute time points.
#ifdef _GLIBCXX_USE_CLOCK_MONOTONIC
    typedef chrono::monotonic_clock __clock_t;
#else
    typedef chrono::high_resolution_clock __clock_t;
#endif

    __native_type _M_mutex;

  public:
    typedef __native_type* native_handle_type;

#ifdef __GTHREAD_MUTEX_INIT
    timed_mutex() : _M_mutex(__GTHREAD_MUTEX_INIT) { }
#else
    timed_mutex()
    {
      __GTHREAD_MUTEX_INIT_FUNCTION(&_M_mutex);
    }

    ~timed_mutex() { __gthread_mutex_destroy(&_M_mutex); }
#endif

    timed_mutex(const timed_mutex&) = delete;
    timed_mutex& operator=(const timed_mutex&) = delete;

    // Block until ownership is acquired; throws system_error on failure.
    void
    lock()
    {
      int __e = __gthread_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
      if (__e)
	__throw_system_error(__e);
    }

    // Non-blocking acquire: true iff ownership was obtained.
    bool
    try_lock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_mutex_trylock(&_M_mutex);
    }

    // Try to lock, waiting at most __rtime; dispatches on whether the
    // conversion to the clock's duration is exact (see helpers below).
    template <class _Rep, class _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      { return __try_lock_for_impl(__rtime); }

    // Try to lock until the absolute time __atime.  The time point is
    // split into whole seconds plus nanoseconds to build the
    // __gthread_time_t passed to the timed-lock primitive.
    // NOTE(review): _Clock is assumed to share the timed-lock
    // primitive's epoch; no clock conversion is performed — confirm.
    template <class _Clock, class _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      {
	chrono::time_point<_Clock, chrono::seconds> __s =
	  chrono::time_point_cast<chrono::seconds>(__atime);

	chrono::nanoseconds __ns =
	  chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

	__gthread_time_t __ts = {
	  static_cast<std::time_t>(__s.time_since_epoch().count()),
	  static_cast<long>(__ns.count())
	};

	return !__gthread_mutex_timedlock(&_M_mutex, &__ts);
      }

    // Release ownership; errors ignored (must not throw).
    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_mutex_unlock(&_M_mutex);
    }

    native_handle_type
    native_handle()
    { return &_M_mutex; }

  private:
    // Exact case: each _Period tick is representable in the clock's
    // duration (clock period <= _Period), so duration_cast is lossless.
    template<typename _Rep, typename _Period>
      typename enable_if<
	ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
      __try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
      {
	__clock_t::time_point __atime = __clock_t::now()
	  + chrono::duration_cast<__clock_t::duration>(__rtime);

	return try_lock_until(__atime);
      }

    // Truncating case: duration_cast rounds toward zero, so add one
    // clock tick (the pre-increment on the converted duration) to round
    // the wait up rather than waking before __rtime has fully elapsed.
    template <typename _Rep, typename _Period>
      typename enable_if<
	!ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
      __try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
      {
	__clock_t::time_point __atime = __clock_t::now()
	  + ++chrono::duration_cast<__clock_t::duration>(__rtime);

	return try_lock_until(__atime);
      }
  };
311
  /// recursive_timed_mutex
  //
  // Recursive mutex with timed acquisition; combines the semantics of
  // recursive_mutex and timed_mutex.
  class recursive_timed_mutex
  {
    typedef __gthread_recursive_mutex_t __native_type;

    // Clock used to convert relative waits into absolute time points.
#ifdef _GLIBCXX_USE_CLOCK_MONOTONIC
    typedef chrono::monotonic_clock __clock_t;
#else
    typedef chrono::high_resolution_clock __clock_t;
#endif

    __native_type _M_mutex;

  public:
    typedef __native_type* native_handle_type;

#ifdef __GTHREAD_RECURSIVE_MUTEX_INIT
    recursive_timed_mutex() : _M_mutex(__GTHREAD_RECURSIVE_MUTEX_INIT) { }
#else
    recursive_timed_mutex()
    {
      // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
      __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION(&_M_mutex);
    }

    // No recursive-destroy primitive in gthreads; use the workaround.
    ~recursive_timed_mutex()
    { __destroy_recursive_mutex::_S_destroy(&_M_mutex); }
#endif

    recursive_timed_mutex(const recursive_timed_mutex&) = delete;
    recursive_timed_mutex& operator=(const recursive_timed_mutex&) = delete;

    // Block until ownership (or another recursion level) is acquired.
    void
    lock()
    {
      int __e = __gthread_recursive_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
      if (__e)
	__throw_system_error(__e);
    }

    // Non-blocking acquire.
    bool
    try_lock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_recursive_mutex_trylock(&_M_mutex);
    }

    // Try to lock, waiting at most __rtime (see helpers below).
    template <class _Rep, class _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      { return __try_lock_for_impl(__rtime); }

    // Try to lock until the absolute time __atime; the time point is
    // split into seconds + nanoseconds for the gthreads call.
    // NOTE(review): assumes _Clock matches the primitive's epoch.
    template <class _Clock, class _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      {
	chrono::time_point<_Clock, chrono::seconds> __s =
	  chrono::time_point_cast<chrono::seconds>(__atime);

	chrono::nanoseconds __ns =
	  chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

	__gthread_time_t __ts = {
	  static_cast<std::time_t>(__s.time_since_epoch().count()),
	  static_cast<long>(__ns.count())
	};

	return !__gthread_recursive_mutex_timedlock(&_M_mutex, &__ts);
      }

    // Undo one level of locking; errors ignored (must not throw).
    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_recursive_mutex_unlock(&_M_mutex);
    }

    native_handle_type
    native_handle()
    { return &_M_mutex; }

  private:
    // Exact case: conversion to the clock's duration is lossless.
    template<typename _Rep, typename _Period>
      typename enable_if<
	ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
      __try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
      {
	__clock_t::time_point __atime = __clock_t::now()
	  + chrono::duration_cast<__clock_t::duration>(__rtime);

	return try_lock_until(__atime);
      }

    // Truncating case: round the converted duration up by one clock
    // tick (the pre-increment) so the wait is never shortened.
    template <typename _Rep, typename _Period>
      typename enable_if<
	!ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
      __try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
      {
	__clock_t::time_point __atime = __clock_t::now()
	  + ++chrono::duration_cast<__clock_t::duration>(__rtime);

	return try_lock_until(__atime);
      }
  };
418
  /// Do not acquire ownership of the mutex.
  struct defer_lock_t { };

  /// Try to acquire ownership of the mutex without blocking.
  struct try_to_lock_t { };

  /// Assume the calling thread has already obtained mutex ownership
  /// and manage it.
  struct adopt_lock_t { };

  // Tag objects passed to lock_guard/unique_lock constructors to select
  // the locking policy at construction time.
  constexpr defer_lock_t defer_lock { };
  constexpr try_to_lock_t try_to_lock { };
  constexpr adopt_lock_t adopt_lock { };
432
433 /// @brief Scoped lock idiom.
434 // Acquire the mutex here with a constructor call, then release with
435 // the destructor call in accordance with RAII style.
436 template<typename _Mutex>
437 class lock_guard
438 {
439 public:
440 typedef _Mutex mutex_type;
441
442 explicit lock_guard(mutex_type& __m) : _M_device(__m)
443 { _M_device.lock(); }
444
445 lock_guard(mutex_type& __m, adopt_lock_t) : _M_device(__m)
446 { } // calling thread owns mutex
447
448 ~lock_guard()
449 { _M_device.unlock(); }
450
451 lock_guard(const lock_guard&) = delete;
452 lock_guard& operator=(const lock_guard&) = delete;
453
454 private:
455 mutex_type& _M_device;
456 };
457
  /// unique_lock
  //
  // Movable mutex-ownership wrapper.  Unlike lock_guard it may or may
  // not own its associated mutex at any given time (_M_owns tracks
  // this), supports deferred/try/timed/adopted locking, and can release
  // or transfer ownership.
  template<typename _Mutex>
    class unique_lock
    {
    public:
      typedef _Mutex mutex_type;

      // Default: no associated mutex, owns nothing.
      unique_lock()
      : _M_device(0), _M_owns(false)
      { }

      // Lock __m immediately (blocking).
      explicit unique_lock(mutex_type& __m)
      : _M_device(&__m), _M_owns(false)
      {
	lock();
	_M_owns = true;
      }

      // Associate with __m but do not lock it.
      unique_lock(mutex_type& __m, defer_lock_t)
      : _M_device(&__m), _M_owns(false)
      { }

      // Attempt a non-blocking lock; ownership reflects the outcome.
      unique_lock(mutex_type& __m, try_to_lock_t)
      : _M_device(&__m), _M_owns(_M_device->try_lock())
      { }

      // Adopt a mutex the calling thread has already locked.
      unique_lock(mutex_type& __m, adopt_lock_t)
      : _M_device(&__m), _M_owns(true)
      {
	// XXX calling thread owns mutex
      }

      // Timed acquisition until an absolute time point.
      template<typename _Clock, typename _Duration>
	unique_lock(mutex_type& __m,
		    const chrono::time_point<_Clock, _Duration>& __atime)
	: _M_device(&__m), _M_owns(_M_device->try_lock_until(__atime))
	{ }

      // Timed acquisition for a relative duration.
      template<typename _Rep, typename _Period>
	unique_lock(mutex_type& __m,
		    const chrono::duration<_Rep, _Period>& __rtime)
	: _M_device(&__m), _M_owns(_M_device->try_lock_for(__rtime))
	{ }

      // Unlock only if we currently own the mutex.
      ~unique_lock()
      {
	if (_M_owns)
	  unlock();
      }

      unique_lock(const unique_lock&) = delete;
      unique_lock& operator=(const unique_lock&) = delete;

      // Move: steal the source's mutex pointer and ownership state,
      // leaving the source dissociated.
      unique_lock(unique_lock&& __u)
      : _M_device(__u._M_device), _M_owns(__u._M_owns)
      {
	__u._M_device = 0;
	__u._M_owns = false;
      }

      unique_lock& operator=(unique_lock&& __u)
      {
	if(_M_owns)
	  unlock();

	// Swap our (now unowned) state into a temporary moved from __u;
	// the temporary's destructor is then a no-op.
	unique_lock(std::move(__u)).swap(*this);

	__u._M_device = 0;
	__u._M_owns = false;

	return *this;
      }

      // Blocking lock.  Throws system_error: operation_not_permitted if
      // there is no associated mutex, resource_deadlock_would_occur if
      // this lock already owns it.
      void
      lock()
      {
	if (!_M_device)
	  __throw_system_error(int(errc::operation_not_permitted));
	else if (_M_owns)
	  __throw_system_error(int(errc::resource_deadlock_would_occur));
	else
	  {
	    _M_device->lock();
	    _M_owns = true;
	  }
      }

      // Non-blocking lock with the same precondition checks as lock().
      bool
      try_lock()
      {
	if (!_M_device)
	  __throw_system_error(int(errc::operation_not_permitted));
	else if (_M_owns)
	  __throw_system_error(int(errc::resource_deadlock_would_occur));
	else
	  {
	    _M_owns = _M_device->try_lock();
	    return _M_owns;
	  }
      }

      // Timed lock until an absolute time point; same checks as lock().
      template<typename _Clock, typename _Duration>
	bool
	try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
	{
	  if (!_M_device)
	    __throw_system_error(int(errc::operation_not_permitted));
	  else if (_M_owns)
	    __throw_system_error(int(errc::resource_deadlock_would_occur));
	  else
	    {
	      _M_owns = _M_device->try_lock_until(__atime);
	      return _M_owns;
	    }
	}

      // Timed lock for a relative duration; same checks as lock().
      template<typename _Rep, typename _Period>
	bool
	try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
	{
	  if (!_M_device)
	    __throw_system_error(int(errc::operation_not_permitted));
	  else if (_M_owns)
	    __throw_system_error(int(errc::resource_deadlock_would_occur));
	  else
	    {
	      _M_owns = _M_device->try_lock_for(__rtime);
	      return _M_owns;
	    }
	}

      // Unlock; throws operation_not_permitted if not currently owned.
      void
      unlock()
      {
	if (!_M_owns)
	  __throw_system_error(int(errc::operation_not_permitted));
	else if (_M_device)
	  {
	    _M_device->unlock();
	    _M_owns = false;
	  }
      }

      // Exchange state with another unique_lock.
      void
      swap(unique_lock& __u)
      {
	std::swap(_M_device, __u._M_device);
	std::swap(_M_owns, __u._M_owns);
      }

      // Dissociate from the mutex WITHOUT unlocking it; the caller
      // becomes responsible for the (possibly held) mutex.
      mutex_type*
      release()
      {
	mutex_type* __ret = _M_device;
	_M_device = 0;
	_M_owns = false;
	return __ret;
      }

      // True iff this lock currently owns its mutex.
      bool
      owns_lock() const
      { return _M_owns; }

      explicit operator bool() const
      { return owns_lock(); }

      // The associated mutex, or null pointer if none.
      mutex_type*
      mutex() const
      { return _M_device; }

    private:
      mutex_type* _M_device;  // associated mutex (may be null)
      bool        _M_owns;    // XXX use atomic_bool
    };
632
  /// Swap the targets and ownership states of two unique_locks.
  /// Non-member overload so generic swap-using code finds it via ADL.
  template<typename _Mutex>
    inline void
    swap(unique_lock<_Mutex>& __x, unique_lock<_Mutex>& __y)
    { __x.swap(__y); }
637
  // Compile-time recursion that unlocks tuple elements _Idx, _Idx-1,
  // ..., 0.  The <-1> specialization terminates the recursion.
  template<int _Idx>
    struct __unlock_impl
    {
      template<typename... _Lock>
	static void
	__do_unlock(tuple<_Lock&...>& __locks)
	{
	  std::get<_Idx>(__locks).unlock();
	  __unlock_impl<_Idx - 1>::__do_unlock(__locks);
	}
    };

  // Base case: nothing left to unlock.
  template<>
    struct __unlock_impl<-1>
    {
      template<typename... _Lock>
	static void
	__do_unlock(tuple<_Lock&...>&)
	{ }
    };
658
  // Helper: try-lock __l and return a unique_lock guarding it;
  // owns_lock() on the result reports whether the attempt succeeded.
  template<typename _Lock>
    unique_lock<_Lock>
    __try_to_lock(_Lock& __l)
    { return unique_lock<_Lock>(__l, try_to_lock); }
663
  // Compile-time recursion behind try_lock/lock: attempts each tuple
  // element in turn.  On return __idx is the index of the first lock
  // that could not be acquired, or -1 if all succeeded.  Each level
  // holds its lock in a unique_lock guard: on overall success release()
  // hands ownership to the caller; on failure the guards' destructors
  // roll back everything acquired so far.
  template<int _Idx, bool _Continue = true>
    struct __try_lock_impl
    {
      template<typename... _Lock>
	static void
	__do_try_lock(tuple<_Lock&...>& __locks, int& __idx)
	{
	  __idx = _Idx;
	  auto __lock = __try_to_lock(std::get<_Idx>(__locks));
	  if (__lock.owns_lock())
	    {
	      // Recurse; _Continue becomes false when _Idx + 1 is the
	      // last element, selecting the specialization below.
	      __try_lock_impl<_Idx + 1, _Idx + 2 < sizeof...(_Lock)>::
		__do_try_lock(__locks, __idx);
	      if (__idx == -1)
		__lock.release();
	    }
	}
    };

  // Specialization for the final tuple element: success here means the
  // whole sequence succeeded, signalled by __idx == -1.
  template<int _Idx>
    struct __try_lock_impl<_Idx, false>
    {
      template<typename... _Lock>
	static void
	__do_try_lock(tuple<_Lock&...>& __locks, int& __idx)
	{
	  __idx = _Idx;
	  auto __lock = __try_to_lock(std::get<_Idx>(__locks));
	  if (__lock.owns_lock())
	    {
	      __idx = -1;
	      __lock.release();
	    }
	}
    };
699
700 /** @brief Generic try_lock.
701 * @param __l1 Meets Mutex requirements (try_lock() may throw).
702 * @param __l2 Meets Mutex requirements (try_lock() may throw).
703 * @param __l3 Meets Mutex requirements (try_lock() may throw).
704 * @return Returns -1 if all try_lock() calls return true. Otherwise returns
705 * a 0-based index corresponding to the argument that returned false.
706 * @post Either all arguments are locked, or none will be.
707 *
708 * Sequentially calls try_lock() on each argument.
709 */
  template<typename _Lock1, typename _Lock2, typename... _Lock3>
    int
    try_lock(_Lock1& __l1, _Lock2& __l2, _Lock3&... __l3)
    {
      // __idx is assigned by __do_try_lock before any operation that
      // can throw, so it is well-defined even when the catch runs; any
      // locks taken before the throw are rolled back by the RAII guards
      // inside __try_lock_impl.
      int __idx;
      auto __locks = std::tie(__l1, __l2, __l3...);
      __try
	{ __try_lock_impl<0>::__do_try_lock(__locks, __idx); }
      __catch(...)
	{ }
      return __idx;
    }
722
723 /** @brief Generic lock.
724 * @param __l1 Meets Mutex requirements (try_lock() may throw).
725 * @param __l2 Meets Mutex requirements (try_lock() may throw).
726 * @param __l3 Meets Mutex requirements (try_lock() may throw).
727 * @throw An exception thrown by an argument's lock() or try_lock() member.
728 * @post All arguments are locked.
729 *
730 * All arguments are locked via a sequence of calls to lock(), try_lock()
731 * and unlock(). If the call exits via an exception any locks that were
732 * obtained will be released.
733 */
  template<typename _L1, typename _L2, typename ..._L3>
    void
    lock(_L1& __l1, _L2& __l2, _L3&... __l3)
    {
      // Block on the first mutex, then try-lock the remainder.  If any
      // try-lock fails the RAII guards release everything and the whole
      // sequence restarts.  NOTE(review): the retry always begins with
      // __l1 again rather than rotating to the failed lock, so two
      // contending callers can spin for a while before one wins.
      while (true)
	{
	  unique_lock<_L1> __first(__l1);
	  int __idx;
	  auto __locks = std::tie(__l2, __l3...);
	  // _Continue is false when _L3 is empty, i.e. __l2 is last.
	  __try_lock_impl<0, sizeof...(_L3)>::__do_try_lock(__locks, __idx);
	  if (__idx == -1)
	    {
	      // All acquired: hand ownership of __l1 to the caller.
	      __first.release();
	      return;
	    }
	}
    }
751
  /// once_flag
  //
  // Opaque flag used with call_once to run an initializer exactly once.
  // Wraps the gthreads once-control object.
  struct once_flag
  {
  private:
    typedef __gthread_once_t __native_type;
    __native_type _M_once;

  public:
    // Constant-initialized, so a namespace-scope once_flag requires no
    // dynamic initialization.
    constexpr once_flag() : _M_once(__GTHREAD_ONCE_INIT) { }

    once_flag(const once_flag&) = delete;
    once_flag& operator=(const once_flag&) = delete;

    // call_once needs direct access to _M_once.
    template<typename _Callable, typename... _Args>
      friend void
      call_once(once_flag& __once, _Callable&& __f, _Args&&... __args);
  };
769
770 #ifdef _GLIBCXX_HAVE_TLS
771 extern __thread void* __once_callable;
772 extern __thread void (*__once_call)();
773
774 template<typename _Callable>
775 inline void
776 __once_call_impl()
777 {
778 (*(_Callable*)__once_callable)();
779 }
780 #else
781 extern function<void()> __once_functor;
782
783 extern void
784 __set_once_functor_lock_ptr(unique_lock<mutex>*);
785
786 extern mutex&
787 __get_once_mutex();
788 #endif
789
790 extern "C" void __once_proxy();
791
792 /// call_once
  /// call_once
  //
  // Invoke __f(__args...) exactly once for a given once_flag, even when
  // called concurrently from several threads.  The bound call is routed
  // through the C-compatible __once_proxy so it can be passed to
  // __gthread_once.
  template<typename _Callable, typename... _Args>
    void
    call_once(once_flag& __once, _Callable&& __f, _Args&&... __args)
    {
#ifdef _GLIBCXX_HAVE_TLS
      // TLS path: publish the pending call through thread-local globals
      // — a type-erased pointer to the bound functor plus a trampoline
      // that knows its real type.  The functor lives on this frame and
      // is only dereferenced during the __gthread_once call below.
      auto __bound_functor = std::bind<void>(std::forward<_Callable>(__f),
	  std::forward<_Args>(__args)...);
      __once_callable = &__bound_functor;
      __once_call = &__once_call_impl<decltype(__bound_functor)>;
#else
      // No TLS: a single global functor slot is used, serialized by a
      // global mutex.  The lock is registered with the library so it
      // can be released once the functor has been taken over.
      unique_lock<mutex> __functor_lock(__get_once_mutex());
      __once_functor = std::bind<void>(std::forward<_Callable>(__f),
	  std::forward<_Args>(__args)...);
      __set_once_functor_lock_ptr(&__functor_lock);
#endif

      int __e = __gthread_once(&(__once._M_once), &__once_proxy);

#ifndef _GLIBCXX_HAVE_TLS
      // If the library did not consume (and unlock) our lock, detach it.
      if (__functor_lock)
	__set_once_functor_lock_ptr(0);
#endif

      if (__e)
	__throw_system_error(__e);
    }
819
820 // @} group mutexes
821 _GLIBCXX_END_NAMESPACE
822
823 #endif // _GLIBCXX_HAS_GTHREADS && _GLIBCXX_USE_C99_STDINT_TR1
824
825 #endif // __GXX_EXPERIMENTAL_CXX0X__
826
827 #endif // _GLIBCXX_MUTEX