// Support for concurrent programming -*- C++ -*-

// Copyright (C) 2003-2012 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file ext/concurrence.h
 *  This file is a GNU extension to the Standard C++ Library.
 */

#ifndef _CONCURRENCE_H
#define _CONCURRENCE_H 1

#pragma GCC system_header

#include <exception>
#include <bits/gthr.h>
#include <bits/functexcept.h>
#include <bits/cpp_type_traits.h>
#include <ext/type_traits.h>

namespace __gnu_cxx _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  // Available locking policies:
  // _S_single    single-threaded code that doesn't need to be locked.
  // _S_mutex     multi-threaded code that requires additional support
  //              from gthr.h or abstraction layers in concurrence.h.
  // _S_atomic    multi-threaded code using atomic operations.
  enum _Lock_policy { _S_single, _S_mutex, _S_atomic };

  // Compile-time constant that indicates the preferred locking policy in
  // the current configuration.
  static const _Lock_policy __default_lock_policy =
#ifdef __GTHREADS
#if (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2) \
     && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4))
  _S_atomic;
#else
  _S_mutex;
#endif
#else
  _S_single;
#endif

  // NB: As this is used in libsupc++, need to only depend on
  // <exception>.  No <stdexcept> classes, no use of std::string.
  class __concurrence_lock_error : public std::exception
  {
  public:
    virtual char const*
    what() const throw()
    { return "__gnu_cxx::__concurrence_lock_error"; }
  };

  class __concurrence_unlock_error : public std::exception
  {
  public:
    virtual char const*
    what() const throw()
    { return "__gnu_cxx::__concurrence_unlock_error"; }
  };

  class __concurrence_broadcast_error : public std::exception
  {
  public:
    virtual char const*
    what() const throw()
    { return "__gnu_cxx::__concurrence_broadcast_error"; }
  };

  class __concurrence_wait_error : public std::exception
  {
  public:
    virtual char const*
    what() const throw()
    { return "__gnu_cxx::__concurrence_wait_error"; }
  };

  // Substitute for concurrence_error object in the case of -fno-exceptions.
  inline void
  __throw_concurrence_lock_error()
  {
#if __EXCEPTIONS
    throw __concurrence_lock_error();
#else
    __builtin_abort();
#endif
  }

  inline void
  __throw_concurrence_unlock_error()
  {
#if __EXCEPTIONS
    throw __concurrence_unlock_error();
#else
    __builtin_abort();
#endif
  }

#ifdef __GTHREAD_HAS_COND
  inline void
  __throw_concurrence_broadcast_error()
  {
#if __EXCEPTIONS
    throw __concurrence_broadcast_error();
#else
    __builtin_abort();
#endif
  }

  inline void
  __throw_concurrence_wait_error()
  {
#if __EXCEPTIONS
    throw __concurrence_wait_error();
#else
    __builtin_abort();
#endif
  }
#endif
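
  // Editor's note (illustrative sketch, not part of the original header):
  // since the error types above derive from std::exception, callers built
  // with exceptions enabled can handle locking failures generically:
  //
  //   try
  //     { m.lock(); }                     // m is a __gnu_cxx::__mutex
  //   catch (const std::exception& e)
  //     { log_failure(e.what()); }        // log_failure() is hypothetical
  //
  // With -fno-exceptions the __throw_* helpers call __builtin_abort()
  // instead of throwing.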

  /// A wrapper around a gthreads mutex; locking and unlocking are no-ops
  /// when threads are not active.
  class __mutex
  {
  private:
#if __GTHREADS && defined __GTHREAD_MUTEX_INIT
    __gthread_mutex_t _M_mutex = __GTHREAD_MUTEX_INIT;
#else
    __gthread_mutex_t _M_mutex;
#endif

    __mutex(const __mutex&);
    __mutex& operator=(const __mutex&);

  public:
    __mutex()
    {
#if __GTHREADS && ! defined __GTHREAD_MUTEX_INIT
      if (__gthread_active_p())
        __GTHREAD_MUTEX_INIT_FUNCTION(&_M_mutex);
#endif
    }

#if __GTHREADS && ! defined __GTHREAD_MUTEX_INIT
    ~__mutex()
    {
      if (__gthread_active_p())
        __gthread_mutex_destroy(&_M_mutex);
    }
#endif

    void lock()
    {
#if __GTHREADS
      if (__gthread_active_p())
        {
          if (__gthread_mutex_lock(&_M_mutex) != 0)
            __throw_concurrence_lock_error();
        }
#endif
    }

    void unlock()
    {
#if __GTHREADS
      if (__gthread_active_p())
        {
          if (__gthread_mutex_unlock(&_M_mutex) != 0)
            __throw_concurrence_unlock_error();
        }
#endif
    }

    __gthread_mutex_t* gthread_mutex(void)
    { return &_M_mutex; }
  };
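
  // Editor's note (illustrative sketch, not part of the original header):
  // explicit lock()/unlock() pairs work but are not exception safe; the
  // __scoped_lock class further below is the usual RAII-style alternative.
  //
  //   __gnu_cxx::__mutex __m;
  //
  //   void update()
  //   {
  //     __m.lock();
  //     // ... critical section (must not throw) ...
  //     __m.unlock();
  //   }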

  /// A wrapper around a recursive gthreads mutex; locking and unlocking
  /// are no-ops when threads are not active.
  class __recursive_mutex
  {
  private:
#if __GTHREADS && defined __GTHREAD_RECURSIVE_MUTEX_INIT
    __gthread_recursive_mutex_t _M_mutex = __GTHREAD_RECURSIVE_MUTEX_INIT;
#else
    __gthread_recursive_mutex_t _M_mutex;
#endif

    __recursive_mutex(const __recursive_mutex&);
    __recursive_mutex& operator=(const __recursive_mutex&);

  public:
    __recursive_mutex()
    {
#if __GTHREADS && ! defined __GTHREAD_RECURSIVE_MUTEX_INIT
      if (__gthread_active_p())
        __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION(&_M_mutex);
#endif
    }

#if __GTHREADS && ! defined __GTHREAD_RECURSIVE_MUTEX_INIT
    ~__recursive_mutex()
    {
      if (__gthread_active_p())
        _S_destroy(&_M_mutex);
    }
#endif

    void lock()
    {
#if __GTHREADS
      if (__gthread_active_p())
        {
          if (__gthread_recursive_mutex_lock(&_M_mutex) != 0)
            __throw_concurrence_lock_error();
        }
#endif
    }

    void unlock()
    {
#if __GTHREADS
      if (__gthread_active_p())
        {
          if (__gthread_recursive_mutex_unlock(&_M_mutex) != 0)
            __throw_concurrence_unlock_error();
        }
#endif
    }

    __gthread_recursive_mutex_t* gthread_recursive_mutex(void)
    { return &_M_mutex; }

#if __GTHREADS && ! defined __GTHREAD_RECURSIVE_MUTEX_INIT
    // FIXME: gthreads doesn't define __gthread_recursive_mutex_destroy,
    // so we need to obtain a __gthread_mutex_t to destroy.
  private:
    template<typename _Mx, typename _Rm>
      static void
      _S_destroy_win32(_Mx* __mx, _Rm const* __rmx)
      {
        __mx->counter = __rmx->counter;
        __mx->sema = __rmx->sema;
        __gthread_mutex_destroy(__mx);
      }

    // Matches a gthr-win32.h recursive mutex.
    template<typename _Rm>
      static typename __enable_if<(bool)sizeof(&_Rm::sema), void>::__type
      _S_destroy(_Rm* __mx)
      {
        __gthread_mutex_t __tmp;
        _S_destroy_win32(&__tmp, __mx);
      }

    // Matches a recursive mutex with a member 'actual'.
    template<typename _Rm>
      static typename __enable_if<(bool)sizeof(&_Rm::actual), void>::__type
      _S_destroy(_Rm* __mx)
      { __gthread_mutex_destroy(&__mx->actual); }

    // Matches when there's only one mutex type.
    template<typename _Rm>
      static typename
      __enable_if<std::__are_same<_Rm, __gthread_mutex_t>::__value,
                  void>::__type
      _S_destroy(_Rm* __mx)
      { __gthread_mutex_destroy(__mx); }
#endif
  };
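
  // Editor's note (illustrative sketch, not part of the original header):
  // unlike __mutex, a __recursive_mutex may be re-acquired by the thread
  // that already owns it, with a matching unlock() for each lock():
  //
  //   __gnu_cxx::__recursive_mutex __rm;
  //
  //   void inner() { __rm.lock(); /* ... */ __rm.unlock(); }
  //   void outer() { __rm.lock(); inner(); __rm.unlock(); }  // nested lock OK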

  /// Scoped lock idiom.
  // Acquire the mutex here with a constructor call, then release with
  // the destructor call in accordance with RAII style.
  class __scoped_lock
  {
  public:
    typedef __mutex __mutex_type;

  private:
    __mutex_type& _M_device;

    __scoped_lock(const __scoped_lock&);
    __scoped_lock& operator=(const __scoped_lock&);

  public:
    explicit __scoped_lock(__mutex_type& __name) : _M_device(__name)
    { _M_device.lock(); }

    ~__scoped_lock() throw()
    { _M_device.unlock(); }
  };
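
  // Editor's note (illustrative sketch, not part of the original header):
  // the guard locks in its constructor and unlocks in its destructor, so the
  // mutex is released on every path out of the scope, including exceptional
  // ones:
  //
  //   __gnu_cxx::__mutex __m;
  //
  //   void update()
  //   {
  //     __gnu_cxx::__scoped_lock __l(__m);
  //     // ... critical section, may throw ...
  //   }                                       // __m is unlocked here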

#ifdef __GTHREAD_HAS_COND
  /// A wrapper around a gthreads condition variable, for use with __mutex
  /// and __recursive_mutex.
  class __cond
  {
  private:
#if __GTHREADS && defined __GTHREAD_COND_INIT
    __gthread_cond_t _M_cond = __GTHREAD_COND_INIT;
#else
    __gthread_cond_t _M_cond;
#endif

    __cond(const __cond&);
    __cond& operator=(const __cond&);

  public:
    __cond()
    {
#if __GTHREADS && ! defined __GTHREAD_COND_INIT
      if (__gthread_active_p())
        __GTHREAD_COND_INIT_FUNCTION(&_M_cond);
#endif
    }

#if __GTHREADS && ! defined __GTHREAD_COND_INIT
    ~__cond()
    {
      if (__gthread_active_p())
        __gthread_cond_destroy(&_M_cond);
    }
#endif

    void broadcast()
    {
#if __GTHREADS
      if (__gthread_active_p())
        {
          if (__gthread_cond_broadcast(&_M_cond) != 0)
            __throw_concurrence_broadcast_error();
        }
#endif
    }

    void wait(__mutex *mutex)
    {
#if __GTHREADS
      {
        if (__gthread_cond_wait(&_M_cond, mutex->gthread_mutex()) != 0)
          __throw_concurrence_wait_error();
      }
#endif
    }

    void wait_recursive(__recursive_mutex *mutex)
    {
#if __GTHREADS
      {
        if (__gthread_cond_wait_recursive(&_M_cond,
                                          mutex->gthread_recursive_mutex())
            != 0)
          __throw_concurrence_wait_error();
      }
#endif
    }
  };
#endif
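
  // Editor's note (illustrative sketch, not part of the original header):
  // the usual pattern waits on a predicate in a loop while holding the mutex
  // that __cond::wait() releases and re-acquires.  The names __ready and
  // __ready_cond are hypothetical.
  //
  //   __gnu_cxx::__mutex __m;
  //   __gnu_cxx::__cond  __ready_cond;
  //   bool __ready = false;
  //
  //   void consumer()
  //   {
  //     __m.lock();
  //     while (!__ready)
  //       __ready_cond.wait(&__m);      // atomically unlocks, waits, relocks
  //     // ... use the data ...
  //     __m.unlock();
  //   }
  //
  //   void producer()
  //   {
  //     __m.lock();
  //     __ready = true;
  //     __m.unlock();
  //     __ready_cond.broadcast();       // wake all waiting threads
  //   }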

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace

#endif