// Support for concurrent programming -*- C++ -*-

// Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
// Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file ext/concurrence.h
 *  This file is a GNU extension to the Standard C++ Library.
 */

#ifndef _CONCURRENCE_H
#define _CONCURRENCE_H 1

#pragma GCC system_header

#include <exception>
#include <bits/gthr.h>
#include <bits/functexcept.h>
#include <bits/cpp_type_traits.h>
#include <ext/type_traits.h>

namespace __gnu_cxx _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  // Available locking policies:
  // _S_single    single-threaded code that doesn't need to be locked.
  // _S_mutex     multi-threaded code that requires additional support
  //              from gthr.h or abstraction layers in concurrence.h.
  // _S_atomic    multi-threaded code using atomic operations.
  enum _Lock_policy { _S_single, _S_mutex, _S_atomic };

  // Compile-time constant that indicates the preferred locking policy in
  // the current configuration.
  static const _Lock_policy __default_lock_policy =
#ifdef __GTHREADS
#if (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2) \
     && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4))
  _S_atomic;
#else
  _S_mutex;
#endif
#else
  _S_single;
#endif
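
  // Illustrative sketch (editorial addition, not part of the original
  // header): code that keys its synchronization strategy off
  // __default_lock_policy typically takes the policy as a template
  // parameter and specializes per policy.  The names below are
  // hypothetical; __mutex and __scoped_lock are defined later in this
  // file.
  //
  //   template<_Lock_policy _Lp = __default_lock_policy>
  //     struct _Ref_count;                   // hypothetical name
  //
  //   template<>
  //     struct _Ref_count<_S_single>         // no locking at all
  //     { int _M_n; void _M_inc() { ++_M_n; } };
  //
  //   template<>
  //     struct _Ref_count<_S_mutex>          // serialize with a __mutex
  //     {
  //       int _M_n;
  //       __gnu_cxx::__mutex _M_lock;
  //       void _M_inc()
  //       { __gnu_cxx::__scoped_lock __l(_M_lock); ++_M_n; }
  //     };
  //
  //   template<>
  //     struct _Ref_count<_S_atomic>         // lock-free atomic update
  //     { int _M_n; void _M_inc() { __sync_fetch_and_add(&_M_n, 1); } };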

  // NB: As this is used in libsupc++, it must depend only on
  // <exception>.  No <stdexcept> classes, no use of std::string.
  class __concurrence_lock_error : public std::exception
  {
  public:
    virtual char const*
    what() const throw()
    { return "__gnu_cxx::__concurrence_lock_error"; }
  };

  class __concurrence_unlock_error : public std::exception
  {
  public:
    virtual char const*
    what() const throw()
    { return "__gnu_cxx::__concurrence_unlock_error"; }
  };

  class __concurrence_broadcast_error : public std::exception
  {
  public:
    virtual char const*
    what() const throw()
    { return "__gnu_cxx::__concurrence_broadcast_error"; }
  };

  class __concurrence_wait_error : public std::exception
  {
  public:
    virtual char const*
    what() const throw()
    { return "__gnu_cxx::__concurrence_wait_error"; }
  };

  // Substitute for concurrence_error object in the case of -fno-exceptions.
  inline void
  __throw_concurrence_lock_error()
  {
#if __EXCEPTIONS
    throw __concurrence_lock_error();
#else
    __builtin_abort();
#endif
  }

  inline void
  __throw_concurrence_unlock_error()
  {
#if __EXCEPTIONS
    throw __concurrence_unlock_error();
#else
    __builtin_abort();
#endif
  }

#ifdef __GTHREAD_HAS_COND
  inline void
  __throw_concurrence_broadcast_error()
  {
#if __EXCEPTIONS
    throw __concurrence_broadcast_error();
#else
    __builtin_abort();
#endif
  }

  inline void
  __throw_concurrence_wait_error()
  {
#if __EXCEPTIONS
    throw __concurrence_wait_error();
#else
    __builtin_abort();
#endif
  }
#endif

  class __mutex
  {
  private:
    __gthread_mutex_t _M_mutex;

    __mutex(const __mutex&);
    __mutex& operator=(const __mutex&);

  public:
    __mutex()
    {
#if __GTHREADS
      if (__gthread_active_p())
        {
#if defined __GTHREAD_MUTEX_INIT
          __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
          _M_mutex = __tmp;
#else
          __GTHREAD_MUTEX_INIT_FUNCTION(&_M_mutex);
#endif
        }
#endif
    }

#if __GTHREADS && ! defined __GTHREAD_MUTEX_INIT
    ~__mutex()
    {
      if (__gthread_active_p())
        __gthread_mutex_destroy(&_M_mutex);
    }
#endif

    void lock()
    {
#if __GTHREADS
      if (__gthread_active_p())
        {
          if (__gthread_mutex_lock(&_M_mutex) != 0)
            __throw_concurrence_lock_error();
        }
#endif
    }

    void unlock()
    {
#if __GTHREADS
      if (__gthread_active_p())
        {
          if (__gthread_mutex_unlock(&_M_mutex) != 0)
            __throw_concurrence_unlock_error();
        }
#endif
    }

    __gthread_mutex_t* gthread_mutex(void)
    { return &_M_mutex; }
  };

  class __recursive_mutex
  {
  private:
    __gthread_recursive_mutex_t _M_mutex;

    __recursive_mutex(const __recursive_mutex&);
    __recursive_mutex& operator=(const __recursive_mutex&);

  public:
    __recursive_mutex()
    {
#if __GTHREADS
      if (__gthread_active_p())
        {
#if defined __GTHREAD_RECURSIVE_MUTEX_INIT
          __gthread_recursive_mutex_t __tmp = __GTHREAD_RECURSIVE_MUTEX_INIT;
          _M_mutex = __tmp;
#else
          __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION(&_M_mutex);
#endif
        }
#endif
    }

#if __GTHREADS && ! defined __GTHREAD_RECURSIVE_MUTEX_INIT
    ~__recursive_mutex()
    {
      if (__gthread_active_p())
        _S_destroy(&_M_mutex);
    }
#endif

    void lock()
    {
#if __GTHREADS
      if (__gthread_active_p())
        {
          if (__gthread_recursive_mutex_lock(&_M_mutex) != 0)
            __throw_concurrence_lock_error();
        }
#endif
    }

    void unlock()
    {
#if __GTHREADS
      if (__gthread_active_p())
        {
          if (__gthread_recursive_mutex_unlock(&_M_mutex) != 0)
            __throw_concurrence_unlock_error();
        }
#endif
    }

    __gthread_recursive_mutex_t* gthread_recursive_mutex(void)
    { return &_M_mutex; }

#if __GTHREADS && ! defined __GTHREAD_RECURSIVE_MUTEX_INIT
    // FIXME: gthreads doesn't define __gthread_recursive_mutex_destroy,
    // so we need to obtain a __gthread_mutex_t to destroy.
  private:
    template<typename _Mx, typename _Rm>
      static void
      _S_destroy_win32(_Mx* __mx, _Rm const* __rmx)
      {
        __mx->counter = __rmx->counter;
        __mx->sema = __rmx->sema;
        __gthread_mutex_destroy(__mx);
      }

    // matches a gthr-win32.h recursive mutex
    template<typename _Rm>
      static typename __enable_if<sizeof(&_Rm::sema), void>::__type
      _S_destroy(_Rm* __mx)
      {
        __gthread_mutex_t __tmp;
        _S_destroy_win32(&__tmp, __mx);
      }

    // matches a recursive mutex with a member 'actual'
    template<typename _Rm>
      static typename __enable_if<sizeof(&_Rm::actual), void>::__type
      _S_destroy(_Rm* __mx)
      { __gthread_mutex_destroy(&__mx->actual); }

    // matches when there's only one mutex type
    template<typename _Rm>
      static typename
      __enable_if<std::__are_same<_Rm, __gthread_mutex_t>::__value,
                  void>::__type
      _S_destroy(_Rm* __mx)
      { __gthread_mutex_destroy(__mx); }
#endif
  };
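
  // Illustrative sketch (editorial addition, not in the original header):
  // the _S_destroy overloads above use a sizeof-based __enable_if trick
  // (from <ext/type_traits.h>) so that the right cleanup is chosen from
  // the shape of __gthread_recursive_mutex_t without naming any
  // target-specific type directly.  A minimal analogue, with hypothetical
  // names:
  //
  //   struct _Win32_like { int sema; int counter; };   // gthr-win32 shape
  //   struct _Wrapped    { __gthread_mutex_t actual; }; // wrapper shape
  //
  //   // Viable only if &_Tp::sema is well-formed, i.e. the type looks
  //   // like the gthr-win32.h recursive mutex.
  //   template<typename _Tp>
  //     typename __enable_if<sizeof(&_Tp::sema), void>::__type
  //     __dispose(_Tp*);
  //
  //   // Viable only if the type wraps a plain mutex in a member 'actual'.
  //   template<typename _Tp>
  //     typename __enable_if<sizeof(&_Tp::actual), void>::__type
  //     __dispose(_Tp*);
  //
  // For any given _Tp only one candidate survives template substitution,
  // so the call resolves without this class having to know which gthreads
  // model is in use.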

  /// Scoped lock idiom.
  // Acquire the mutex here with a constructor call, then release with
  // the destructor call in accordance with RAII style.
  class __scoped_lock
  {
  public:
    typedef __mutex __mutex_type;

  private:
    __mutex_type& _M_device;

    __scoped_lock(const __scoped_lock&);
    __scoped_lock& operator=(const __scoped_lock&);

  public:
    explicit __scoped_lock(__mutex_type& __name) : _M_device(__name)
    { _M_device.lock(); }

    ~__scoped_lock() throw()
    { _M_device.unlock(); }
  };
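
  // Usage sketch (editorial addition): guarding a critical section with
  // __scoped_lock.  The mutex and counter names below are hypothetical,
  // not part of this header.
  //
  //   namespace
  //   {
  //     __gnu_cxx::__mutex __counter_mutex;
  //     long __counter;
  //   }
  //
  //   void
  //   __bump_counter()
  //   {
  //     __gnu_cxx::__scoped_lock __guard(__counter_mutex); // lock() here
  //     ++__counter;                                        // protected
  //   }                              // __guard destroyed: unlock() here
  //
  // Because the lock is released in the destructor, the mutex is unlocked
  // on every path out of the function, including exceptions thrown from
  // the protected region.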

#ifdef __GTHREAD_HAS_COND
  class __cond
  {
  private:
    __gthread_cond_t _M_cond;

    __cond(const __cond&);
    __cond& operator=(const __cond&);

  public:
    __cond()
    {
#if __GTHREADS
      if (__gthread_active_p())
        {
#if defined __GTHREAD_COND_INIT
          __gthread_cond_t __tmp = __GTHREAD_COND_INIT;
          _M_cond = __tmp;
#else
          __GTHREAD_COND_INIT_FUNCTION(&_M_cond);
#endif
        }
#endif
    }

#if __GTHREADS && ! defined __GTHREAD_COND_INIT
    ~__cond()
    {
      if (__gthread_active_p())
        __gthread_cond_destroy(&_M_cond);
    }
#endif

    void broadcast()
    {
#if __GTHREADS
      if (__gthread_active_p())
        {
          if (__gthread_cond_broadcast(&_M_cond) != 0)
            __throw_concurrence_broadcast_error();
        }
#endif
    }

    void wait(__mutex *mutex)
    {
#if __GTHREADS
      {
        if (__gthread_cond_wait(&_M_cond, mutex->gthread_mutex()) != 0)
          __throw_concurrence_wait_error();
      }
#endif
    }

    void wait_recursive(__recursive_mutex *mutex)
    {
#if __GTHREADS
      {
        if (__gthread_cond_wait_recursive(&_M_cond,
                                          mutex->gthread_recursive_mutex())
            != 0)
          __throw_concurrence_wait_error();
      }
#endif
    }
  };
#endif
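
  // Usage sketch (editorial addition): a wait/broadcast pair built from
  // __mutex and __cond, only meaningful when __GTHREAD_HAS_COND is
  // defined.  All names below are hypothetical.
  //
  //   __gnu_cxx::__mutex __m;
  //   __gnu_cxx::__cond  __c;
  //   bool __ready = false;
  //
  //   void __consumer()
  //   {
  //     __gnu_cxx::__scoped_lock __l(__m);
  //     while (!__ready)        // re-check: wakeups may be spurious
  //       __c.wait(&__m);       // atomically releases __m while waiting
  //   }
  //
  //   void __producer()
  //   {
  //     { __gnu_cxx::__scoped_lock __l(__m); __ready = true; }
  //     __c.broadcast();        // wake all waiters
  //   }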

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace

#endif