// Threading support -*- C++ -*-

// Copyright (C) 2001 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 2, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING.  If not, write to the Free
// Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307,
// USA.

// As a special exception, you may use this file as part of a free software
// library without restriction.  Specifically, if other files instantiate
// templates or use macros or inline functions from this file, or you compile
// this file and link it with other files to produce an executable, this
// file does not by itself cause the resulting executable to be covered by
// the GNU General Public License.  This exception does not however
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.

/*
 * Copyright (c) 1997-1999
 * Silicon Graphics Computer Systems, Inc.
 *
 * Permission to use, copy, modify, distribute and sell this software
 * and its documentation for any purpose is hereby granted without fee,
 * provided that the above copyright notice appear in all copies and
 * that both that copyright notice and this permission notice appear
 * in supporting documentation.  Silicon Graphics makes no
 * representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied warranty.
 */
// WARNING: This is an internal header file, included by other C++
// standard library headers.  You should not attempt to use this header
// file directly.
// stl_config.h should be included before this file.

#ifndef __SGI_STL_INTERNAL_THREADS_H
#define __SGI_STL_INTERNAL_THREADS_H

// Supported threading models are native SGI, pthreads, uithreads
// (similar to pthreads, but based on an earlier draft of the POSIX
// threads standard), and Win32 threads.  Uithread support by Jochen
// Schlick, 1999.

// GCC extension begin
// In order to present a stable threading configuration, in all cases,
// gcc looks for its own abstraction layer before all others.  All
// modifications to this file are marked to allow easier importation of
// STL upgrades.
#if defined(__STL_GTHREADS)
#include "bits/gthr.h"
#else
// GCC extension end
#if defined(__STL_SGI_THREADS)
#include <mutex.h>
#include <time.h>
#elif defined(__STL_PTHREADS)
#include <pthread.h>
#elif defined(__STL_UITHREADS)
#include <thread.h>
#include <synch.h>
#elif defined(__STL_WIN32THREADS)
#include <windows.h>
#endif
// GCC extension begin
#endif
// GCC extension end

namespace std
{

// Class _Refcount_Base provides a type, _RC_t, a data member,
// _M_ref_count, and member functions _M_incr and _M_decr, which perform
// atomic preincrement/predecrement.  The constructor initializes
// _M_ref_count.

// Hack for SGI o32 compilers.
#if defined(__STL_SGI_THREADS) && !defined(__add_and_fetch) && \
    (__mips < 3 || !(defined (_ABIN32) || defined(_ABI64)))
#  define __add_and_fetch(__l,__v) add_then_test((unsigned long*)__l,__v)
#  define __test_and_set(__l,__v)  test_and_set(__l,__v)
#endif /* o32 */

struct _Refcount_Base
{
  // The type _RC_t
# ifdef __STL_WIN32THREADS
  typedef long _RC_t;
# else
  typedef size_t _RC_t;
# endif

  // The data member _M_ref_count
  volatile _RC_t _M_ref_count;

  // Constructor
// GCC extension begin
#ifdef __STL_GTHREADS
  __gthread_mutex_t _M_ref_count_lock;
  _Refcount_Base(_RC_t __n) : _M_ref_count(__n)
    {
#ifdef __GTHREAD_MUTEX_INIT
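      // __GTHREAD_MUTEX_INIT is a static initializer (typically a brace
      // list), so it cannot appear in the member initializer list above;
      // initialize a temporary and assign it to the member instead.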
      __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
      _M_ref_count_lock = __tmp;
#elif defined(__GTHREAD_MUTEX_INIT_FUNCTION)
      __GTHREAD_MUTEX_INIT_FUNCTION (&_M_ref_count_lock);
#else
#error __GTHREAD_MUTEX_INIT or __GTHREAD_MUTEX_INIT_FUNCTION should be defined by gthr.h abstraction layer, report problem to libstdc++@gcc.gnu.org.
#endif
    }
#else
// GCC extension end
# ifdef __STL_PTHREADS
  pthread_mutex_t _M_ref_count_lock;
  _Refcount_Base(_RC_t __n) : _M_ref_count(__n)
    { pthread_mutex_init(&_M_ref_count_lock, 0); }
# elif defined(__STL_UITHREADS)
  mutex_t _M_ref_count_lock;
  _Refcount_Base(_RC_t __n) : _M_ref_count(__n)
    { mutex_init(&_M_ref_count_lock, USYNC_THREAD, 0); }
# else
  _Refcount_Base(_RC_t __n) : _M_ref_count(__n) {}
# endif
// GCC extension begin
#endif
// GCC extension end

// GCC extension begin
#ifdef __STL_GTHREADS
  void _M_incr() {
    __gthread_mutex_lock(&_M_ref_count_lock);
    ++_M_ref_count;
    __gthread_mutex_unlock(&_M_ref_count_lock);
  }
  _RC_t _M_decr() {
    __gthread_mutex_lock(&_M_ref_count_lock);
    volatile _RC_t __tmp = --_M_ref_count;
    __gthread_mutex_unlock(&_M_ref_count_lock);
    return __tmp;
  }
#else
// GCC extension end
  // _M_incr and _M_decr
# ifdef __STL_SGI_THREADS
  void _M_incr() { __add_and_fetch(&_M_ref_count, 1); }
  _RC_t _M_decr() { return __add_and_fetch(&_M_ref_count, (size_t) -1); }
# elif defined (__STL_WIN32THREADS)
  void _M_incr() { InterlockedIncrement((_RC_t*)&_M_ref_count); }
  _RC_t _M_decr() { return InterlockedDecrement((_RC_t*)&_M_ref_count); }
# elif defined(__STL_PTHREADS)
  void _M_incr() {
    pthread_mutex_lock(&_M_ref_count_lock);
    ++_M_ref_count;
    pthread_mutex_unlock(&_M_ref_count_lock);
  }
  _RC_t _M_decr() {
    pthread_mutex_lock(&_M_ref_count_lock);
    volatile _RC_t __tmp = --_M_ref_count;
    pthread_mutex_unlock(&_M_ref_count_lock);
    return __tmp;
  }
# elif defined(__STL_UITHREADS)
  void _M_incr() {
    mutex_lock(&_M_ref_count_lock);
    ++_M_ref_count;
    mutex_unlock(&_M_ref_count_lock);
  }
  _RC_t _M_decr() {
    mutex_lock(&_M_ref_count_lock);
    /*volatile*/ _RC_t __tmp = --_M_ref_count;
    mutex_unlock(&_M_ref_count_lock);
    return __tmp;
  }
# else  /* No threads */
  void _M_incr() { ++_M_ref_count; }
  _RC_t _M_decr() { return --_M_ref_count; }
# endif
// GCC extension begin
#endif
// GCC extension end
};
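
// Example (illustrative only; _My_rep and _My_release are hypothetical
// names, not part of this header): a shared representation class might
// derive from _Refcount_Base and be destroyed when the count drops to
// zero.
//
//   struct _My_rep : public std::_Refcount_Base {
//     _My_rep() : _Refcount_Base(1) { }   // creator holds one reference
//   };
//
//   void _My_release(_My_rep* __p) {
//     if (__p->_M_decr() == 0)            // _M_decr returns the new count
//       delete __p;
//   }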

// Atomic swap on unsigned long
// This is guaranteed to behave as though it were atomic only if all
// possibly concurrent updates use _Atomic_swap.
// In some cases the operation is emulated with a lock.
// GCC extension begin
#ifdef __STL_GTHREADS
// A simple lock-based _Atomic_swap for this configuration (vyzo),
// cloned from the __STL_PTHREADS path below as the original comment
// here suggested.  This only affects the use of ext/rope with threads.
// It could later be optimized to use the atomicity.h abstraction layer
// from libstdc++-v3.
// We use a template here only to get a unique initialized instance.
template<int __dummy>
struct _Swap_lock_struct {
  static __gthread_mutex_t _S_swap_lock;
};

template<int __dummy>
__gthread_mutex_t
_Swap_lock_struct<__dummy>::_S_swap_lock = __GTHREAD_MUTEX_INIT;

// This should be portable, but performance is expected
// to be quite awful.  This really needs platform specific
// code.
inline unsigned long _Atomic_swap(unsigned long * __p, unsigned long __q) {
  __gthread_mutex_lock(&_Swap_lock_struct<0>::_S_swap_lock);
  unsigned long __result = *__p;
  *__p = __q;
  __gthread_mutex_unlock(&_Swap_lock_struct<0>::_S_swap_lock);
  return __result;
}

#else
// GCC extension end
# ifdef __STL_SGI_THREADS
  inline unsigned long _Atomic_swap(unsigned long * __p, unsigned long __q) {
#   if __mips < 3 || !(defined (_ABIN32) || defined(_ABI64))
    return test_and_set(__p, __q);
#   else
    return __test_and_set(__p, (unsigned long)__q);
#   endif
  }
# elif defined(__STL_WIN32THREADS)
  inline unsigned long _Atomic_swap(unsigned long * __p, unsigned long __q) {
    return (unsigned long) InterlockedExchange((LPLONG)__p, (LONG)__q);
  }
# elif defined(__STL_PTHREADS)
  // We use a template here only to get a unique initialized instance.
  template<int __dummy>
  struct _Swap_lock_struct {
    static pthread_mutex_t _S_swap_lock;
  };

  template<int __dummy>
  pthread_mutex_t
  _Swap_lock_struct<__dummy>::_S_swap_lock = PTHREAD_MUTEX_INITIALIZER;

  // This should be portable, but performance is expected
  // to be quite awful.  This really needs platform specific
  // code.
  inline unsigned long _Atomic_swap(unsigned long * __p, unsigned long __q) {
    pthread_mutex_lock(&_Swap_lock_struct<0>::_S_swap_lock);
    unsigned long __result = *__p;
    *__p = __q;
    pthread_mutex_unlock(&_Swap_lock_struct<0>::_S_swap_lock);
    return __result;
  }
# elif defined(__STL_UITHREADS)
  // We use a template here only to get a unique initialized instance.
  template<int __dummy>
  struct _Swap_lock_struct {
    static mutex_t _S_swap_lock;
  };

  template<int __dummy>
  mutex_t
  _Swap_lock_struct<__dummy>::_S_swap_lock = DEFAULTMUTEX;

  // This should be portable, but performance is expected
  // to be quite awful.  This really needs platform specific
  // code.
  inline unsigned long _Atomic_swap(unsigned long * __p, unsigned long __q) {
    mutex_lock(&_Swap_lock_struct<0>::_S_swap_lock);
    unsigned long __result = *__p;
    *__p = __q;
    mutex_unlock(&_Swap_lock_struct<0>::_S_swap_lock);
    return __result;
  }
# elif defined (__STL_SOLARIS_THREADS)
  // any better solutions ?
  // We use a template here only to get a unique initialized instance.
  template<int __dummy>
  struct _Swap_lock_struct {
    static mutex_t _S_swap_lock;
  };

#   if ( __STL_STATIC_TEMPLATE_DATA > 0 )
  template<int __dummy>
  mutex_t
  _Swap_lock_struct<__dummy>::_S_swap_lock = DEFAULTMUTEX;
#   else
  __DECLARE_INSTANCE(mutex_t, _Swap_lock_struct<__dummy>::_S_swap_lock,
                     =DEFAULTMUTEX);
#   endif /* ( __STL_STATIC_TEMPLATE_DATA > 0 ) */

  // This should be portable, but performance is expected
  // to be quite awful.  This really needs platform specific
  // code.
  inline unsigned long _Atomic_swap(unsigned long * __p, unsigned long __q) {
    mutex_lock(&_Swap_lock_struct<0>::_S_swap_lock);
    unsigned long __result = *__p;
    *__p = __q;
    mutex_unlock(&_Swap_lock_struct<0>::_S_swap_lock);
    return __result;
  }
# else
  static inline unsigned long _Atomic_swap(unsigned long * __p, unsigned long __q) {
    unsigned long __result = *__p;
    *__p = __q;
    return __result;
  }
# endif
// GCC extension begin
#endif
// GCC extension end
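
// Example (illustrative only): _Atomic_swap can implement a simple
// test-and-set flag, which is essentially how _STL_mutex_lock below
// uses it.  __busy is a hypothetical flag, not part of this header.
//
//   static unsigned long __busy = 0;
//   ...
//   if (std::_Atomic_swap(&__busy, 1) == 0) {
//     // The old value was 0, so this thread changed it to 1 and now
//     // owns the flag; store 0 to release it.
//   }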

// Locking class.  Note that this class *does not have a constructor*.
// It must be initialized either statically, with __STL_MUTEX_INITIALIZER,
// or dynamically, by explicitly calling the _M_initialize member function.
// (This is similar to the ways that a pthreads mutex can be initialized.)
// There are explicit member functions for acquiring and releasing the lock.

// There is no constructor because static initialization is essential for
// some uses, and only a class aggregate (see section 8.5.1 of the C++
// standard) can be initialized that way.  That means we must have no
// constructors, no base classes, no virtual functions, and no private or
// protected members.

// Helper struct.  This is a workaround for various compilers that don't
// handle static variables in inline functions properly.
template <int __inst>
struct _STL_mutex_spin {
  enum { __low_max = 30, __high_max = 1000 };
  // Low if we suspect uniprocessor, high for multiprocessor.

  static unsigned __max;
  static unsigned __last;
};

template <int __inst>
unsigned _STL_mutex_spin<__inst>::__max = _STL_mutex_spin<__inst>::__low_max;

template <int __inst>
unsigned _STL_mutex_spin<__inst>::__last = 0;

// GCC extension begin
#if defined(__STL_GTHREADS)
#if !defined(__GTHREAD_MUTEX_INIT) && defined(__GTHREAD_MUTEX_INIT_FUNCTION)
extern __gthread_mutex_t _GLIBCPP_mutex;
extern __gthread_mutex_t *_GLIBCPP_mutex_address;
extern __gthread_once_t _GLIBCPP_once;
extern void _GLIBCPP_mutex_init (void);
extern void _GLIBCPP_mutex_address_init (void);
#endif
#endif
// GCC extension end

struct _STL_mutex_lock
{
// GCC extension begin
#if defined(__STL_GTHREADS)
  // The class must be statically initialized with __STL_MUTEX_INITIALIZER.
#if !defined(__GTHREAD_MUTEX_INIT) && defined(__GTHREAD_MUTEX_INIT_FUNCTION)
  volatile int _M_init_flag;
  __gthread_once_t _M_once;
#endif
  __gthread_mutex_t _M_lock;
  void _M_initialize() {
#ifdef __GTHREAD_MUTEX_INIT
    // There should be no code in this path given the usage rules above.
#elif defined(__GTHREAD_MUTEX_INIT_FUNCTION)
    if (_M_init_flag) return;
    if (__gthread_once (&_GLIBCPP_once, _GLIBCPP_mutex_init) != 0
        && __gthread_active_p ())
      abort ();
    __gthread_mutex_lock (&_GLIBCPP_mutex);
    if (!_M_init_flag) {
      // Even though we have a global lock, we use __gthread_once to be
      // absolutely certain the _M_lock mutex is only initialized once on
      // multiprocessor systems.
      _GLIBCPP_mutex_address = &_M_lock;
      if (__gthread_once (&_M_once, _GLIBCPP_mutex_address_init) != 0
          && __gthread_active_p ())
        abort ();
      _M_init_flag = 1;
    }
    __gthread_mutex_unlock (&_GLIBCPP_mutex);
#endif
  }
  void _M_acquire_lock() {
#if !defined(__GTHREAD_MUTEX_INIT) && defined(__GTHREAD_MUTEX_INIT_FUNCTION)
    if (!_M_init_flag) _M_initialize();
#endif
    __gthread_mutex_lock(&_M_lock);
  }
  void _M_release_lock() {
#if !defined(__GTHREAD_MUTEX_INIT) && defined(__GTHREAD_MUTEX_INIT_FUNCTION)
    if (!_M_init_flag) _M_initialize();
#endif
    __gthread_mutex_unlock(&_M_lock);
  }
#else
// GCC extension end
#if defined(__STL_SGI_THREADS) || defined(__STL_WIN32THREADS)
  // It should be relatively easy to get this to work on any modern Unix.
  volatile unsigned long _M_lock;
  void _M_initialize() { _M_lock = 0; }
  static void _S_nsec_sleep(int __log_nsec) {
#   ifdef __STL_SGI_THREADS
    struct timespec __ts;
    /* Max sleep is 2**27 nsec ~ 134 msec */
    __ts.tv_sec = 0;
    __ts.tv_nsec = 1L << __log_nsec;
    nanosleep(&__ts, 0);
#   elif defined(__STL_WIN32THREADS)
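    // Sleep() takes milliseconds, and 2**20 nsec is roughly 1 msec:
    // requests of about 1 msec or less become Sleep(0) (yield), and
    // longer ones are converted by shifting out the low 20 bits.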
    if (__log_nsec <= 20) {
      Sleep(0);
    } else {
      Sleep(1 << (__log_nsec - 20));
    }
#   else
#     error unimplemented
#   endif
  }
  void _M_acquire_lock() {
    volatile unsigned long* __lock = &this->_M_lock;

    if (!_Atomic_swap((unsigned long*)__lock, 1)) {
      return;
    }
    unsigned __my_spin_max = _STL_mutex_spin<0>::__max;
    unsigned __my_last_spins = _STL_mutex_spin<0>::__last;
    volatile unsigned __junk = 17;      // Value doesn't matter.
    unsigned __i;
    for (__i = 0; __i < __my_spin_max; __i++) {
      if (__i < __my_last_spins/2 || *__lock) {
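        // Either we are still in the initial spin phase or the lock
        // still looks held: burn cycles on __junk rather than hammering
        // the lock word with atomic swaps.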
        __junk *= __junk;  __junk *= __junk;
        __junk *= __junk;  __junk *= __junk;
        continue;
      }
      if (!_Atomic_swap((unsigned long*)__lock, 1)) {
        // Got it!
        // Spinning worked, so we're probably not being scheduled
        // against the other process with which we were contending,
        // and it makes sense to spin longer the next time.
        _STL_mutex_spin<0>::__last = __i;
        _STL_mutex_spin<0>::__max = _STL_mutex_spin<0>::__high_max;
        return;
      }
    }
    // We are probably being scheduled against the other process.  Sleep.
    _STL_mutex_spin<0>::__max = _STL_mutex_spin<0>::__low_max;
    for (__i = 0 ;; ++__i) {
      int __log_nsec = __i + 6;

      if (__log_nsec > 27) __log_nsec = 27;
      if (!_Atomic_swap((unsigned long *)__lock, 1)) {
        return;
      }
      _S_nsec_sleep(__log_nsec);
    }
  }
  void _M_release_lock() {
    volatile unsigned long* __lock = &_M_lock;
#   if defined(__STL_SGI_THREADS) && defined(__GNUC__) && __mips >= 3
    asm("sync");
    *__lock = 0;
#   elif defined(__STL_SGI_THREADS) && __mips >= 3 \
        && (defined (_ABIN32) || defined(_ABI64))
    __lock_release(__lock);
#   else
    *__lock = 0;
    // This is not sufficient on many multiprocessors, since
    // writes to protected variables and the lock may be reordered.
#   endif
  }

  // We no longer use win32 critical sections.
  // They appear to be slower in the contention-free case,
  // and they appear difficult to initialize without introducing a race.

#elif defined(__STL_PTHREADS)
  pthread_mutex_t _M_lock;
  void _M_initialize()   { pthread_mutex_init(&_M_lock, NULL); }
  void _M_acquire_lock() { pthread_mutex_lock(&_M_lock); }
  void _M_release_lock() { pthread_mutex_unlock(&_M_lock); }
#elif defined(__STL_UITHREADS)
  mutex_t _M_lock;
  void _M_initialize()   { mutex_init(&_M_lock, USYNC_THREAD, 0); }
  void _M_acquire_lock() { mutex_lock(&_M_lock); }
  void _M_release_lock() { mutex_unlock(&_M_lock); }
#else /* No threads */
  void _M_initialize() {}
  void _M_acquire_lock() {}
  void _M_release_lock() {}
#endif
// GCC extension begin
#endif
// GCC extension end
};

// GCC extension begin
#if defined(__STL_GTHREADS)
#ifdef __GTHREAD_MUTEX_INIT
#define __STL_MUTEX_INITIALIZER = { __GTHREAD_MUTEX_INIT }
#elif defined(__GTHREAD_MUTEX_INIT_FUNCTION)
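// In the init-function configuration, the aggregate initializers below
// set _M_init_flag and _M_once (plus _M_lock when a compile-time default
// exists); otherwise _M_lock is set up lazily by _M_initialize.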
#ifdef __GTHREAD_MUTEX_INIT_DEFAULT
#define __STL_MUTEX_INITIALIZER \
  = { 0, __GTHREAD_ONCE_INIT, __GTHREAD_MUTEX_INIT_DEFAULT }
#else
#define __STL_MUTEX_INITIALIZER = { 0, __GTHREAD_ONCE_INIT }
#endif
#endif
#else
// GCC extension end
#ifdef __STL_PTHREADS
// Pthreads locks must be statically initialized to something other than
// the default value of zero.
#  define __STL_MUTEX_INITIALIZER = { PTHREAD_MUTEX_INITIALIZER }
#elif defined(__STL_UITHREADS)
// UIthreads locks must be statically initialized to something other than
// the default value of zero.
#  define __STL_MUTEX_INITIALIZER = { DEFAULTMUTEX }
#elif defined(__STL_SGI_THREADS) || defined(__STL_WIN32THREADS)
#  define __STL_MUTEX_INITIALIZER = { 0 }
#else
#  define __STL_MUTEX_INITIALIZER
#endif
// GCC extension begin
#endif
// GCC extension end


// A locking class that uses _STL_mutex_lock.  The constructor takes a
// reference to an _STL_mutex_lock, and acquires a lock.  The
// destructor releases the lock.  It's not clear that this is exactly
// the right functionality.  It will probably change in the future.

struct _STL_auto_lock
{
  _STL_mutex_lock& _M_lock;

  _STL_auto_lock(_STL_mutex_lock& __lock) : _M_lock(__lock)
    { _M_lock._M_acquire_lock(); }
  ~_STL_auto_lock() { _M_lock._M_release_lock(); }

private:
  void operator=(const _STL_auto_lock&);
  _STL_auto_lock(const _STL_auto_lock&);
};
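
// Example (illustrative only, reusing the hypothetical __counter_lock
// from above): the same critical section written with the scoped lock;
// the destructor releases the mutex even if the block exits early.
//
//   void __bump(int& __counter) {
//     std::_STL_auto_lock __guard(__counter_lock);
//     ++__counter;
//   }   // __guard's destructor releases __counter_lock here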

} // namespace std

#endif /* __SGI_STL_INTERNAL_THREADS_H */

// Local Variables:
// mode:C++
// End: