1 // Threading support -*- C++ -*-
3 // Copyright (C) 2001 Free Software Foundation, Inc.
5 // This file is part of the GNU ISO C++ Library. This library is free
6 // software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 2, or (at your option)
// any later version.
11 // This library is distributed in the hope that it will be useful,
12 // but WITHOUT ANY WARRANTY; without even the implied warranty of
13 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 // GNU General Public License for more details.
16 // You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING.  If not, write to the Free
// Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307,
// USA.
21 // As a special exception, you may use this file as part of a free software
22 // library without restriction. Specifically, if other files instantiate
23 // templates or use macros or inline functions from this file, or you compile
24 // this file and link it with other files to produce an executable, this
25 // file does not by itself cause the resulting executable to be covered by
26 // the GNU General Public License. This exception does not however
27 // invalidate any other reasons why the executable file might be covered by
28 // the GNU General Public License.
31 * Copyright (c) 1997-1999
32 * Silicon Graphics Computer Systems, Inc.
34 * Permission to use, copy, modify, distribute and sell this software
35 * and its documentation for any purpose is hereby granted without fee,
36 * provided that the above copyright notice appear in all copies and
37 * that both that copyright notice and this permission notice appear
38 * in supporting documentation. Silicon Graphics makes no
39 * representations about the suitability of this software for any
40 * purpose. It is provided "as is" without express or implied warranty.
// WARNING: This is an internal header file, included by other C++
// standard library headers.  You should not attempt to use this header
// file directly.
// Stl_config.h should be included before this file.
48 #ifndef __SGI_STL_INTERNAL_THREADS_H
49 #define __SGI_STL_INTERNAL_THREADS_H
51 // Supported threading models are native SGI, pthreads, uithreads
52 // (similar to pthreads, but based on an earlier draft of the Posix
53 // threads standard), and Win32 threads. Uithread support by Jochen
56 // GCC extension begin
57 // In order to present a stable threading configuration, in all cases,
58 // gcc looks for it's own abstraction layer before all others. All
59 // modifications to this file are marked to allow easier importation of
61 #if defined(__STL_GTHREADS)
62 #include "bits/gthr.h"
65 #if defined(__STL_SGI_THREADS)
68 #elif defined(__STL_PTHREADS)
70 #elif defined(__STL_UITHREADS)
73 #elif defined(__STL_WIN32THREADS)
76 // GCC extension begin
// Class _Refcount_Base provides a type, _RC_t, a data member,
// _M_ref_count, and member functions _M_incr and _M_decr, which perform
// atomic preincrement/predecrement.  The constructor initializes
// _M_ref_count.

// Hack for SGI o32 compilers: the o32 ABI (or pre-MIPS3 ISA) lacks the
// __add_and_fetch/__test_and_set intrinsics, so fall back to the
// add_then_test/test_and_set library routines from <mutex.h>.
#if defined(__STL_SGI_THREADS) && !defined(__add_and_fetch) && \
    (__mips < 3 || !(defined (_ABIN32) || defined(_ABI64)))
#  define __add_and_fetch(__l,__v) add_then_test((unsigned long*)__l,__v)
#  define __test_and_set(__l,__v)  test_and_set(__l,__v)
#endif /* o32 */
// Reference-count helper: holds a counter plus whatever lock the selected
// threading model needs to increment/decrement it atomically.
struct _Refcount_Base
{
  // The type _RC_t: the raw counter type.  Win32's Interlocked* API
  // operates on long; everywhere else size_t is used.
# ifdef __STL_WIN32THREADS
  typedef long _RC_t;
# else
  typedef size_t _RC_t;
# endif

  // The data member _M_ref_count
  volatile _RC_t _M_ref_count;

  // Constructor: initializes _M_ref_count to __n and, where the threading
  // model requires one, initializes the protecting mutex.
// GCC extension begin
#ifdef __STL_GTHREADS
  __gthread_mutex_t _M_ref_count_lock;
  _Refcount_Base(_RC_t __n) : _M_ref_count(__n)
    {
#ifdef __GTHREAD_MUTEX_INIT
      // Static-style initializer: copy it into the member at run time.
      __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
      _M_ref_count_lock = __tmp;
#elif defined(__GTHREAD_MUTEX_INIT_FUNCTION)
      __GTHREAD_MUTEX_INIT_FUNCTION (&_M_ref_count_lock);
#else
#error __GTHREAD_MUTEX_INIT or __GTHREAD_MUTEX_INIT_FUNCTION should be defined by gthr.h abstraction layer, report problem to libstdc++@gcc.gnu.org.
#endif
    }
#else
// GCC extension end
# ifdef __STL_PTHREADS
  pthread_mutex_t _M_ref_count_lock;
  _Refcount_Base(_RC_t __n) : _M_ref_count(__n)
    { pthread_mutex_init(&_M_ref_count_lock, 0); }
# elif defined(__STL_UITHREADS)
  mutex_t _M_ref_count_lock;
  _Refcount_Base(_RC_t __n) : _M_ref_count(__n)
    { mutex_init(&_M_ref_count_lock, USYNC_THREAD, 0); }
# else
  _Refcount_Base(_RC_t __n) : _M_ref_count(__n) {}
# endif
// GCC extension begin
#endif
// GCC extension end

  // _M_incr and _M_decr: atomic preincrement/predecrement.  _M_decr
  // returns the decremented value so callers can detect zero.
// GCC extension begin
#ifdef __STL_GTHREADS
  void _M_incr() {
    __gthread_mutex_lock(&_M_ref_count_lock);
    ++_M_ref_count;
    __gthread_mutex_unlock(&_M_ref_count_lock);
  }
  _RC_t _M_decr() {
    __gthread_mutex_lock(&_M_ref_count_lock);
    volatile _RC_t __tmp = --_M_ref_count;
    __gthread_mutex_unlock(&_M_ref_count_lock);
    return __tmp;
  }
#else
// GCC extension end
# ifdef __STL_SGI_THREADS
  void _M_incr() {  __add_and_fetch(&_M_ref_count, 1); }
  _RC_t _M_decr() { return __add_and_fetch(&_M_ref_count, (size_t) -1); }
# elif defined (__STL_WIN32THREADS)
  void _M_incr() { InterlockedIncrement((_RC_t*)&_M_ref_count); }
  _RC_t _M_decr() { return InterlockedDecrement((_RC_t*)&_M_ref_count); }
# elif defined(__STL_PTHREADS)
  void _M_incr() {
    pthread_mutex_lock(&_M_ref_count_lock);
    ++_M_ref_count;
    pthread_mutex_unlock(&_M_ref_count_lock);
  }
  _RC_t _M_decr() {
    pthread_mutex_lock(&_M_ref_count_lock);
    volatile _RC_t __tmp = --_M_ref_count;
    pthread_mutex_unlock(&_M_ref_count_lock);
    return __tmp;
  }
# elif defined(__STL_UITHREADS)
  void _M_incr() {
    mutex_lock(&_M_ref_count_lock);
    ++_M_ref_count;
    mutex_unlock(&_M_ref_count_lock);
  }
  _RC_t _M_decr() {
    mutex_lock(&_M_ref_count_lock);
    /*volatile*/ _RC_t __tmp = --_M_ref_count;
    mutex_unlock(&_M_ref_count_lock);
    return __tmp;
  }
# else  /* No threads */
  void _M_incr() { ++_M_ref_count; }
  _RC_t _M_decr() { return --_M_ref_count; }
# endif
// GCC extension begin
#endif
// GCC extension end
};
// Atomic swap on unsigned long
// This is guaranteed to behave as though it were atomic only if all
// possibly concurrent updates use _Atomic_swap.
// In some cases the operation is emulated with a lock.
// Returns the previous value of *__p.
// GCC extension begin
#ifdef __STL_GTHREADS
// We don't provide an _Atomic_swap in this configuration.  This only
// affects the use of ext/rope with threads.  Someone could add this
// later, if required.  You can start by cloning the __STL_PTHREADS
// path while making the obvious changes.  Later it could be optimized
// to use the atomicity.h abstraction layer from libstdc++-v3.
// vyzo: simple _Atomic_swap implementation following the guidelines above
// We use a template here only to get a unique initialized instance.
template<int __dummy>
struct _Swap_lock_struct {
  static __gthread_mutex_t _S_swap_lock;
};

template<int __dummy>
__gthread_mutex_t
_Swap_lock_struct<__dummy>::_S_swap_lock = __GTHREAD_MUTEX_INIT;

// This should be portable, but performance is expected
// to be quite awful.  This really needs platform specific
// code.
inline unsigned long _Atomic_swap(unsigned long * __p, unsigned long __q) {
  __gthread_mutex_lock(&_Swap_lock_struct<0>::_S_swap_lock);
  unsigned long __result = *__p;
  *__p = __q;
  __gthread_mutex_unlock(&_Swap_lock_struct<0>::_S_swap_lock);
  return __result;
}
#else
// GCC extension end
# ifdef __STL_SGI_THREADS
inline unsigned long _Atomic_swap(unsigned long * __p, unsigned long __q) {
#   if __mips < 3 || !(defined (_ABIN32) || defined(_ABI64))
        return test_and_set(__p, __q);
#   else
        return __test_and_set(__p, (unsigned long)__q);
#   endif
}
# elif defined(__STL_WIN32THREADS)
inline unsigned long _Atomic_swap(unsigned long * __p, unsigned long __q) {
    return (unsigned long) InterlockedExchange((LPLONG)__p, (LONG)__q);
}
# elif defined(__STL_PTHREADS)
// We use a template here only to get a unique initialized instance.
template<int __dummy>
struct _Swap_lock_struct {
  static pthread_mutex_t _S_swap_lock;
};

template<int __dummy>
pthread_mutex_t
_Swap_lock_struct<__dummy>::_S_swap_lock = PTHREAD_MUTEX_INITIALIZER;

// This should be portable, but performance is expected
// to be quite awful.  This really needs platform specific
// code.
inline unsigned long _Atomic_swap(unsigned long * __p, unsigned long __q) {
  pthread_mutex_lock(&_Swap_lock_struct<0>::_S_swap_lock);
  unsigned long __result = *__p;
  *__p = __q;
  pthread_mutex_unlock(&_Swap_lock_struct<0>::_S_swap_lock);
  return __result;
}
# elif defined(__STL_UITHREADS)
// We use a template here only to get a unique initialized instance.
template<int __dummy>
struct _Swap_lock_struct {
  static mutex_t _S_swap_lock;
};

template<int __dummy>
mutex_t
_Swap_lock_struct<__dummy>::_S_swap_lock = DEFAULTMUTEX;

// This should be portable, but performance is expected
// to be quite awful.  This really needs platform specific
// code.
inline unsigned long _Atomic_swap(unsigned long * __p, unsigned long __q) {
  mutex_lock(&_Swap_lock_struct<0>::_S_swap_lock);
  unsigned long __result = *__p;
  *__p = __q;
  mutex_unlock(&_Swap_lock_struct<0>::_S_swap_lock);
  return __result;
}
# elif defined (__STL_SOLARIS_THREADS)
// any better solutions ?
// We use a template here only to get a unique initialized instance.
template<int __dummy>
struct _Swap_lock_struct {
  static mutex_t _S_swap_lock;
};

# if ( __STL_STATIC_TEMPLATE_DATA > 0 )
template<int __dummy>
mutex_t
_Swap_lock_struct<__dummy>::_S_swap_lock = DEFAULTMUTEX;
# else
__DECLARE_INSTANCE(mutex_t, _Swap_lock_struct<__dummy>::_S_swap_lock,
                   =DEFAULTMUTEX);
# endif /* ( __STL_STATIC_TEMPLATE_DATA > 0 ) */

// This should be portable, but performance is expected
// to be quite awful.  This really needs platform specific
// code.
inline unsigned long _Atomic_swap(unsigned long * __p, unsigned long __q) {
  mutex_lock(&_Swap_lock_struct<0>::_S_swap_lock);
  unsigned long __result = *__p;
  *__p = __q;
  mutex_unlock(&_Swap_lock_struct<0>::_S_swap_lock);
  return __result;
}
# else
// No threads: a plain read-modify-write is trivially "atomic".
static inline unsigned long _Atomic_swap(unsigned long * __p, unsigned long __q) {
  unsigned long __result = *__p;
  *__p = __q;
  return __result;
}
# endif
// GCC extension begin
#endif
// GCC extension end
// Locking class.  Note that this class *does not have a constructor*.
// It must be initialized either statically, with __STL_MUTEX_INITIALIZER,
// or dynamically, by explicitly calling the _M_initialize member function.
// (This is similar to the ways that a pthreads mutex can be initialized.)
// There are explicit member functions for acquiring and releasing the lock.

// There is no constructor because static initialization is essential for
// some uses, and only a class aggregate (see section 8.5.1 of the C++
// standard) can be initialized that way.  That means we must have no
// constructors, no base classes, no virtual functions, and no private or
// protected members.

// Helper struct.  This is a workaround for various compilers that don't
// handle static variables in inline functions properly.  Holds the
// adaptive spin-count state shared by all _STL_mutex_lock instances.
template <int __inst>
struct _STL_mutex_spin {
  enum { __low_max = 30, __high_max = 1000 };
  // Low if we suspect uniprocessor, high for multiprocessor.

  static unsigned __max;   // current spin limit before sleeping
  static unsigned __last;  // spin count that last succeeded
};

template <int __inst>
unsigned _STL_mutex_spin<__inst>::__max = _STL_mutex_spin<__inst>::__low_max;

template <int __inst>
unsigned _STL_mutex_spin<__inst>::__last = 0;
// GCC extension begin
#if defined(__STL_GTHREADS)
#if !defined(__GTHREAD_MUTEX_INIT) && defined(__GTHREAD_MUTEX_INIT_FUNCTION)
// Shared helpers for the function-initialized-mutex case: a global guard
// mutex, a once-control, and out-of-line init routines.  Defined in the
// library, not in this header.
extern __gthread_mutex_t _GLIBCPP_mutex;
extern __gthread_mutex_t *_GLIBCPP_mutex_address;
extern __gthread_once_t _GLIBCPP_once;
extern void _GLIBCPP_mutex_init (void);
extern void _GLIBCPP_mutex_address_init (void);
#endif
#endif
// GCC extension end
// Plain-aggregate mutex; see the usage comment above _STL_mutex_spin.
struct _STL_mutex_lock
{
// GCC extension begin
#if defined(__STL_GTHREADS)
  // The class must be statically initialized with __STL_MUTEX_INITIALIZER.
#if !defined(__GTHREAD_MUTEX_INIT) && defined(__GTHREAD_MUTEX_INIT_FUNCTION)
  volatile int _M_init_flag;
  __gthread_once_t _M_once;
#endif
  __gthread_mutex_t _M_lock;

  void _M_initialize() {
#ifdef __GTHREAD_MUTEX_INIT
    // There should be no code in this path given the usage rules above.
#elif defined(__GTHREAD_MUTEX_INIT_FUNCTION)
    if (_M_init_flag) return;
    if (__gthread_once (&_GLIBCPP_once, _GLIBCPP_mutex_init) != 0
        && __gthread_active_p ())
      abort ();
    __gthread_mutex_lock (&_GLIBCPP_mutex);
    if (!_M_init_flag)
      {
        // Even though we have a global lock, we use __gthread_once to be
        // absolutely certain the _M_lock mutex is only initialized once on
        // multiprocessor systems.
        _GLIBCPP_mutex_address = &_M_lock;
        if (__gthread_once (&_M_once, _GLIBCPP_mutex_address_init) != 0
            && __gthread_active_p ())
          abort ();
        _M_init_flag = 1;
      }
    __gthread_mutex_unlock (&_GLIBCPP_mutex);
#endif
  }
  void _M_acquire_lock() {
#if !defined(__GTHREAD_MUTEX_INIT) && defined(__GTHREAD_MUTEX_INIT_FUNCTION)
    // Lazy init: statically-initialized instances start with a zero flag.
    if (!_M_init_flag) _M_initialize();
#endif
    __gthread_mutex_lock(&_M_lock);
  }
  void _M_release_lock() {
#if !defined(__GTHREAD_MUTEX_INIT) && defined(__GTHREAD_MUTEX_INIT_FUNCTION)
    if (!_M_init_flag) _M_initialize();
#endif
    __gthread_mutex_unlock(&_M_lock);
  }
#else
// GCC extension end
#if defined(__STL_SGI_THREADS) || defined(__STL_WIN32THREADS)
  // It should be relatively easy to get this to work on any modern Unix.
  volatile unsigned long _M_lock;
  void _M_initialize() { _M_lock = 0; }

  // Sleep roughly 2**__log_nsec nanoseconds (exponential backoff helper).
  static void _S_nsec_sleep(int __log_nsec) {
#     ifdef __STL_SGI_THREADS
          struct timespec __ts;
          /* Max sleep is 2**27nsec ~ 60msec      */
          __ts.tv_sec = 0;
          __ts.tv_nsec = 1L << __log_nsec;
          nanosleep(&__ts, 0);
#     elif defined(__STL_WIN32THREADS)
          if (__log_nsec <= 20) {
              Sleep(0);
          } else {
              Sleep(1 << (__log_nsec - 20));
          }
#     else
#       error unimplemented
#     endif
  }

  // Adaptive spin-then-sleep acquire built on _Atomic_swap.
  void _M_acquire_lock() {
    volatile unsigned long* __lock = &this->_M_lock;

    if (!_Atomic_swap((unsigned long*)__lock, 1)) {
      return;
    }
    unsigned __my_spin_max = _STL_mutex_spin<0>::__max;
    unsigned __my_last_spins = _STL_mutex_spin<0>::__last;
    volatile unsigned __junk = 17;      // Value doesn't matter.
    unsigned __i;
    for (__i = 0; __i < __my_spin_max; __i++) {
      if (__i < __my_last_spins/2 || *__lock) {
        __junk *= __junk; __junk *= __junk;
        __junk *= __junk; __junk *= __junk;
        continue;
      }
      if (!_Atomic_swap((unsigned long*)__lock, 1)) {
        // Spinning worked.  Thus we're probably not being scheduled
        // against the other process with which we were contending.
        // Thus it makes sense to spin longer the next time.
        _STL_mutex_spin<0>::__last = __i;
        _STL_mutex_spin<0>::__max = _STL_mutex_spin<0>::__high_max;
        return;
      }
    }
    // We are probably being scheduled against the other process.  Sleep.
    _STL_mutex_spin<0>::__max = _STL_mutex_spin<0>::__low_max;
    for (__i = 0 ;; ++__i) {
      int __log_nsec = __i + 6;

      if (__log_nsec > 27) __log_nsec = 27;
      if (!_Atomic_swap((unsigned long *)__lock, 1)) {
        return;
      }
      _S_nsec_sleep(__log_nsec);
    }
  }
  void _M_release_lock() {
    volatile unsigned long* __lock = &_M_lock;
#   if defined(__STL_SGI_THREADS) && defined(__GNUC__) && __mips >= 3
        asm("sync");
        *__lock = 0;
#   elif defined(__STL_SGI_THREADS) && __mips >= 3 \
         && (defined (_ABIN32) || defined(_ABI64))
        __lock_release(__lock);
#   else
        *__lock = 0;
        // This is not sufficient on many multiprocessors, since
        // writes to protected variables and the lock may be reordered.
#   endif
  }

// We no longer use win32 critical sections.
// They appear to be slower in the contention-free case,
// and they appear difficult to initialize without introducing a race.

#elif defined(__STL_PTHREADS)
  pthread_mutex_t _M_lock;
  void _M_initialize()   { pthread_mutex_init(&_M_lock, NULL); }
  void _M_acquire_lock() { pthread_mutex_lock(&_M_lock); }
  void _M_release_lock() { pthread_mutex_unlock(&_M_lock); }
#elif defined(__STL_UITHREADS)
  mutex_t _M_lock;
  void _M_initialize()   { mutex_init(&_M_lock, USYNC_THREAD, 0); }
  void _M_acquire_lock() { mutex_lock(&_M_lock); }
  void _M_release_lock() { mutex_unlock(&_M_lock); }
#else /* No threads */
  void _M_initialize() {}
  void _M_acquire_lock() {}
  void _M_release_lock() {}
#endif
// GCC extension begin
#endif
// GCC extension end
};
// Static initializer for _STL_mutex_lock aggregates, chosen per threading
// model.  Expands to "= { ... }" (or nothing when no initializer is needed).
// GCC extension begin
#if defined(__STL_GTHREADS)
#ifdef __GTHREAD_MUTEX_INIT
#define __STL_MUTEX_INITIALIZER = { __GTHREAD_MUTEX_INIT }
#elif defined(__GTHREAD_MUTEX_INIT_FUNCTION)
#ifdef __GTHREAD_MUTEX_INIT_DEFAULT
#define __STL_MUTEX_INITIALIZER \
  = { 0, __GTHREAD_ONCE_INIT, __GTHREAD_MUTEX_INIT_DEFAULT }
#else
#define __STL_MUTEX_INITIALIZER = { 0, __GTHREAD_ONCE_INIT }
#endif
#endif
#else
// GCC extension end
#ifdef __STL_PTHREADS
// Pthreads locks must be statically initialized to something other than
// the default value of zero.
#   define __STL_MUTEX_INITIALIZER = { PTHREAD_MUTEX_INITIALIZER }
#elif defined(__STL_UITHREADS)
// UIthreads locks must be statically initialized to something other than
// the default value of zero.
#   define __STL_MUTEX_INITIALIZER = { DEFAULTMUTEX }
#elif defined(__STL_SGI_THREADS) || defined(__STL_WIN32THREADS)
#   define __STL_MUTEX_INITIALIZER = { 0 }
#else
#   define __STL_MUTEX_INITIALIZER
#endif
// GCC extension begin
#endif
// GCC extension end
538 // A locking class that uses _STL_mutex_lock. The constructor takes a
539 // reference to an _STL_mutex_lock, and acquires a lock. The
540 // destructor releases the lock. It's not clear that this is exactly
541 // the right functionality. It will probably change in the future.
543 struct _STL_auto_lock
545 _STL_mutex_lock
& _M_lock
;
547 _STL_auto_lock(_STL_mutex_lock
& __lock
) : _M_lock(__lock
)
548 { _M_lock
._M_acquire_lock(); }
549 ~_STL_auto_lock() { _M_lock
._M_release_lock(); }
552 void operator=(const _STL_auto_lock
&);
553 _STL_auto_lock(const _STL_auto_lock
&);
558 #endif /* __SGI_STL_INTERNAL_THREADS_H */