/* Source: glibc (thirdparty mirror), nptl/sysdeps/pthread/bits/libc-lock.h.  */
1 /* libc-internal interface for mutex locks. NPTL version.
2 Copyright (C) 1996-2001, 2002, 2003 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
4
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Library General Public License as
7 published by the Free Software Foundation; either version 2 of the
8 License, or (at your option) any later version.
9
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Library General Public License for more details.
14
15 You should have received a copy of the GNU Library General Public
16 License along with the GNU C Library; see the file COPYING.LIB. If not,
17 write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
18 Boston, MA 02111-1307, USA. */
19
20 #ifndef _BITS_LIBC_LOCK_H
21 #define _BITS_LIBC_LOCK_H 1
22
23 #include <pthread.h>
24 #define __need_NULL
25 #include <stddef.h>
26
27
/* Fortunately Linux now has a means to do locking which is realtime
   safe without the aid of the thread library.  We also need no fancy
   options like error checking mutexes etc.  We only need simple
   locks, maybe recursive.  This can be easily and cheaply implemented
   using futexes.  We will use them everywhere except in ld.so since
   ld.so might be used on old kernels with a different libc.so.  */
34 #ifdef _LIBC
35 # include <lowlevellock.h>
36 # include <tls.h>
37 # include <pthread-functions.h>
38 #endif
39
/* Mutex type.  */
#if defined _LIBC || defined _IO_MTSAFE_IO
# if (defined NOT_IN_libc && !defined IS_IN_libpthread) || !defined _LIBC
/* Code built outside libc/libpthread proper uses real pthread mutexes.  */
typedef pthread_mutex_t __libc_lock_t;
typedef struct { pthread_mutex_t mutex; } __libc_lock_recursive_t;
# else
/* libc and libpthread themselves use bare futex-based locks: the plain
   lock is just the futex word; the recursive lock additionally tracks
   the recursion count and the owning thread (see the recursive lock
   macros below, which read these fields directly).  */
typedef int __libc_lock_t;
typedef struct { int lock; int cnt; void *owner; } __libc_lock_recursive_t;
# endif
/* ld.so always uses a full pthread mutex (see the comment above about
   old kernels).  */
typedef struct { pthread_mutex_t mutex; } __rtld_lock_recursive_t;
# ifdef __USE_UNIX98
typedef pthread_rwlock_t __libc_rwlock_t;
# else
typedef struct __libc_rwlock_opaque__ __libc_rwlock_t;
# endif
#else
/* Fully opaque types for all other users; only pointers to these may
   appear in public structures.  */
typedef struct __libc_lock_opaque__ __libc_lock_t;
typedef struct __libc_lock_recursive_opaque__ __libc_lock_recursive_t;
typedef struct __libc_rwlock_opaque__ __libc_rwlock_t;
#endif

/* Type for key to thread-specific data.  */
typedef pthread_key_t __libc_key_t;
63
/* Define a lock variable NAME with storage class CLASS.  The lock must be
   initialized with __libc_lock_init before it can be used (or define it
   with __libc_lock_define_initialized, below).  Use `extern' for CLASS to
   declare a lock defined in another module.  In public structure
   definitions you must use a pointer to the lock structure (i.e., NAME
   begins with a `*'), because its storage size will not be known outside
   of libc.  */
#define __libc_lock_define(CLASS,NAME) \
  CLASS __libc_lock_t NAME;
/* Same, for a read-write lock.  */
#define __libc_rwlock_define(CLASS,NAME) \
  CLASS __libc_rwlock_t NAME;
/* Same, for a recursive lock.  */
#define __libc_lock_define_recursive(CLASS,NAME) \
  CLASS __libc_lock_recursive_t NAME;
/* Same, for the recursive lock type used by the dynamic linker.  */
#define __rtld_lock_define_recursive(CLASS,NAME) \
  CLASS __rtld_lock_recursive_t NAME;
79
/* Define an initialized lock variable NAME with storage class CLASS.

   For the C library we take a deeper look at the initializer.  For
   this implementation all fields are initialized to zero.  Therefore
   we don't initialize the variable which allows putting it into the
   BSS section.  (Except on PA-RISC and other odd architectures, where
   initialized locks must be set to one due to the lack of normal
   atomic operations.) */

#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# if LLL_LOCK_INITIALIZER == 0
/* All-zero initializer: omit it so the variable lands in BSS.  */
#  define __libc_lock_define_initialized(CLASS,NAME) \
  CLASS __libc_lock_t NAME;
# else
#  define __libc_lock_define_initialized(CLASS,NAME) \
  CLASS __libc_lock_t NAME = LLL_LOCK_INITIALIZER;
# endif
#else
# if __LT_SPINLOCK_INIT == 0
#  define __libc_lock_define_initialized(CLASS,NAME) \
  CLASS __libc_lock_t NAME;
# else
#  define __libc_lock_define_initialized(CLASS,NAME) \
  CLASS __libc_lock_t NAME = PTHREAD_MUTEX_INITIALIZER;
# endif
#endif

#define __libc_rwlock_define_initialized(CLASS,NAME) \
  CLASS __libc_rwlock_t NAME = PTHREAD_RWLOCK_INITIALIZER;

/* Define an initialized recursive lock variable NAME with storage
   class CLASS.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# if LLL_LOCK_INITIALIZER == 0
#  define __libc_lock_define_initialized_recursive(CLASS,NAME) \
  CLASS __libc_lock_recursive_t NAME;
# else
#  define __libc_lock_define_initialized_recursive(CLASS,NAME) \
  CLASS __libc_lock_recursive_t NAME = _LIBC_LOCK_RECURSIVE_INITIALIZER;
# endif
/* Initializer for the { lock, cnt, owner } futex-based representation.  */
# define _LIBC_LOCK_RECURSIVE_INITIALIZER \
  { LLL_LOCK_INITIALIZER, 0, NULL }
#else
# define __libc_lock_define_initialized_recursive(CLASS,NAME) \
  CLASS __libc_lock_recursive_t NAME = _LIBC_LOCK_RECURSIVE_INITIALIZER;
# define _LIBC_LOCK_RECURSIVE_INITIALIZER \
  {PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP}
#endif

#define __rtld_lock_define_initialized_recursive(CLASS,NAME) \
  CLASS __rtld_lock_recursive_t NAME = _RTLD_LOCK_RECURSIVE_INITIALIZER;
#define _RTLD_LOCK_RECURSIVE_INITIALIZER \
  {PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP}
133
/* If we check for a weakly referenced symbol and then perform a
   normal jump to it the code generated for some platforms in case of
   PIC is unnecessarily slow.  What would happen is that the function
   is first referenced as data and then it is called indirectly
   through the PLT.  We can make this a direct jump.  */
#ifdef __PIC__
/* Load the (possibly NULL, if weak and undefined) address once into a
   local pointer, then call through it.  */
# define __libc_maybe_call(FUNC, ARGS, ELSE) \
  (__extension__ ({ __typeof (FUNC) *_fn = (FUNC); \
                    _fn != NULL ? (*_fn) ARGS : ELSE; }))
#else
# define __libc_maybe_call(FUNC, ARGS, ELSE) \
  (FUNC != NULL ? FUNC ARGS : ELSE)
#endif

/* Call thread functions through the function pointer table.  */
#if defined SHARED && !defined NOT_IN_libc
/* In the shared libc the __libc_pthread_functions table entries are the
   authority on whether libpthread is loaded, so test the table entry
   instead of a weak symbol.  */
# define PTF(NAME) __libc_pthread_functions.ptr_##NAME
# define __libc_ptf_call(FUNC, ARGS, ELSE) \
  (PTF(FUNC) != NULL ? PTF(FUNC) ARGS : ELSE)
#else
# define PTF(NAME) NAME
# define __libc_ptf_call(FUNC, ARGS, ELSE) \
  __libc_maybe_call (FUNC, ARGS, ELSE)
#endif
158
159
/* Initialize the named lock variable, leaving it in a consistent, unlocked
   state.  Evaluates to 0 like pthread_mutex_init would.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_init(NAME) ((NAME) = LLL_LOCK_INITIALIZER, 0)
#else
# define __libc_lock_init(NAME) \
  __libc_maybe_call (__pthread_mutex_init, (&(NAME), NULL), 0)
#endif
#define __libc_rwlock_init(NAME) \
  __libc_maybe_call (__pthread_rwlock_init, (&(NAME), NULL), 0)

/* Same as last but this time we initialize a recursive mutex.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_init_recursive(NAME) \
  ((NAME) = (__libc_lock_recursive_t) _LIBC_LOCK_RECURSIVE_INITIALIZER, 0)
#else
/* No-op when libpthread is not linked in (__pthread_mutex_init is then
   an undefined weak reference, i.e. NULL).  */
# define __libc_lock_init_recursive(NAME) \
  do { \
    if (__pthread_mutex_init != NULL) \
      { \
        pthread_mutexattr_t __attr; \
        __pthread_mutexattr_init (&__attr); \
        __pthread_mutexattr_settype (&__attr, PTHREAD_MUTEX_RECURSIVE_NP); \
        __pthread_mutex_init (&(NAME).mutex, &__attr); \
        __pthread_mutexattr_destroy (&__attr); \
      } \
  } while (0)
#endif

/* Initialize the dynamic linker's recursive lock; same pattern as above.  */
#define __rtld_lock_init_recursive(NAME) \
  do { \
    if (__pthread_mutex_init != NULL) \
      { \
        pthread_mutexattr_t __attr; \
        __pthread_mutexattr_init (&__attr); \
        __pthread_mutexattr_settype (&__attr, PTHREAD_MUTEX_RECURSIVE_NP); \
        __pthread_mutex_init (&(NAME).mutex, &__attr); \
        __pthread_mutexattr_destroy (&__attr); \
      } \
  } while (0)
200
/* Finalize the named lock variable, which must be locked.  It cannot be
   used again until __libc_lock_init is called again on it.  This must be
   called on a lock variable before the containing storage is reused.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
/* Futex-based locks need no destruction.  */
# define __libc_lock_fini(NAME) ((void) 0)
#else
# define __libc_lock_fini(NAME) \
  __libc_maybe_call (__pthread_mutex_destroy, (&(NAME)), 0)
#endif
#define __libc_rwlock_fini(NAME) \
  __libc_maybe_call (__pthread_rwlock_destroy, (&(NAME)), 0)

/* Finalize recursive named lock.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_fini_recursive(NAME) ((void) 0)
#else
/* In this configuration __libc_lock_recursive_t is a struct wrapping the
   mutex (see the typedefs above), so the address of the member must be
   passed, not the address of the whole struct.  */
# define __libc_lock_fini_recursive(NAME) \
  __libc_maybe_call (__pthread_mutex_destroy, (&(NAME).mutex), 0)
#endif
220
/* Lock the named lock variable.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
/* Statement expression so the macro evaluates to 0, matching the
   pthread_mutex_lock-based variant below.  */
# define __libc_lock_lock(NAME) \
  ({ lll_lock (NAME); 0; })
#else
# define __libc_lock_lock(NAME) \
  __libc_maybe_call (__pthread_mutex_lock, (&(NAME)), 0)
#endif
#define __libc_rwlock_rdlock(NAME) \
  __libc_ptf_call (__pthread_rwlock_rdlock, (&(NAME)), 0)
#define __libc_rwlock_wrlock(NAME) \
  __libc_ptf_call (__pthread_rwlock_wrlock, (&(NAME)), 0)

/* Lock the recursive named lock variable.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
/* Hand-rolled recursion: take the futex only on the first acquisition
   by this thread, then record ownership; re-entry just bumps the
   count.  */
# define __libc_lock_lock_recursive(NAME) \
  do { \
    void *self = THREAD_SELF; \
    if ((NAME).owner != self) \
      { \
        lll_lock ((NAME).lock); \
        (NAME).owner = self; \
      } \
    ++(NAME).cnt; \
  } while (0)
#else
# define __libc_lock_lock_recursive(NAME) \
  __libc_maybe_call (__pthread_mutex_lock, (&(NAME).mutex), 0)
#endif

#define __rtld_lock_lock_recursive(NAME) \
  __libc_maybe_call (__pthread_mutex_lock, (&(NAME).mutex), 0)
/* Try to lock the named lock variable.  Evaluates to 0 on success and
   nonzero if the lock is already held.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_trylock(NAME) \
  lll_trylock (NAME)
#else
# define __libc_lock_trylock(NAME) \
  __libc_maybe_call (__pthread_mutex_trylock, (&(NAME)), 0)
#endif
#define __libc_rwlock_tryrdlock(NAME) \
  __libc_maybe_call (__pthread_rwlock_tryrdlock, (&(NAME)), 0)
#define __libc_rwlock_trywrlock(NAME) \
  __libc_maybe_call (__pthread_rwlock_trywrlock, (&(NAME)), 0)

/* Try to lock the recursive named lock variable.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
/* Mirror __libc_lock_lock_recursive: succeed immediately on re-entry by
   the owning thread, otherwise attempt the futex without blocking.  */
# define __libc_lock_trylock_recursive(NAME) \
  ({ \
    int result = 0; \
    void *self = THREAD_SELF; \
    if ((NAME).owner != self) \
      { \
        if (lll_trylock ((NAME).lock) == 0) \
          { \
            (NAME).owner = self; \
            (NAME).cnt = 1; \
          } \
        else \
          result = EBUSY; \
      } \
    else \
      ++(NAME).cnt; \
    result; \
  })
#else
/* __libc_lock_recursive_t is a struct wrapping the mutex in this
   configuration, so pass the address of the member, consistent with
   __libc_lock_lock_recursive above.  */
# define __libc_lock_trylock_recursive(NAME) \
  __libc_maybe_call (__pthread_mutex_trylock, (&(NAME).mutex), 0)
#endif

#define __rtld_lock_trylock_recursive(NAME) \
  __libc_maybe_call (__pthread_mutex_trylock, (&(NAME).mutex), 0)
294
/* Unlock the named lock variable.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
# define __libc_lock_unlock(NAME) \
  lll_unlock (NAME)
#else
# define __libc_lock_unlock(NAME) \
  __libc_maybe_call (__pthread_mutex_unlock, (&(NAME)), 0)
#endif
#define __libc_rwlock_unlock(NAME) \
  __libc_ptf_call (__pthread_rwlock_unlock, (&(NAME)), 0)

/* Unlock the recursive named lock variable.  */
#if defined _LIBC && (!defined NOT_IN_libc || defined IS_IN_libpthread)
/* We do no error checking here.  Clear ownership and release the futex
   only when the outermost recursive acquisition is undone.  */
# define __libc_lock_unlock_recursive(NAME) \
  do { \
    if (--(NAME).cnt == 0) \
      { \
        (NAME).owner = NULL; \
        lll_unlock ((NAME).lock); \
      } \
  } while (0)
#else
/* As with the lock/trylock macros above, the mutex is a member of the
   wrapping struct in this configuration.  */
# define __libc_lock_unlock_recursive(NAME) \
  __libc_maybe_call (__pthread_mutex_unlock, (&(NAME).mutex), 0)
#endif

#define __rtld_lock_unlock_recursive(NAME) \
  __libc_maybe_call (__pthread_mutex_unlock, (&(NAME).mutex), 0)
324
/* Define once control variable.  */
#if PTHREAD_ONCE_INIT == 0
/* Special case for static variables where we can avoid the initialization
   if it is zero.  */
# define __libc_once_define(CLASS, NAME) \
  CLASS pthread_once_t NAME
#else
# define __libc_once_define(CLASS, NAME) \
  CLASS pthread_once_t NAME = PTHREAD_ONCE_INIT
#endif

/* Call handler iff the first call.  Without a thread library the process
   is single-threaded, so just run the handler once and mark the control
   variable done by hand.  */
#define __libc_once(ONCE_CONTROL, INIT_FUNCTION) \
  do { \
    if (PTF(__pthread_once) != NULL) \
      PTF(__pthread_once) (&(ONCE_CONTROL), INIT_FUNCTION); \
    else if ((ONCE_CONTROL) == PTHREAD_ONCE_INIT) { \
      INIT_FUNCTION (); \
      (ONCE_CONTROL) |= 2; \
    } \
  } while (0)
346
347
/* Note that for I/O cleanup handling we are using the old-style
   cancel handling.  It does not have to be integrated with C++ since
   no C++ code is called in the middle.  The old-style handling is
   faster and the support is not going away.  */
extern void _pthread_cleanup_push (struct _pthread_cleanup_buffer *buffer,
                                   void (*routine) (void *), void *arg);
extern void _pthread_cleanup_pop (struct _pthread_cleanup_buffer *buffer,
                                  int execute);
extern void _pthread_cleanup_push_defer (struct _pthread_cleanup_buffer *buffer,
                                         void (*routine) (void *), void *arg);
extern void _pthread_cleanup_pop_restore (struct _pthread_cleanup_buffer *buffer,
                                          int execute);

/* Start critical region with cleanup.  Opens a brace-delimited block that
   must be closed with __libc_cleanup_region_end.  If the thread library's
   push/defer hook is unavailable, the routine and argument are merely
   recorded in _buffer so the end macro can invoke them directly.  */
#define __libc_cleanup_region_start(DOIT, FCT, ARG) \
  { struct _pthread_cleanup_buffer _buffer; \
    int _avail; \
    if (DOIT) { \
      _avail = PTF(_pthread_cleanup_push_defer) != NULL; \
      if (_avail) { \
        PTF(_pthread_cleanup_push_defer) (&_buffer, FCT, ARG); \
      } else { \
        _buffer.__routine = (FCT); \
        _buffer.__arg = (ARG); \
      } \
    } else { \
      _avail = 0; \
    }

/* End critical region with cleanup.  Closes the block opened by
   __libc_cleanup_region_start; the handler runs iff DOIT.  */
#define __libc_cleanup_region_end(DOIT) \
    if (_avail) { \
      PTF(_pthread_cleanup_pop_restore) (&_buffer, DOIT); \
    } else if (DOIT) \
      _buffer.__routine (_buffer.__arg); \
  }

/* Sometimes we have to exit the block in the middle.  Like
   __libc_cleanup_region_end, but does not close the block.  */
#define __libc_cleanup_end(DOIT) \
  if (_avail) { \
    PTF(_pthread_cleanup_pop_restore) (&_buffer, DOIT); \
  } else if (DOIT) \
    _buffer.__routine (_buffer.__arg)
391
392
/* Normal cleanup handling, based on C cleanup attribute.  Runs when a
   __pthread_cleanup_frame goes out of scope (see __libc_cleanup_push):
   invokes the recorded cancel routine unless it has been disarmed.  */
extern inline void
__libc_cleanup_routine (struct __pthread_cleanup_frame *f)
{
  if (f->__do_it != 0)
    (*f->__cancel_routine) (f->__cancel_arg);
}
400
/* Open a cleanup region: __clframe's __cleanup__ attribute makes
   __libc_cleanup_routine run automatically when the block is left,
   unless __libc_cleanup_pop disarms it first.  */
#define __libc_cleanup_push(fct, arg) \
  do { \
    struct __pthread_cleanup_frame __clframe \
      __attribute__ ((__cleanup__ (__libc_cleanup_routine))) \
      = { .__cancel_routine = (fct), .__cancel_arg = (arg), \
          .__do_it = 1 };

/* Close the region opened by __libc_cleanup_push; the handler then runs
   iff EXECUTE is nonzero.  */
#define __libc_cleanup_pop(execute) \
    __clframe.__do_it = (execute); \
  } while (0)
411
412
413 /* Create thread-specific key. */
414 #define __libc_key_create(KEY, DESTRUCTOR) \
415 __libc_ptf_call (__pthread_key_create, (KEY, DESTRUCTOR), 1)
416
417 /* Get thread-specific data. */
418 #define __libc_getspecific(KEY) \
419 __libc_ptf_call (__pthread_getspecific, (KEY), NULL)
420
421 /* Set thread-specific data. */
422 #define __libc_setspecific(KEY, VALUE) \
423 __libc_ptf_call (__pthread_setspecific, (KEY, VALUE), 0)
424
425
426 /* Register handlers to execute before and after `fork'. Note that the
427 last parameter is NULL. The handlers registered by the libc are
428 never removed so this is OK. */
429 #define __libc_atfork(PREPARE, PARENT, CHILD) \
430 __register_atfork (PREPARE, PARENT, CHILD, NULL)
431 extern int __register_atfork (void (*__prepare) (void),
432 void (*__parent) (void),
433 void (*__child) (void),
434 void *__dso_handle);
435
/* Functions that are used by this file and are internal to the GNU C
   library.  They are declared weak below, so when libpthread is not
   linked in they resolve to NULL and the __libc_maybe_call users above
   skip the call.  */

extern int __pthread_mutex_init (pthread_mutex_t *__mutex,
                                 __const pthread_mutexattr_t *__mutex_attr);

extern int __pthread_mutex_destroy (pthread_mutex_t *__mutex);

extern int __pthread_mutex_trylock (pthread_mutex_t *__mutex);

extern int __pthread_mutex_lock (pthread_mutex_t *__mutex);

extern int __pthread_mutex_unlock (pthread_mutex_t *__mutex);

extern int __pthread_mutexattr_init (pthread_mutexattr_t *__attr);

extern int __pthread_mutexattr_destroy (pthread_mutexattr_t *__attr);

extern int __pthread_mutexattr_settype (pthread_mutexattr_t *__attr,
                                        int __kind);

#ifdef __USE_UNIX98
extern int __pthread_rwlock_init (pthread_rwlock_t *__rwlock,
                                  __const pthread_rwlockattr_t *__attr);

extern int __pthread_rwlock_destroy (pthread_rwlock_t *__rwlock);

extern int __pthread_rwlock_rdlock (pthread_rwlock_t *__rwlock);

extern int __pthread_rwlock_tryrdlock (pthread_rwlock_t *__rwlock);

extern int __pthread_rwlock_wrlock (pthread_rwlock_t *__rwlock);

extern int __pthread_rwlock_trywrlock (pthread_rwlock_t *__rwlock);

extern int __pthread_rwlock_unlock (pthread_rwlock_t *__rwlock);
#endif

extern int __pthread_key_create (pthread_key_t *__key,
                                 void (*__destr_function) (void *));

extern int __pthread_setspecific (pthread_key_t __key,
                                  __const void *__pointer);

extern void *__pthread_getspecific (pthread_key_t __key);

extern int __pthread_once (pthread_once_t *__once_control,
                           void (*__init_routine) (void));

extern int __pthread_atfork (void (*__prepare) (void),
                             void (*__parent) (void),
                             void (*__child) (void));
488
489
490
/* Make the pthread functions weak so that we can elide them from
   single-threaded processes.  */
#ifndef __NO_WEAK_PTHREAD_ALIASES
# ifdef weak_extern
#  if _LIBC
#   include <bp-sym.h>
#  else
/* No bounded-pointer symbol decoration outside libc; BP_SYM must be a
   function-like macro, i.e. no space before the parameter list.  */
#   define BP_SYM(sym) sym
#  endif
weak_extern (BP_SYM (__pthread_mutex_init))
weak_extern (BP_SYM (__pthread_mutex_destroy))
weak_extern (BP_SYM (__pthread_mutex_lock))
weak_extern (BP_SYM (__pthread_mutex_trylock))
weak_extern (BP_SYM (__pthread_mutex_unlock))
weak_extern (BP_SYM (__pthread_mutexattr_init))
weak_extern (BP_SYM (__pthread_mutexattr_destroy))
weak_extern (BP_SYM (__pthread_mutexattr_settype))
weak_extern (BP_SYM (__pthread_rwlock_init))
weak_extern (BP_SYM (__pthread_rwlock_destroy))
weak_extern (BP_SYM (__pthread_rwlock_rdlock))
weak_extern (BP_SYM (__pthread_rwlock_tryrdlock))
weak_extern (BP_SYM (__pthread_rwlock_wrlock))
weak_extern (BP_SYM (__pthread_rwlock_trywrlock))
weak_extern (BP_SYM (__pthread_rwlock_unlock))
weak_extern (BP_SYM (__pthread_key_create))
weak_extern (BP_SYM (__pthread_setspecific))
weak_extern (BP_SYM (__pthread_getspecific))
weak_extern (BP_SYM (__pthread_once))
weak_extern (__pthread_initialize)
weak_extern (__pthread_atfork)
weak_extern (BP_SYM (_pthread_cleanup_push_defer))
weak_extern (BP_SYM (_pthread_cleanup_pop_restore))
# else
/* Keep this list in sync with the weak_extern list above.  */
#  pragma weak __pthread_mutex_init
#  pragma weak __pthread_mutex_destroy
#  pragma weak __pthread_mutex_lock
#  pragma weak __pthread_mutex_trylock
#  pragma weak __pthread_mutex_unlock
#  pragma weak __pthread_mutexattr_init
#  pragma weak __pthread_mutexattr_destroy
#  pragma weak __pthread_mutexattr_settype
#  pragma weak __pthread_rwlock_init
#  pragma weak __pthread_rwlock_destroy
#  pragma weak __pthread_rwlock_rdlock
#  pragma weak __pthread_rwlock_tryrdlock
#  pragma weak __pthread_rwlock_wrlock
#  pragma weak __pthread_rwlock_trywrlock
#  pragma weak __pthread_rwlock_unlock
#  pragma weak __pthread_key_create
#  pragma weak __pthread_setspecific
#  pragma weak __pthread_getspecific
#  pragma weak __pthread_once
#  pragma weak __pthread_initialize
#  pragma weak __pthread_atfork
#  pragma weak _pthread_cleanup_push_defer
#  pragma weak _pthread_cleanup_pop_restore
# endif
#endif
548
549 #endif /* bits/libc-lock.h */