/* Private libc-internal interface for mutex locks.  NPTL version.
   Copyright (C) 1996-2015 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, see <http://www.gnu.org/licenses/>.  */
18 | ||
19 | #ifndef _BITS_LIBC_LOCKP_H | |
20 | #define _BITS_LIBC_LOCKP_H 1 | |
21 | ||
22 | #include <pthread.h> | |
23 | #define __need_NULL | |
24 | #include <stddef.h> | |
25 | ||
26 | ||
27 | /* Fortunately Linux now has a mean to do locking which is realtime | |
28 | safe without the aid of the thread library. We also need no fancy | |
29 | options like error checking mutexes etc. We only need simple | |
30 | locks, maybe recursive. This can be easily and cheaply implemented | |
31 | using futexes. We will use them everywhere except in ld.so since | |
32 | ld.so might be used on old kernels with a different libc.so. */ | |
33 | #include <lowlevellock.h> | |
34 | #include <tls.h> | |
35 | #include <pthread-functions.h> | |
36 | ||
e4f639e4 RM |
37 | #if IS_IN (libpthread) |
38 | /* This gets us the declarations of the __pthread_* internal names, | |
39 | and hidden_proto for them. */ | |
40 | # include <nptl/pthreadP.h> | |
41 | #endif | |
42 | ||
9463518d | 43 | /* Mutex type. */ |
4f41c682 | 44 | #if !IS_IN (libc) && !IS_IN (libpthread) |
9463518d UD |
45 | typedef pthread_mutex_t __libc_lock_t; |
46 | #else | |
47 | typedef int __libc_lock_t; | |
48 | #endif | |
49 | typedef struct { pthread_mutex_t mutex; } __rtld_lock_recursive_t; | |
50 | typedef pthread_rwlock_t __libc_rwlock_t; | |
51 | ||
52 | /* Type for key to thread-specific data. */ | |
53 | typedef pthread_key_t __libc_key_t; | |
54 | ||
55 | /* Define a lock variable NAME with storage class CLASS. The lock must be | |
56 | initialized with __libc_lock_init before it can be used (or define it | |
57 | with __libc_lock_define_initialized, below). Use `extern' for CLASS to | |
58 | declare a lock defined in another module. In public structure | |
59 | definitions you must use a pointer to the lock structure (i.e., NAME | |
60 | begins with a `*'), because its storage size will not be known outside | |
61 | of libc. */ | |
62 | #define __libc_lock_define(CLASS,NAME) \ | |
63 | CLASS __libc_lock_t NAME; | |
64 | #define __libc_rwlock_define(CLASS,NAME) \ | |
65 | CLASS __libc_rwlock_t NAME; | |
66 | #define __rtld_lock_define_recursive(CLASS,NAME) \ | |
67 | CLASS __rtld_lock_recursive_t NAME; | |
68 | ||
69 | /* Define an initialized lock variable NAME with storage class CLASS. | |
70 | ||
71 | For the C library we take a deeper look at the initializer. For | |
72 | this implementation all fields are initialized to zero. Therefore | |
73 | we don't initialize the variable which allows putting it into the | |
74 | BSS section. (Except on PA-RISC and other odd architectures, where | |
75 | initialized locks must be set to one due to the lack of normal | |
76 | atomic operations.) */ | |
77 | ||
4f41c682 | 78 | #if IS_IN (libc) || IS_IN (libpthread) |
9463518d UD |
79 | # if LLL_LOCK_INITIALIZER == 0 |
80 | # define __libc_lock_define_initialized(CLASS,NAME) \ | |
81 | CLASS __libc_lock_t NAME; | |
82 | # else | |
83 | # define __libc_lock_define_initialized(CLASS,NAME) \ | |
84 | CLASS __libc_lock_t NAME = LLL_LOCK_INITIALIZER; | |
85 | # endif | |
86 | #else | |
fcd89ebe | 87 | # define __libc_lock_define_initialized(CLASS,NAME) \ |
9463518d | 88 | CLASS __libc_lock_t NAME; |
9463518d UD |
89 | #endif |
90 | ||
91 | #define __libc_rwlock_define_initialized(CLASS,NAME) \ | |
92 | CLASS __libc_rwlock_t NAME = PTHREAD_RWLOCK_INITIALIZER; | |
93 | ||
94 | #define __rtld_lock_define_initialized_recursive(CLASS,NAME) \ | |
95 | CLASS __rtld_lock_recursive_t NAME = _RTLD_LOCK_RECURSIVE_INITIALIZER; | |
96 | #define _RTLD_LOCK_RECURSIVE_INITIALIZER \ | |
97 | {PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP} | |
98 | ||
99 | #define __rtld_lock_initialize(NAME) \ | |
100 | (void) ((NAME) = (__rtld_lock_recursive_t) _RTLD_LOCK_RECURSIVE_INITIALIZER) | |
101 | ||
102 | /* If we check for a weakly referenced symbol and then perform a | |
103 | normal jump to it te code generated for some platforms in case of | |
104 | PIC is unnecessarily slow. What would happen is that the function | |
105 | is first referenced as data and then it is called indirectly | |
106 | through the PLT. We can make this a direct jump. */ | |
107 | #ifdef __PIC__ | |
108 | # define __libc_maybe_call(FUNC, ARGS, ELSE) \ | |
109 | (__extension__ ({ __typeof (FUNC) *_fn = (FUNC); \ | |
110 | _fn != NULL ? (*_fn) ARGS : ELSE; })) | |
111 | #else | |
112 | # define __libc_maybe_call(FUNC, ARGS, ELSE) \ | |
113 | (FUNC != NULL ? FUNC ARGS : ELSE) | |
114 | #endif | |
115 | ||
116 | /* Call thread functions through the function pointer table. */ | |
4f41c682 | 117 | #if defined SHARED && IS_IN (libc) |
9463518d UD |
118 | # define PTFAVAIL(NAME) __libc_pthread_functions_init |
119 | # define __libc_ptf_call(FUNC, ARGS, ELSE) \ | |
120 | (__libc_pthread_functions_init ? PTHFCT_CALL (ptr_##FUNC, ARGS) : ELSE) | |
121 | # define __libc_ptf_call_always(FUNC, ARGS) \ | |
122 | PTHFCT_CALL (ptr_##FUNC, ARGS) | |
e4f639e4 RM |
123 | #elif IS_IN (libpthread) |
124 | # define PTFAVAIL(NAME) 1 | |
125 | # define __libc_ptf_call(FUNC, ARGS, ELSE) \ | |
126 | FUNC ARGS | |
127 | # define __libc_ptf_call_always(FUNC, ARGS) \ | |
128 | FUNC ARGS | |
9463518d UD |
129 | #else |
130 | # define PTFAVAIL(NAME) (NAME != NULL) | |
131 | # define __libc_ptf_call(FUNC, ARGS, ELSE) \ | |
132 | __libc_maybe_call (FUNC, ARGS, ELSE) | |
133 | # define __libc_ptf_call_always(FUNC, ARGS) \ | |
134 | FUNC ARGS | |
135 | #endif | |
136 | ||
137 | ||
138 | /* Initialize the named lock variable, leaving it in a consistent, unlocked | |
139 | state. */ | |
4f41c682 | 140 | #if IS_IN (libc) || IS_IN (libpthread) |
ab49e763 RM |
141 | # define __libc_lock_init(NAME) \ |
142 | ((void) ((NAME) = LLL_LOCK_INITIALIZER)) | |
9463518d UD |
143 | #else |
144 | # define __libc_lock_init(NAME) \ | |
145 | __libc_maybe_call (__pthread_mutex_init, (&(NAME), NULL), 0) | |
146 | #endif | |
4f41c682 | 147 | #if defined SHARED && IS_IN (libc) |
ab49e763 | 148 | /* ((NAME) = (__libc_rwlock_t) PTHREAD_RWLOCK_INITIALIZER) is inefficient. */ |
9463518d | 149 | # define __libc_rwlock_init(NAME) \ |
ab49e763 | 150 | ((void) __builtin_memset (&(NAME), '\0', sizeof (NAME))) |
9463518d UD |
151 | #else |
152 | # define __libc_rwlock_init(NAME) \ | |
153 | __libc_maybe_call (__pthread_rwlock_init, (&(NAME), NULL), 0) | |
154 | #endif | |
155 | ||
9463518d UD |
156 | /* Finalize the named lock variable, which must be locked. It cannot be |
157 | used again until __libc_lock_init is called again on it. This must be | |
158 | called on a lock variable before the containing storage is reused. */ | |
4f41c682 | 159 | #if IS_IN (libc) || IS_IN (libpthread) |
9463518d UD |
160 | # define __libc_lock_fini(NAME) ((void) 0) |
161 | #else | |
162 | # define __libc_lock_fini(NAME) \ | |
163 | __libc_maybe_call (__pthread_mutex_destroy, (&(NAME)), 0) | |
164 | #endif | |
4f41c682 | 165 | #if defined SHARED && IS_IN (libc) |
9463518d UD |
166 | # define __libc_rwlock_fini(NAME) ((void) 0) |
167 | #else | |
168 | # define __libc_rwlock_fini(NAME) \ | |
169 | __libc_maybe_call (__pthread_rwlock_destroy, (&(NAME)), 0) | |
170 | #endif | |
171 | ||
172 | /* Lock the named lock variable. */ | |
4f41c682 | 173 | #if IS_IN (libc) || IS_IN (libpthread) |
309becf1 MK |
174 | # ifndef __libc_lock_lock |
175 | # define __libc_lock_lock(NAME) \ | |
9463518d | 176 | ({ lll_lock (NAME, LLL_PRIVATE); 0; }) |
309becf1 | 177 | # endif |
9463518d | 178 | #else |
309becf1 | 179 | # undef __libc_lock_lock |
9463518d UD |
180 | # define __libc_lock_lock(NAME) \ |
181 | __libc_maybe_call (__pthread_mutex_lock, (&(NAME)), 0) | |
182 | #endif | |
183 | #define __libc_rwlock_rdlock(NAME) \ | |
184 | __libc_ptf_call (__pthread_rwlock_rdlock, (&(NAME)), 0) | |
185 | #define __libc_rwlock_wrlock(NAME) \ | |
186 | __libc_ptf_call (__pthread_rwlock_wrlock, (&(NAME)), 0) | |
187 | ||
188 | /* Try to lock the named lock variable. */ | |
4f41c682 | 189 | #if IS_IN (libc) || IS_IN (libpthread) |
309becf1 MK |
190 | # ifndef __libc_lock_trylock |
191 | # define __libc_lock_trylock(NAME) \ | |
9463518d | 192 | lll_trylock (NAME) |
309becf1 | 193 | # endif |
9463518d | 194 | #else |
309becf1 | 195 | # undef __libc_lock_trylock |
9463518d UD |
196 | # define __libc_lock_trylock(NAME) \ |
197 | __libc_maybe_call (__pthread_mutex_trylock, (&(NAME)), 0) | |
198 | #endif | |
199 | #define __libc_rwlock_tryrdlock(NAME) \ | |
200 | __libc_maybe_call (__pthread_rwlock_tryrdlock, (&(NAME)), 0) | |
201 | #define __libc_rwlock_trywrlock(NAME) \ | |
202 | __libc_maybe_call (__pthread_rwlock_trywrlock, (&(NAME)), 0) | |
203 | ||
204 | #define __rtld_lock_trylock_recursive(NAME) \ | |
205 | __libc_maybe_call (__pthread_mutex_trylock, (&(NAME).mutex), 0) | |
206 | ||
207 | /* Unlock the named lock variable. */ | |
4f41c682 | 208 | #if IS_IN (libc) || IS_IN (libpthread) |
9463518d UD |
209 | # define __libc_lock_unlock(NAME) \ |
210 | lll_unlock (NAME, LLL_PRIVATE) | |
211 | #else | |
212 | # define __libc_lock_unlock(NAME) \ | |
213 | __libc_maybe_call (__pthread_mutex_unlock, (&(NAME)), 0) | |
214 | #endif | |
215 | #define __libc_rwlock_unlock(NAME) \ | |
216 | __libc_ptf_call (__pthread_rwlock_unlock, (&(NAME)), 0) | |
217 | ||
218 | #ifdef SHARED | |
219 | # define __rtld_lock_default_lock_recursive(lock) \ | |
220 | ++((pthread_mutex_t *)(lock))->__data.__count; | |
221 | ||
222 | # define __rtld_lock_default_unlock_recursive(lock) \ | |
223 | --((pthread_mutex_t *)(lock))->__data.__count; | |
224 | ||
225 | # define __rtld_lock_lock_recursive(NAME) \ | |
226 | GL(dl_rtld_lock_recursive) (&(NAME).mutex) | |
227 | ||
228 | # define __rtld_lock_unlock_recursive(NAME) \ | |
229 | GL(dl_rtld_unlock_recursive) (&(NAME).mutex) | |
230 | #else | |
231 | # define __rtld_lock_lock_recursive(NAME) \ | |
232 | __libc_maybe_call (__pthread_mutex_lock, (&(NAME).mutex), 0) | |
233 | ||
234 | # define __rtld_lock_unlock_recursive(NAME) \ | |
235 | __libc_maybe_call (__pthread_mutex_unlock, (&(NAME).mutex), 0) | |
236 | #endif | |
237 | ||
/* Define once control variable.  */
#if PTHREAD_ONCE_INIT == 0
/* Special case for static variables where we can avoid the initialization
   if it is zero.  */
# define __libc_once_define(CLASS, NAME) \
  CLASS pthread_once_t NAME
#else
# define __libc_once_define(CLASS, NAME) \
  CLASS pthread_once_t NAME = PTHREAD_ONCE_INIT
#endif

/* Call handler iff the first call.  */
#define __libc_once(ONCE_CONTROL, INIT_FUNCTION) \
  do {									      \
    if (PTFAVAIL (__pthread_once))					      \
      __libc_ptf_call_always (__pthread_once, (&(ONCE_CONTROL),		      \
					       INIT_FUNCTION));		      \
    else if ((ONCE_CONTROL) == PTHREAD_ONCE_INIT) {			      \
	INIT_FUNCTION ();						      \
	(ONCE_CONTROL) |= 2;						      \
      }									      \
  } while (0)

/* Get once control variable.  */
#define __libc_once_get(ONCE_CONTROL) ((ONCE_CONTROL) != PTHREAD_ONCE_INIT)
9463518d UD |
/* Note that for I/O cleanup handling we are using the old-style
   cancel handling.  It does not have to be integrated with C++ since
   no C++ code is called in the middle.  The old-style handling is
   faster and the support is not going away.  */
extern void _pthread_cleanup_push (struct _pthread_cleanup_buffer *buffer,
				   void (*routine) (void *), void *arg);
extern void _pthread_cleanup_pop (struct _pthread_cleanup_buffer *buffer,
				  int execute);
extern void _pthread_cleanup_push_defer (struct _pthread_cleanup_buffer *buffer,
					 void (*routine) (void *), void *arg);
extern void _pthread_cleanup_pop_restore (struct _pthread_cleanup_buffer *buffer,
					  int execute);

/* Sometimes we have to exit the block in the middle.  */
#define __libc_cleanup_end(DOIT) \
    if (_avail) {							      \
      __libc_ptf_call_always (_pthread_cleanup_pop_restore, (&_buffer, DOIT));\
    } else if (DOIT)							      \
      _buffer.__routine (_buffer.__arg)
283 | ||
284 | ||
285 | /* Normal cleanup handling, based on C cleanup attribute. */ | |
286 | __extern_inline void | |
287 | __libc_cleanup_routine (struct __pthread_cleanup_frame *f) | |
288 | { | |
289 | if (f->__do_it) | |
290 | f->__cancel_routine (f->__cancel_arg); | |
291 | } | |
292 | ||
293 | #define __libc_cleanup_push(fct, arg) \ | |
294 | do { \ | |
295 | struct __pthread_cleanup_frame __clframe \ | |
296 | __attribute__ ((__cleanup__ (__libc_cleanup_routine))) \ | |
297 | = { .__cancel_routine = (fct), .__cancel_arg = (arg), \ | |
298 | .__do_it = 1 }; | |
299 | ||
300 | #define __libc_cleanup_pop(execute) \ | |
301 | __clframe.__do_it = (execute); \ | |
302 | } while (0) | |
303 | ||
304 | ||
/* Create thread-specific key.  */
#define __libc_key_create(KEY, DESTRUCTOR) \
  __libc_ptf_call (__pthread_key_create, (KEY, DESTRUCTOR), 1)

/* Get thread-specific data.  */
#define __libc_getspecific(KEY) \
  __libc_ptf_call (__pthread_getspecific, (KEY), NULL)

/* Set thread-specific data.  */
#define __libc_setspecific(KEY, VALUE) \
  __libc_ptf_call (__pthread_setspecific, (KEY, VALUE), 0)


/* Register handlers to execute before and after `fork'.  Note that the
   last parameter is NULL.  The handlers registered by the libc are
   never removed so this is OK.  */
#define __libc_atfork(PREPARE, PARENT, CHILD) \
  __register_atfork (PREPARE, PARENT, CHILD, NULL)
extern int __register_atfork (void (*__prepare) (void),
			      void (*__parent) (void),
			      void (*__child) (void),
			      void *__dso_handle);
327 | ||
328 | /* Functions that are used by this file and are internal to the GNU C | |
329 | library. */ | |
330 | ||
331 | extern int __pthread_mutex_init (pthread_mutex_t *__mutex, | |
332 | const pthread_mutexattr_t *__mutex_attr); | |
333 | ||
334 | extern int __pthread_mutex_destroy (pthread_mutex_t *__mutex); | |
335 | ||
336 | extern int __pthread_mutex_trylock (pthread_mutex_t *__mutex); | |
337 | ||
338 | extern int __pthread_mutex_lock (pthread_mutex_t *__mutex); | |
339 | ||
340 | extern int __pthread_mutex_unlock (pthread_mutex_t *__mutex); | |
341 | ||
342 | extern int __pthread_mutexattr_init (pthread_mutexattr_t *__attr); | |
343 | ||
344 | extern int __pthread_mutexattr_destroy (pthread_mutexattr_t *__attr); | |
345 | ||
346 | extern int __pthread_mutexattr_settype (pthread_mutexattr_t *__attr, | |
347 | int __kind); | |
348 | ||
349 | extern int __pthread_rwlock_init (pthread_rwlock_t *__rwlock, | |
350 | const pthread_rwlockattr_t *__attr); | |
351 | ||
352 | extern int __pthread_rwlock_destroy (pthread_rwlock_t *__rwlock); | |
353 | ||
354 | extern int __pthread_rwlock_rdlock (pthread_rwlock_t *__rwlock); | |
355 | ||
356 | extern int __pthread_rwlock_tryrdlock (pthread_rwlock_t *__rwlock); | |
357 | ||
358 | extern int __pthread_rwlock_wrlock (pthread_rwlock_t *__rwlock); | |
359 | ||
360 | extern int __pthread_rwlock_trywrlock (pthread_rwlock_t *__rwlock); | |
361 | ||
362 | extern int __pthread_rwlock_unlock (pthread_rwlock_t *__rwlock); | |
363 | ||
364 | extern int __pthread_key_create (pthread_key_t *__key, | |
365 | void (*__destr_function) (void *)); | |
366 | ||
367 | extern int __pthread_setspecific (pthread_key_t __key, | |
368 | const void *__pointer); | |
369 | ||
370 | extern void *__pthread_getspecific (pthread_key_t __key); | |
371 | ||
372 | extern int __pthread_once (pthread_once_t *__once_control, | |
373 | void (*__init_routine) (void)); | |
374 | ||
375 | extern int __pthread_atfork (void (*__prepare) (void), | |
376 | void (*__parent) (void), | |
377 | void (*__child) (void)); | |
378 | ||
379 | ||
380 | ||
381 | /* Make the pthread functions weak so that we can elide them from | |
382 | single-threaded processes. */ | |
383 | #ifndef __NO_WEAK_PTHREAD_ALIASES | |
384 | # ifdef weak_extern | |
e97ed6dd JM |
385 | weak_extern (__pthread_mutex_init) |
386 | weak_extern (__pthread_mutex_destroy) | |
387 | weak_extern (__pthread_mutex_lock) | |
388 | weak_extern (__pthread_mutex_trylock) | |
389 | weak_extern (__pthread_mutex_unlock) | |
390 | weak_extern (__pthread_mutexattr_init) | |
391 | weak_extern (__pthread_mutexattr_destroy) | |
392 | weak_extern (__pthread_mutexattr_settype) | |
393 | weak_extern (__pthread_rwlock_init) | |
394 | weak_extern (__pthread_rwlock_destroy) | |
395 | weak_extern (__pthread_rwlock_rdlock) | |
396 | weak_extern (__pthread_rwlock_tryrdlock) | |
397 | weak_extern (__pthread_rwlock_wrlock) | |
398 | weak_extern (__pthread_rwlock_trywrlock) | |
399 | weak_extern (__pthread_rwlock_unlock) | |
400 | weak_extern (__pthread_key_create) | |
401 | weak_extern (__pthread_setspecific) | |
402 | weak_extern (__pthread_getspecific) | |
403 | weak_extern (__pthread_once) | |
9463518d UD |
404 | weak_extern (__pthread_initialize) |
405 | weak_extern (__pthread_atfork) | |
e97ed6dd JM |
406 | weak_extern (_pthread_cleanup_push_defer) |
407 | weak_extern (_pthread_cleanup_pop_restore) | |
408 | weak_extern (pthread_setcancelstate) | |
9463518d UD |
409 | # else |
410 | # pragma weak __pthread_mutex_init | |
411 | # pragma weak __pthread_mutex_destroy | |
412 | # pragma weak __pthread_mutex_lock | |
413 | # pragma weak __pthread_mutex_trylock | |
414 | # pragma weak __pthread_mutex_unlock | |
415 | # pragma weak __pthread_mutexattr_init | |
416 | # pragma weak __pthread_mutexattr_destroy | |
417 | # pragma weak __pthread_mutexattr_settype | |
418 | # pragma weak __pthread_rwlock_destroy | |
419 | # pragma weak __pthread_rwlock_rdlock | |
420 | # pragma weak __pthread_rwlock_tryrdlock | |
421 | # pragma weak __pthread_rwlock_wrlock | |
422 | # pragma weak __pthread_rwlock_trywrlock | |
423 | # pragma weak __pthread_rwlock_unlock | |
424 | # pragma weak __pthread_key_create | |
425 | # pragma weak __pthread_setspecific | |
426 | # pragma weak __pthread_getspecific | |
427 | # pragma weak __pthread_once | |
428 | # pragma weak __pthread_initialize | |
429 | # pragma weak __pthread_atfork | |
430 | # pragma weak _pthread_cleanup_push_defer | |
431 | # pragma weak _pthread_cleanup_pop_restore | |
432 | # pragma weak pthread_setcancelstate | |
433 | # endif | |
434 | #endif | |
435 | ||
436 | #endif /* bits/libc-lockP.h */ |