/* Copyright (C) 2002-2015 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/param.h>
#include <not-cancel.h>
#include "pthreadP.h"
#include <lowlevellock.h>
#include <stap-probe.h>

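/* Ports without transactional lock elision do not define these macros;
   fall back to the plain low-level lock.  lll_lock_elision evaluates
   to 0 so callers can treat it like a successful, non-elided
   lll_lock.  */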
#ifndef lll_lock_elision
#define lll_lock_elision(lock, try_lock, private) ({ \
      lll_lock (lock, private); 0; })
#endif

#ifndef lll_trylock_elision
#define lll_trylock_elision(a,t) lll_trylock(a)
#endif

#ifndef LLL_MUTEX_LOCK
# define LLL_MUTEX_LOCK(mutex) \
  lll_lock ((mutex)->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex))
# define LLL_MUTEX_TRYLOCK(mutex) \
  lll_trylock ((mutex)->__data.__lock)
# define LLL_ROBUST_MUTEX_LOCK(mutex, id) \
  lll_robust_lock ((mutex)->__data.__lock, id, \
                   PTHREAD_ROBUST_MUTEX_PSHARED (mutex))
# define LLL_MUTEX_LOCK_ELISION(mutex) \
  lll_lock_elision ((mutex)->__data.__lock, (mutex)->__data.__elision, \
                    PTHREAD_MUTEX_PSHARED (mutex))
# define LLL_MUTEX_TRYLOCK_ELISION(mutex) \
  lll_trylock_elision((mutex)->__data.__lock, (mutex)->__data.__elision, \
                      PTHREAD_MUTEX_PSHARED (mutex))
#endif
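/* These defaults can be overridden before inclusion: in NPTL,
   pthread_mutex_cond_lock.c re-includes this file with its own
   LLL_MUTEX_* definitions and with NO_INCR defined in order to build
   the condvar-internal locking variant.  */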

#ifndef FORCE_ELISION
#define FORCE_ELISION(m, s)
#endif
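/* On targets with elision support (e.g. x86 with TSX), FORCE_ELISION
   may mark a default timed mutex for elision on first use and execute
   the statement s (a goto to the elision path); the default above
   expands to nothing.  */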

static int __pthread_mutex_lock_full (pthread_mutex_t *mutex)
     __attribute_noinline__;

int
__pthread_mutex_lock (mutex)
     pthread_mutex_t *mutex;
{
  assert (sizeof (mutex->__size) >= sizeof (mutex->__data));

  unsigned int type = PTHREAD_MUTEX_TYPE_ELISION (mutex);

  LIBC_PROBE (mutex_entry, 1, mutex);

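  /* Any mutex with bits set beyond the basic kind and the elision
     flags (robust, priority inheritance, priority protection, pshared)
     is handled by the slow path in __pthread_mutex_lock_full.  */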
  if (__builtin_expect (type & ~(PTHREAD_MUTEX_KIND_MASK_NP
                                 | PTHREAD_MUTEX_ELISION_FLAGS_NP), 0))
    return __pthread_mutex_lock_full (mutex);

  if (__glibc_likely (type == PTHREAD_MUTEX_TIMED_NP))
    {
      FORCE_ELISION (mutex, goto elision);
    simple:
      /* Normal mutex.  */
      LLL_MUTEX_LOCK (mutex);
      assert (mutex->__data.__owner == 0);
    }
#ifdef HAVE_ELISION
  else if (__glibc_likely (type == PTHREAD_MUTEX_TIMED_ELISION_NP))
    {
  elision: __attribute__((unused))
      /* This case can never happen on a system without elision,
         as the mutex type initialization functions will not
         allow the elision flags to be set.  */
      /* Don't record owner or users for the elision case.  This is a
         tail call.  */
      return LLL_MUTEX_LOCK_ELISION (mutex);
    }
#endif
  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
                             == PTHREAD_MUTEX_RECURSIVE_NP, 1))
    {
      /* Recursive mutex.  */
      pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;

          return 0;
        }

      /* We have to get the mutex.  */
      LLL_MUTEX_LOCK (mutex);

      assert (mutex->__data.__owner == 0);
      mutex->__data.__count = 1;
    }
  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
                             == PTHREAD_MUTEX_ADAPTIVE_NP, 1))
    {
      if (! __is_smp)
        goto simple;

      if (LLL_MUTEX_TRYLOCK (mutex) != 0)
        {
          int cnt = 0;
          int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
                             mutex->__data.__spins * 2 + 10);
          do
            {
              if (cnt++ >= max_cnt)
                {
                  LLL_MUTEX_LOCK (mutex);
                  break;
                }

#ifdef BUSY_WAIT_NOP
              BUSY_WAIT_NOP;
#endif
            }
          while (LLL_MUTEX_TRYLOCK (mutex) != 0);

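          /* Adapt the spin budget with an exponential moving average:
             __spins += (cnt - __spins) / 8, i.e. the new value is
             7/8 old plus 1/8 observed, so the next max_cnt
             (2 * __spins + 10) tracks recent contention.  */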
          mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
        }
      assert (mutex->__data.__owner == 0);
    }
  else
    {
      pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
      assert (PTHREAD_MUTEX_TYPE (mutex) == PTHREAD_MUTEX_ERRORCHECK_NP);
      /* Check whether we already hold the mutex.  */
      if (__glibc_unlikely (mutex->__data.__owner == id))
        return EDEADLK;
      goto simple;
    }

  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  /* Record the ownership.  */
  mutex->__data.__owner = id;
#ifndef NO_INCR
  ++mutex->__data.__nusers;
#endif

  LIBC_PROBE (mutex_acquired, 1, mutex);

  return 0;
}

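/* Slow path: robust, priority-inheritance and priority-protection
   mutexes.  Returns 0, an errno value such as EDEADLK or EAGAIN, or
   EOWNERDEAD when a robust mutex's previous owner died holding it.  */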
static int
__pthread_mutex_lock_full (pthread_mutex_t *mutex)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  switch (PTHREAD_MUTEX_TYPE (mutex))
    {
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);

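      /* The pending-operation pointer set above lets the kernel's
         robust-futex cleanup find this mutex if the thread dies in the
         middle of acquisition; the FUTEX_OWNER_DIED loop below handles
         the converse case of a previous owner's death.  */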
      oldval = mutex->__data.__lock;
      do
        {
        again:
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id;
#ifdef NO_INCR
              newval |= FUTEX_WAITERS;
#else
              newval |= (oldval & FUTEX_WAITERS);
#endif

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);

              if (newval != oldval)
                {
                  oldval = newval;
                  goto again;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              ENQUEUE_MUTEX (mutex);
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fell
                 through to the end of the function, __nusers would be
                 incremented, which is not correct because the old
                 owner has to be discounted.  If we are not supposed
                 to increment __nusers we actually have to decrement
                 it here.  */
#ifdef NO_INCR
              --mutex->__data.__nusers;
#endif

              return EOWNERDEAD;
            }

          /* Check whether we already hold the mutex.  */
          if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  return 0;
                }
            }

          oldval = LLL_ROBUST_MUTEX_LOCK (mutex, id);

          if (__builtin_expect (mutex->__data.__owner
                                == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
            {
              /* This mutex is now not recoverable.  */
              mutex->__data.__count = 0;
              lll_unlock (mutex->__data.__lock,
                          PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return ENOTRECOVERABLE;
            }
        }
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      mutex->__data.__count = 1;
      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;

      /* The PI support requires the Linux futex system call.  If that's not
         available, pthread_mutex_init should never have allowed the type to
         be set.  So it will get the default case for an invalid type.  */
#ifdef __NR_futex
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
        int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

        if (robust)
          /* Note: robust PI futexes are signaled by setting bit 0.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                         (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                   | 1));

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        int newval = id;
# ifdef NO_INCR
        newval |= FUTEX_WAITERS;
# endif
        oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                      newval, 0);

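        /* If the CAS from 0 failed, some thread owns the lock word.
           FUTEX_LOCK_PI asks the kernel to block us with priority
           inheritance applied to the current owner and to hand the
           lock over (or report the owner's death) once it is free.  */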
        if (oldval != 0)
          {
            /* The mutex is locked.  The kernel will now take care of
               everything.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            INTERNAL_SYSCALL_DECL (__err);
            int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                                      __lll_private_flag (FUTEX_LOCK_PI,
                                                          private), 1, 0);

            if (INTERNAL_SYSCALL_ERROR_P (e, __err)
                && (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
                    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK))
              {
                assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
                        || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
                            && kind != PTHREAD_MUTEX_RECURSIVE_NP));
                /* ESRCH can happen only for non-robust PI mutexes where
                   the owner of the lock died.  */
                assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH || !robust);

                /* Delay the thread indefinitely.  */
                while (1)
                  pause_not_cancel ();
              }

            oldval = mutex->__data.__lock;

            assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
          }

        if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fell
               through to the end of the function, __nusers would be
               incremented, which is not correct because the old owner
               has to be discounted.  If we are not supposed to
               increment __nusers we actually have to decrement it here.  */
# ifdef NO_INCR
            --mutex->__data.__nusers;
# endif

            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            INTERNAL_SYSCALL_DECL (__err);
            INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                              __lll_private_flag (FUTEX_UNLOCK_PI,
                                                  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
                              0, 0);

            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        mutex->__data.__count = 1;
        if (robust)
          {
            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }
      }
      break;
#endif  /* __NR_futex.  */

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }
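
        /* The lock word of a PP mutex encodes the priority ceiling in
           PTHREAD_MUTEX_PRIO_CEILING_MASK; the low bits carry the lock
           state: ceilval means unlocked, ceilval | 1 locked, and
           ceilval | 2 locked with waiters.  */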
        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                          >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return EINVAL;
              }

            int retval = __pthread_tpp_change_priority (oldprio, ceiling);
            if (retval)
              return retval;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
#ifdef NO_INCR
                                                     ceilval | 2,
#else
                                                     ceilval | 1,
#endif
                                                     ceilval);

            if (oldval == ceilval)
              break;

            do
              {
                oldval
                  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                         ceilval | 2,
                                                         ceilval | 1);

                if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
                  break;

                if (oldval != ceilval)
                  lll_futex_wait (&mutex->__data.__lock, ceilval | 2,
                                  PTHREAD_MUTEX_PSHARED (mutex));
              }
            while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        ceilval | 2, ceilval)
                   != ceilval);
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        assert (mutex->__data.__owner == 0);
        mutex->__data.__count = 1;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  /* Record the ownership.  */
  mutex->__data.__owner = id;
#ifndef NO_INCR
  ++mutex->__data.__nusers;
#endif

  LIBC_PROBE (mutex_acquired, 1, mutex);

  return 0;
}
#ifndef __pthread_mutex_lock
strong_alias (__pthread_mutex_lock, pthread_mutex_lock)
hidden_def (__pthread_mutex_lock)
#endif

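
/* When this file is built with NO_INCR (the pthread_mutex_cond_lock.c
   inclusion), also provide the helper that records ownership of a PI
   mutex the kernel has already handed to us, e.g. after a requeue-PI
   condvar wait; only the user-level bookkeeping is missing then.  */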
#ifdef NO_INCR
void
__pthread_mutex_cond_lock_adjust (mutex)
     pthread_mutex_t *mutex;
{
  assert ((mutex->__data.__kind & PTHREAD_MUTEX_PRIO_INHERIT_NP) != 0);
  assert ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0);
  assert ((mutex->__data.__kind & PTHREAD_MUTEX_PSHARED_BIT) == 0);

  /* Record the ownership.  */
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  mutex->__data.__owner = id;

  if (mutex->__data.__kind == PTHREAD_MUTEX_PI_RECURSIVE_NP)
    ++mutex->__data.__count;
}
#endif