/* Copyright (C) 2002-2021 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/param.h>
#include <not-cancel.h>
#include "pthreadP.h"
#include <atomic.h>
#include <futex-internal.h>
#include <stap-probe.h>

/* Some of the following definitions differ when pthread_mutex_cond_lock.c
   includes this file.  */
#ifndef LLL_MUTEX_LOCK
/* lll_lock with single-thread optimization.  */
static inline void
lll_mutex_lock_optimized (pthread_mutex_t *mutex)
{
  /* The single-threaded optimization is only valid for private
     mutexes.  For process-shared mutexes, the mutex could be in a
     shared mapping, so synchronization with another process is needed
     even without any threads.  If the lock is already marked as
     acquired, POSIX requires that pthread_mutex_lock deadlocks for
     normal mutexes, so skip the optimization in that case as
     well.  */
  int private = PTHREAD_MUTEX_PSHARED (mutex);
  if (private == LLL_PRIVATE && SINGLE_THREAD_P && mutex->__data.__lock == 0)
    mutex->__data.__lock = 1;
  else
    lll_lock (mutex->__data.__lock, private);
}

# define LLL_MUTEX_LOCK(mutex) \
  lll_lock ((mutex)->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex))
# define LLL_MUTEX_LOCK_OPTIMIZED(mutex) lll_mutex_lock_optimized (mutex)
# define LLL_MUTEX_TRYLOCK(mutex) \
  lll_trylock ((mutex)->__data.__lock)
# define LLL_ROBUST_MUTEX_LOCK_MODIFIER 0
# define LLL_MUTEX_LOCK_ELISION(mutex) \
  lll_lock_elision ((mutex)->__data.__lock, (mutex)->__data.__elision, \
                    PTHREAD_MUTEX_PSHARED (mutex))
# define LLL_MUTEX_TRYLOCK_ELISION(mutex) \
  lll_trylock_elision ((mutex)->__data.__lock, (mutex)->__data.__elision, \
                       PTHREAD_MUTEX_PSHARED (mutex))
#endif
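
/* Illustrative sketch, not part of this file: a mutex initialized as
   process-shared never takes the single-thread fast path above, because
   another process can access the same mapping even while this process has
   only one thread.  Only standard pthread calls are used; the function
   name is made up for the example.  */
#if 0
static void
example_init_shared_mutex (pthread_mutex_t *m)
{
  pthread_mutexattr_t attr;
  pthread_mutexattr_init (&attr);
  /* PTHREAD_PROCESS_SHARED means PTHREAD_MUTEX_PSHARED (m) is not
     LLL_PRIVATE, so lll_mutex_lock_optimized falls back to lll_lock.  */
  pthread_mutexattr_setpshared (&attr, PTHREAD_PROCESS_SHARED);
  pthread_mutex_init (m, &attr);
  pthread_mutexattr_destroy (&attr);
}
#endif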

static int __pthread_mutex_lock_full (pthread_mutex_t *mutex)
     __attribute_noinline__;

int
__pthread_mutex_lock (pthread_mutex_t *mutex)
{
  /* See concurrency notes regarding mutex type which is loaded from __kind
     in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h.  */
  unsigned int type = PTHREAD_MUTEX_TYPE_ELISION (mutex);

  LIBC_PROBE (mutex_entry, 1, mutex);

  if (__builtin_expect (type & ~(PTHREAD_MUTEX_KIND_MASK_NP
                                 | PTHREAD_MUTEX_ELISION_FLAGS_NP), 0))
    return __pthread_mutex_lock_full (mutex);

  if (__glibc_likely (type == PTHREAD_MUTEX_TIMED_NP))
    {
      FORCE_ELISION (mutex, goto elision);
    simple:
      /* Normal mutex.  */
      LLL_MUTEX_LOCK_OPTIMIZED (mutex);
      assert (mutex->__data.__owner == 0);
    }
#if ENABLE_ELISION_SUPPORT
  else if (__glibc_likely (type == PTHREAD_MUTEX_TIMED_ELISION_NP))
    {
  elision: __attribute__((unused))
      /* This case can never happen on a system without elision,
         as the mutex type initialization functions will not
         allow the elision flags to be set.  */
      /* Don't record owner or users for elision case.  This is a
         tail call.  */
      return LLL_MUTEX_LOCK_ELISION (mutex);
    }
#endif
  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
                             == PTHREAD_MUTEX_RECURSIVE_NP, 1))
    {
      /* Recursive mutex.  */
      pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;

          return 0;
        }

      /* We have to get the mutex.  */
      LLL_MUTEX_LOCK_OPTIMIZED (mutex);

      assert (mutex->__data.__owner == 0);
      mutex->__data.__count = 1;
    }
  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
                             == PTHREAD_MUTEX_ADAPTIVE_NP, 1))
    {
      if (LLL_MUTEX_TRYLOCK (mutex) != 0)
        {
          int cnt = 0;
          int max_cnt = MIN (max_adaptive_count (),
                             mutex->__data.__spins * 2 + 10);
          do
            {
              if (cnt++ >= max_cnt)
                {
                  LLL_MUTEX_LOCK (mutex);
                  break;
                }
              atomic_spin_nop ();
            }
          while (LLL_MUTEX_TRYLOCK (mutex) != 0);

          mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
        }
      assert (mutex->__data.__owner == 0);
    }
  else
    {
      pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
      assert (PTHREAD_MUTEX_TYPE (mutex) == PTHREAD_MUTEX_ERRORCHECK_NP);
      /* Check whether we already hold the mutex.  */
      if (__glibc_unlikely (mutex->__data.__owner == id))
        return EDEADLK;
      goto simple;
    }

  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  /* Record the ownership.  */
  mutex->__data.__owner = id;
#ifndef NO_INCR
  ++mutex->__data.__nusers;
#endif

  LIBC_PROBE (mutex_acquired, 1, mutex);

  return 0;
}
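
/* Illustrative sketch, not part of this file: requesting the adaptive
   spin-then-block behaviour implemented above.  PTHREAD_MUTEX_ADAPTIVE_NP
   is a GNU extension; the function name is made up for the example.  The
   spin limit adapts because __spins is moved 1/8 of the way toward the
   spin count observed on each contended acquisition.  */
#if 0
static void
example_init_adaptive_mutex (pthread_mutex_t *m)
{
  pthread_mutexattr_t attr;
  pthread_mutexattr_init (&attr);
  pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
  pthread_mutex_init (m, &attr);
  pthread_mutexattr_destroy (&attr);
}
#endif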

static int
__pthread_mutex_lock_full (pthread_mutex_t *mutex)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  switch (PTHREAD_MUTEX_TYPE (mutex))
    {
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);
      /* We need to set op_pending before starting the operation.  Also
         see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");

      oldval = mutex->__data.__lock;
      /* This is set to FUTEX_WAITERS iff we might have shared the
         FUTEX_WAITERS flag with other threads, and therefore need to keep it
         set to avoid lost wake-ups.  We have the same requirement in the
         simple mutex algorithm.
         We start with value zero for a normal mutex, and FUTEX_WAITERS if we
         are building the special case mutexes for use from within condition
         variables.  */
      unsigned int assume_other_futex_waiters = LLL_ROBUST_MUTEX_LOCK_MODIFIER;
      while (1)
        {
          /* Try to acquire the lock through a CAS from 0 (not acquired) to
             our TID | assume_other_futex_waiters.  */
          if (__glibc_likely (oldval == 0))
            {
              oldval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                    id | assume_other_futex_waiters, 0);
              if (__glibc_likely (oldval == 0))
                break;
            }

          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id;
#ifdef NO_INCR
              /* We are not taking assume_other_futex_waiters into account
                 here simply because we'll set FUTEX_WAITERS anyway.  */
              newval |= FUTEX_WAITERS;
#else
              newval |= (oldval & FUTEX_WAITERS) | assume_other_futex_waiters;
#endif

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);

              if (newval != oldval)
                {
                  oldval = newval;
                  continue;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              /* We must not enqueue the mutex before we have acquired it.
                 Also see comments at ENQUEUE_MUTEX.  */
              __asm ("" ::: "memory");
              ENQUEUE_MUTEX (mutex);
              /* We need to clear op_pending after we enqueue the mutex.  */
              __asm ("" ::: "memory");
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  If we are not supposed
                 to increment __nusers we actually have to decrement
                 it here.  */
#ifdef NO_INCR
              --mutex->__data.__nusers;
#endif

              return EOWNERDEAD;
            }

          /* Check whether we already hold the mutex.  */
          if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  /* We do not need to ensure ordering wrt another memory
                     access.  Also see comments at ENQUEUE_MUTEX.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  /* We do not need to ensure ordering wrt another memory
                     access.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  return 0;
                }
            }

          /* We cannot acquire the mutex nor has its owner died.  Thus, try
             to block using futexes.  Set FUTEX_WAITERS if necessary so that
             other threads are aware that there are potentially threads
             blocked on the futex.  Restart if oldval changed in the
             meantime.  */
          if ((oldval & FUTEX_WAITERS) == 0)
            {
              if (atomic_compare_and_exchange_bool_acq (&mutex->__data.__lock,
                                                        oldval | FUTEX_WAITERS,
                                                        oldval)
                  != 0)
                {
                  oldval = mutex->__data.__lock;
                  continue;
                }
              oldval |= FUTEX_WAITERS;
            }

          /* It is now possible that we share the FUTEX_WAITERS flag with
             another thread; therefore, update assume_other_futex_waiters so
             that we do not forget about this when handling other cases
             above and thus do not cause lost wake-ups.  */
          assume_other_futex_waiters |= FUTEX_WAITERS;

          /* Block using the futex and reload current lock value.  */
          futex_wait ((unsigned int *) &mutex->__data.__lock, oldval,
                      PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
          oldval = mutex->__data.__lock;
        }

      /* We have acquired the mutex; check if it is still consistent.  */
      if (__builtin_expect (mutex->__data.__owner
                            == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
        {
          /* This mutex is now not recoverable.  */
          mutex->__data.__count = 0;
          int private = PTHREAD_ROBUST_MUTEX_PSHARED (mutex);
          lll_unlock (mutex->__data.__lock, private);
          /* FIXME This violates the mutex destruction requirements.  See
             __pthread_mutex_unlock_full.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          return ENOTRECOVERABLE;
        }

      mutex->__data.__count = 1;
      /* We must not enqueue the mutex before we have acquired it.
         Also see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");
      ENQUEUE_MUTEX (mutex);
      /* We need to clear op_pending after we enqueue the mutex.  */
      __asm ("" ::: "memory");
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;

      /* The PI support requires the Linux futex system call.  If that's not
         available, pthread_mutex_init should never have allowed the type to
         be set.  So it will get the default case for an invalid type.  */
#ifdef __NR_futex
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind, robust;
        {
          /* See concurrency notes regarding __kind in struct __pthread_mutex_s
             in sysdeps/nptl/bits/thread-shared-types.h.  */
          int mutex_kind = atomic_load_relaxed (&(mutex->__data.__kind));
          kind = mutex_kind & PTHREAD_MUTEX_KIND_MASK_NP;
          robust = mutex_kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
        }

        if (robust)
          {
            /* Note: robust PI futexes are signaled by setting bit 0.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                           (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                     | 1));
            /* We need to set op_pending before starting the operation.  Also
               see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
          }

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                /* We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        int newval = id;
# ifdef NO_INCR
        newval |= FUTEX_WAITERS;
# endif
        oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                      newval, 0);

        if (oldval != 0)
          {
            /* The mutex is locked.  The kernel will now take care of
               everything.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            int e = futex_lock_pi64 (&mutex->__data.__lock, NULL, private);
            if (e == ESRCH || e == EDEADLK)
              {
                assert (e != EDEADLK
                        || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
                            && kind != PTHREAD_MUTEX_RECURSIVE_NP));
                /* ESRCH can happen only for non-robust PI mutexes where
                   the owner of the lock died.  */
                assert (e != ESRCH || !robust);

                /* Delay the thread indefinitely.  */
                while (1)
                  __futex_abstimed_wait64 (&(unsigned int){0}, 0,
                                           0 /* ignored */, NULL, private);
              }

            oldval = mutex->__data.__lock;

            assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
          }

        if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            /* We must not enqueue the mutex before we have acquired it.
               Also see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
            ENQUEUE_MUTEX_PI (mutex);
            /* We need to clear op_pending after we enqueue the mutex.  */
            __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  If we are not supposed to
               increment __nusers we actually have to decrement it here.  */
# ifdef NO_INCR
            --mutex->__data.__nusers;
# endif

            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            futex_unlock_pi ((unsigned int *) &mutex->__data.__lock,
                             PTHREAD_ROBUST_MUTEX_PSHARED (mutex));

            /* To the kernel, this will be visible after the kernel has
               acquired the mutex in the syscall.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        mutex->__data.__count = 1;
        if (robust)
          {
            /* We must not enqueue the mutex before we have acquired it.
               Also see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
            ENQUEUE_MUTEX_PI (mutex);
            /* We need to clear op_pending after we enqueue the mutex.  */
            __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }
      }
      break;
#endif /* __NR_futex.  */

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        /* See concurrency notes regarding __kind in struct __pthread_mutex_s
           in sysdeps/nptl/bits/thread-shared-types.h.  */
        int kind = atomic_load_relaxed (&(mutex->__data.__kind))
          & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                          >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return EINVAL;
              }

            int retval = __pthread_tpp_change_priority (oldprio, ceiling);
            if (retval)
              return retval;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
#ifdef NO_INCR
                                                     ceilval | 2,
#else
                                                     ceilval | 1,
#endif
                                                     ceilval);

            if (oldval == ceilval)
              break;

            do
              {
                oldval
                  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                         ceilval | 2,
                                                         ceilval | 1);

                if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
                  break;

                if (oldval != ceilval)
                  futex_wait ((unsigned int *) &mutex->__data.__lock,
                              ceilval | 2,
                              PTHREAD_MUTEX_PSHARED (mutex));
              }
            while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        ceilval | 2, ceilval)
                   != ceilval);
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        assert (mutex->__data.__owner == 0);
        mutex->__data.__count = 1;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  /* Record the ownership.  */
  mutex->__data.__owner = id;
#ifndef NO_INCR
  ++mutex->__data.__nusers;
#endif

  LIBC_PROBE (mutex_acquired, 1, mutex);

  return 0;
}
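
/* Illustrative sketch, not part of this file: how a caller is expected to
   handle the EOWNERDEAD and ENOTRECOVERABLE results produced by the robust
   paths above.  Only standard pthread calls are used; the function name and
   the recovery step are made up for the example.  */
#if 0
static int
example_lock_robust (pthread_mutex_t *m)
{
  int err = pthread_mutex_lock (m);
  if (err == EOWNERDEAD)
    {
      /* The previous owner died while holding the mutex.  Repair the
         protected data, then mark the mutex consistent again; otherwise a
         later unlock makes it permanently ENOTRECOVERABLE.  */
      /* ... recover application state here ... */
      pthread_mutex_consistent (m);
      err = 0;
    }
  return err;
}
#endif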
#ifndef __pthread_mutex_lock
weak_alias (__pthread_mutex_lock, pthread_mutex_lock)
hidden_def (__pthread_mutex_lock)
#endif
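
/* Illustrative sketch, not part of this file: selecting the protocols
   handled by the PI and PP cases in __pthread_mutex_lock_full.  Only
   standard pthread calls are used; the function name and the ceiling value
   are made up for the example.  */
#if 0
static void
example_init_pi_and_pp_mutexes (pthread_mutex_t *pi, pthread_mutex_t *pp)
{
  pthread_mutexattr_t attr;

  /* Priority inheritance: lands in the PTHREAD_MUTEX_PI_* cases, which use
     the futex_lock_pi64 kernel path.  */
  pthread_mutexattr_init (&attr);
  pthread_mutexattr_setprotocol (&attr, PTHREAD_PRIO_INHERIT);
  pthread_mutex_init (pi, &attr);
  pthread_mutexattr_destroy (&attr);

  /* Priority protection: lands in the PTHREAD_MUTEX_PP_* cases, which raise
     the caller to the configured ceiling before taking the lock.  */
  pthread_mutexattr_init (&attr);
  pthread_mutexattr_setprotocol (&attr, PTHREAD_PRIO_PROTECT);
  pthread_mutexattr_setprioceiling (&attr, 10);
  pthread_mutex_init (pp, &attr);
  pthread_mutexattr_destroy (&attr);
}
#endif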


#ifdef NO_INCR
void
__pthread_mutex_cond_lock_adjust (pthread_mutex_t *mutex)
{
  /* See concurrency notes regarding __kind in struct __pthread_mutex_s
     in sysdeps/nptl/bits/thread-shared-types.h.  */
  int mutex_kind = atomic_load_relaxed (&(mutex->__data.__kind));
  assert ((mutex_kind & PTHREAD_MUTEX_PRIO_INHERIT_NP) != 0);
  assert ((mutex_kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0);
  assert ((mutex_kind & PTHREAD_MUTEX_PSHARED_BIT) == 0);

  /* Record the ownership.  */
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  mutex->__data.__owner = id;

  if (mutex_kind == PTHREAD_MUTEX_PI_RECURSIVE_NP)
    ++mutex->__data.__count;
}
#endif