/* nptl/pthread_mutex_lock.c, from glibc (git.ipfire.org mirror of
   thirdparty/glibc.git), as of commit "Avoid extra load with CAS in
   __pthread_mutex_lock_full [BZ #28537]".  */
/* Copyright (C) 2002-2021 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/param.h>
#include <not-cancel.h>
#include "pthreadP.h"
#include <atomic.h>
#include <futex-internal.h>
#include <stap-probe.h>
#include <shlib-compat.h>

/* Some of the following definitions differ when pthread_mutex_cond_lock.c
   includes this file.  */
#ifndef LLL_MUTEX_LOCK
/* lll_lock with single-thread optimization.  */
static inline void
lll_mutex_lock_optimized (pthread_mutex_t *mutex)
{
  /* The single-threaded optimization is only valid for private
     mutexes.  For process-shared mutexes, the mutex could be in a
     shared mapping, so synchronization with another process is needed
     even without any threads.  If the lock is already marked as
     acquired, POSIX requires that pthread_mutex_lock deadlocks for
     normal mutexes, so skip the optimization in that case as
     well.  */
  int private = PTHREAD_MUTEX_PSHARED (mutex);
  if (private == LLL_PRIVATE && SINGLE_THREAD_P && mutex->__data.__lock == 0)
    mutex->__data.__lock = 1;
  else
    lll_lock (mutex->__data.__lock, private);
}

# define LLL_MUTEX_LOCK(mutex) \
  lll_lock ((mutex)->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex))
# define LLL_MUTEX_LOCK_OPTIMIZED(mutex) lll_mutex_lock_optimized (mutex)
# define LLL_MUTEX_TRYLOCK(mutex) \
  lll_trylock ((mutex)->__data.__lock)
# define LLL_ROBUST_MUTEX_LOCK_MODIFIER 0
# define LLL_MUTEX_LOCK_ELISION(mutex) \
  lll_lock_elision ((mutex)->__data.__lock, (mutex)->__data.__elision, \
                    PTHREAD_MUTEX_PSHARED (mutex))
# define LLL_MUTEX_TRYLOCK_ELISION(mutex) \
  lll_trylock_elision((mutex)->__data.__lock, (mutex)->__data.__elision, \
                      PTHREAD_MUTEX_PSHARED (mutex))
# define PTHREAD_MUTEX_LOCK ___pthread_mutex_lock
# define PTHREAD_MUTEX_VERSIONS 1
#endif
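
/* For illustration: when pthread_mutex_cond_lock.c includes this file,
   it provides its own versions of the macros above before the #include.
   A sketch of the idea (not the verbatim contents of that file): the
   cond-lock build maps LLL_MUTEX_LOCK onto lll_cond_lock and sets

     #define LLL_ROBUST_MUTEX_LOCK_MODIFIER FUTEX_WAITERS

   so that a mutex re-acquired on behalf of a condition variable always
   assumes other futex waiters may exist; that is why
   LLL_ROBUST_MUTEX_LOCK_MODIFIER exists.  */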

static int __pthread_mutex_lock_full (pthread_mutex_t *mutex)
     __attribute_noinline__;

int
PTHREAD_MUTEX_LOCK (pthread_mutex_t *mutex)
{
  /* See concurrency notes regarding mutex type which is loaded from __kind
     in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h.  */
  unsigned int type = PTHREAD_MUTEX_TYPE_ELISION (mutex);

  LIBC_PROBE (mutex_entry, 1, mutex);

  if (__builtin_expect (type & ~(PTHREAD_MUTEX_KIND_MASK_NP
                                 | PTHREAD_MUTEX_ELISION_FLAGS_NP), 0))
    return __pthread_mutex_lock_full (mutex);

  if (__glibc_likely (type == PTHREAD_MUTEX_TIMED_NP))
    {
      FORCE_ELISION (mutex, goto elision);
    simple:
      /* Normal mutex.  */
      LLL_MUTEX_LOCK_OPTIMIZED (mutex);
      assert (mutex->__data.__owner == 0);
    }
#if ENABLE_ELISION_SUPPORT
  else if (__glibc_likely (type == PTHREAD_MUTEX_TIMED_ELISION_NP))
    {
  elision: __attribute__((unused))
      /* This case can never happen on a system without elision, as the
         mutex type initialization functions will not allow the elision
         flags to be set.  */
      /* Don't record owner or users for elision case.  This is a
         tail call.  */
      return LLL_MUTEX_LOCK_ELISION (mutex);
    }
#endif
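/* Background note: lock elision, where the hardware supports it (for
   example Intel TSX), first tries to run the critical section as a
   memory transaction and only falls back to the real lock on abort.
   A successfully elided lock must not write to the mutex object at
   all, or concurrent elided lockers would conflict; that is why the
   branch above neither records __owner nor bumps __nusers and instead
   tail-calls LLL_MUTEX_LOCK_ELISION.  */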
  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
                             == PTHREAD_MUTEX_RECURSIVE_NP, 1))
    {
      /* Recursive mutex.  */
      pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;

          return 0;
        }

      /* We have to get the mutex.  */
      LLL_MUTEX_LOCK_OPTIMIZED (mutex);

      assert (mutex->__data.__owner == 0);
      mutex->__data.__count = 1;
    }
  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
                             == PTHREAD_MUTEX_ADAPTIVE_NP, 1))
    {
      if (LLL_MUTEX_TRYLOCK (mutex) != 0)
        {
          int cnt = 0;
          int max_cnt = MIN (max_adaptive_count (),
                             mutex->__data.__spins * 2 + 10);
          do
            {
              if (cnt++ >= max_cnt)
                {
                  LLL_MUTEX_LOCK (mutex);
                  break;
                }
              atomic_spin_nop ();
            }
          while (LLL_MUTEX_TRYLOCK (mutex) != 0);

          mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
        }
      assert (mutex->__data.__owner == 0);
    }
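  /* The update of __spins above is an exponential moving average with
     weight 1/8: __spins += (cnt - __spins) / 8.  Worked example: with
     __spins == 0 the spin budget is MIN (max_adaptive_count (), 10); if
     the whole budget is used before the lock is obtained, __spins rises
     by roughly 10 / 8 = 1, raising the next budget to 2 * 1 + 10 = 12,
     and so on.  Quickly handed-over locks keep the budget small, while
     longer hold times raise it gradually.  */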
  else
    {
      pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
      assert (PTHREAD_MUTEX_TYPE (mutex) == PTHREAD_MUTEX_ERRORCHECK_NP);
      /* Check whether we already hold the mutex.  */
      if (__glibc_unlikely (mutex->__data.__owner == id))
        return EDEADLK;
      goto simple;
    }

  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  /* Record the ownership.  */
  mutex->__data.__owner = id;
#ifndef NO_INCR
  ++mutex->__data.__nusers;
#endif

  LIBC_PROBE (mutex_acquired, 1, mutex);

  return 0;
}
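
/* Caller-visible behavior of the fast path, as a stand-alone sketch
   (a hypothetical example program, not part of glibc):

     #include <pthread.h>
     #include <assert.h>

     static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

     void critical (void)
     {
       assert (pthread_mutex_lock (&m) == 0);
       // ... critical section ...
       assert (pthread_mutex_unlock (&m) == 0);
     }

   A statically initialized mutex has the default kind
   PTHREAD_MUTEX_TIMED_NP and therefore takes the "simple" branch in
   PTHREAD_MUTEX_LOCK above.  */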

static int
__pthread_mutex_lock_full (pthread_mutex_t *mutex)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  switch (PTHREAD_MUTEX_TYPE (mutex))
    {
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);
      /* We need to set op_pending before starting the operation.  Also
         see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");

      oldval = mutex->__data.__lock;
      /* This is set to FUTEX_WAITERS iff we might have shared the
         FUTEX_WAITERS flag with other threads, and therefore need to keep it
         set to avoid lost wake-ups.  We have the same requirement in the
         simple mutex algorithm.
         We start with value zero for a normal mutex, and FUTEX_WAITERS if we
         are building the special case mutexes for use from within condition
         variables.  */
      unsigned int assume_other_futex_waiters = LLL_ROBUST_MUTEX_LOCK_MODIFIER;
      while (1)
        {
          /* Try to acquire the lock through a CAS from 0 (not acquired) to
             our TID | assume_other_futex_waiters.  */
          if (__glibc_likely (oldval == 0))
            {
              oldval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                    id | assume_other_futex_waiters, 0);
              if (__glibc_likely (oldval == 0))
                break;
            }

          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id;
#ifdef NO_INCR
              /* We are not taking assume_other_futex_waiters into account
                 here simply because we'll set FUTEX_WAITERS anyway.  */
              newval |= FUTEX_WAITERS;
#else
              newval |= (oldval & FUTEX_WAITERS) | assume_other_futex_waiters;
#endif

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);

              if (newval != oldval)
                {
                  oldval = newval;
                  continue;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              /* We must not enqueue the mutex before we have acquired it.
                 Also see comments at ENQUEUE_MUTEX.  */
              __asm ("" ::: "memory");
              ENQUEUE_MUTEX (mutex);
              /* We need to clear op_pending after we enqueue the mutex.  */
              __asm ("" ::: "memory");
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  If we are not supposed
                 to increment __nusers we actually have to decrement
                 it here.  */
#ifdef NO_INCR
              --mutex->__data.__nusers;
#endif

              return EOWNERDEAD;
            }

          /* Check whether we already hold the mutex.  */
          if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  /* We do not need to ensure ordering wrt another memory
                     access.  Also see comments at ENQUEUE_MUTEX.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  /* We do not need to ensure ordering wrt another memory
                     access.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  return 0;
                }
            }

          /* We cannot acquire the mutex nor has its owner died.  Thus, try
             to block using futexes.  Set FUTEX_WAITERS if necessary so that
             other threads are aware that there are potentially threads
             blocked on the futex.  Restart if oldval changed in the
             meantime.  */
          if ((oldval & FUTEX_WAITERS) == 0)
            {
              int val;
              if ((val = atomic_compare_and_exchange_val_acq
                   (&mutex->__data.__lock, oldval | FUTEX_WAITERS,
                    oldval)) != oldval)
                {
                  oldval = val;
                  continue;
                }
              oldval |= FUTEX_WAITERS;
            }

          /* It is now possible that we share the FUTEX_WAITERS flag with
             another thread; therefore, update assume_other_futex_waiters so
             that we do not forget about this when handling other cases
             above and thus do not cause lost wake-ups.  */
          assume_other_futex_waiters |= FUTEX_WAITERS;

          /* Block using the futex and reload current lock value.  */
          futex_wait ((unsigned int *) &mutex->__data.__lock, oldval,
                      PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
          oldval = mutex->__data.__lock;
        }

      /* We have acquired the mutex; check if it is still consistent.  */
      if (__builtin_expect (mutex->__data.__owner
                            == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
        {
          /* This mutex is now not recoverable.  */
          mutex->__data.__count = 0;
          int private = PTHREAD_ROBUST_MUTEX_PSHARED (mutex);
          lll_unlock (mutex->__data.__lock, private);
          /* FIXME This violates the mutex destruction requirements.  See
             __pthread_mutex_unlock_full.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          return ENOTRECOVERABLE;
        }

      mutex->__data.__count = 1;
      /* We must not enqueue the mutex before we have acquired it.
         Also see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");
      ENQUEUE_MUTEX (mutex);
      /* We need to clear op_pending after we enqueue the mutex.  */
      __asm ("" ::: "memory");
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;
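      /* For reference, the lock word used by the robust code above packs
         three fields into one 32-bit futex; the masks are part of the
         Linux futex ABI (<linux/futex.h>):

           FUTEX_WAITERS     0x80000000  threads may be blocked in the kernel
           FUTEX_OWNER_DIED  0x40000000  previous owner died without unlocking
           FUTEX_TID_MASK    0x3fffffff  TID of the owner, 0 when unlocked

         Acquisition is therefore a CAS from 0 to id, OR'ed with whatever
         waiter state must be preserved.  */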

      /* The PI support requires the Linux futex system call.  If that's not
         available, pthread_mutex_init should never have allowed the type to
         be set.  So it will get the default case for an invalid type.  */
#ifdef __NR_futex
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind, robust;
        {
          /* See concurrency notes regarding __kind in struct __pthread_mutex_s
             in sysdeps/nptl/bits/thread-shared-types.h.  */
          int mutex_kind = atomic_load_relaxed (&(mutex->__data.__kind));
          kind = mutex_kind & PTHREAD_MUTEX_KIND_MASK_NP;
          robust = mutex_kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
        }

        if (robust)
          {
            /* Note: robust PI futexes are signaled by setting bit 0.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                           (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                     | 1));
            /* We need to set op_pending before starting the operation.  Also
               see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
          }

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                /* We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        int newval = id;
# ifdef NO_INCR
        newval |= FUTEX_WAITERS;
# endif
        oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                      newval, 0);

        if (oldval != 0)
          {
            /* The mutex is locked.  The kernel will now take care of
               everything.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            int e = __futex_lock_pi64 (&mutex->__data.__lock, 0 /* unused */,
                                       NULL, private);
            if (e == ESRCH || e == EDEADLK)
              {
                assert (e != EDEADLK
                        || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
                            && kind != PTHREAD_MUTEX_RECURSIVE_NP));
                /* ESRCH can happen only for non-robust PI mutexes where
                   the owner of the lock died.  */
                assert (e != ESRCH || !robust);

                /* Delay the thread indefinitely.  */
                while (1)
                  __futex_abstimed_wait64 (&(unsigned int){0}, 0,
                                           0 /* ignored */, NULL, private);
              }

            oldval = mutex->__data.__lock;

            assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
          }

        if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            /* We must not enqueue the mutex before we have acquired it.
               Also see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
            ENQUEUE_MUTEX_PI (mutex);
            /* We need to clear op_pending after we enqueue the mutex.  */
            __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  If we are not supposed to
               increment __nusers we actually have to decrement it here.  */
# ifdef NO_INCR
            --mutex->__data.__nusers;
# endif

            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            futex_unlock_pi ((unsigned int *) &mutex->__data.__lock,
                             PTHREAD_ROBUST_MUTEX_PSHARED (mutex));

            /* To the kernel, this will be visible after the kernel has
               acquired the mutex in the syscall.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        mutex->__data.__count = 1;
        if (robust)
          {
            /* We must not enqueue the mutex before we have acquired it.
               Also see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
            ENQUEUE_MUTEX_PI (mutex);
            /* We need to clear op_pending after we enqueue the mutex.  */
            __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }
      }
      break;
#endif  /* __NR_futex.  */
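    /* Note on the PI slow path: FUTEX_LOCK_PI (reached through
       __futex_lock_pi64 above) makes the kernel do the arbitration: it
       installs the winner's TID in the lock word, maintains the priority
       inheritance chain, and sets FUTEX_WAITERS itself.  That is why this
       case has no userspace waiter-flag loop like the robust case above.  */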

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        /* See concurrency notes regarding __kind in struct __pthread_mutex_s
           in sysdeps/nptl/bits/thread-shared-types.h.  */
        int kind = atomic_load_relaxed (&(mutex->__data.__kind))
          & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                          >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return EINVAL;
              }

            int retval = __pthread_tpp_change_priority (oldprio, ceiling);
            if (retval)
              return retval;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
#ifdef NO_INCR
                                                     ceilval | 2,
#else
                                                     ceilval | 1,
#endif
                                                     ceilval);

            if (oldval == ceilval)
              break;

            do
              {
                oldval
                  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                         ceilval | 2,
                                                         ceilval | 1);

                if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
                  break;

                if (oldval != ceilval)
                  futex_wait ((unsigned int *) &mutex->__data.__lock,
                              ceilval | 2,
                              PTHREAD_MUTEX_PSHARED (mutex));
              }
            while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        ceilval | 2, ceilval)
                   != ceilval);
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        assert (mutex->__data.__owner == 0);
        mutex->__data.__count = 1;
      }
      break;
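    /* Encoding used by the priority-protection loop above: the upper
       bits of __lock (PTHREAD_MUTEX_PRIO_CEILING_MASK) hold the current
       ceiling, while the low bits encode the lock state: ceilval means
       unlocked, ceilval | 1 locked, and ceilval | 2 locked with
       potential waiters, which is the value the futex_wait blocks on.  */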

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  /* Record the ownership.  */
  mutex->__data.__owner = id;
#ifndef NO_INCR
  ++mutex->__data.__nusers;
#endif

  LIBC_PROBE (mutex_acquired, 1, mutex);

  return 0;
}

#if PTHREAD_MUTEX_VERSIONS
libc_hidden_ver (___pthread_mutex_lock, __pthread_mutex_lock)
# ifndef SHARED
strong_alias (___pthread_mutex_lock, __pthread_mutex_lock)
# endif
versioned_symbol (libpthread, ___pthread_mutex_lock, pthread_mutex_lock,
                  GLIBC_2_0);

# if OTHER_SHLIB_COMPAT (libpthread, GLIBC_2_0, GLIBC_2_34)
compat_symbol (libpthread, ___pthread_mutex_lock, __pthread_mutex_lock,
               GLIBC_2_0);
# endif
#endif /* PTHREAD_MUTEX_VERSIONS */


#ifdef NO_INCR
void
__pthread_mutex_cond_lock_adjust (pthread_mutex_t *mutex)
{
  /* See concurrency notes regarding __kind in struct __pthread_mutex_s
     in sysdeps/nptl/bits/thread-shared-types.h.  */
  int mutex_kind = atomic_load_relaxed (&(mutex->__data.__kind));
  assert ((mutex_kind & PTHREAD_MUTEX_PRIO_INHERIT_NP) != 0);
  assert ((mutex_kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0);
  assert ((mutex_kind & PTHREAD_MUTEX_PSHARED_BIT) == 0);

  /* Record the ownership.  */
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  mutex->__data.__owner = id;

  if (mutex_kind == PTHREAD_MUTEX_PI_RECURSIVE_NP)
    ++mutex->__data.__count;
}
#endif
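
/* __pthread_mutex_cond_lock_adjust exists only in the NO_INCR build
   (pthread_mutex_cond_lock.c).  After the kernel acquires a PI mutex on
   this thread's behalf, e.g. as part of a requeue-PI operation in
   pthread_cond_wait, only the userspace bookkeeping remains to be done,
   namely setting __owner and, for the recursive kind, bumping __count;
   the lock word itself is already owned.  */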