/* Copyright (C) 2002-2023 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/param.h>
#include <not-cancel.h>
#include "pthreadP.h"
#include <atomic.h>
#include <futex-internal.h>
#include <stap-probe.h>
#include <shlib-compat.h>

/* Some of the following definitions differ when pthread_mutex_cond_lock.c
   includes this file.  */
#ifndef LLL_MUTEX_LOCK
/* lll_lock with single-thread optimization.  */
static inline void
lll_mutex_lock_optimized (pthread_mutex_t *mutex)
{
  /* The single-threaded optimization is only valid for private
     mutexes.  For process-shared mutexes, the mutex could be in a
     shared mapping, so synchronization with another process is needed
     even without any threads.  If the lock is already marked as
     acquired, POSIX requires that pthread_mutex_lock deadlocks for
     normal mutexes, so skip the optimization in that case as
     well.  */
  int private = PTHREAD_MUTEX_PSHARED (mutex);
  if (private == LLL_PRIVATE && SINGLE_THREAD_P && mutex->__data.__lock == 0)
    mutex->__data.__lock = 1;
  else
    lll_lock (mutex->__data.__lock, private);
}

# define LLL_MUTEX_LOCK(mutex) \
  lll_lock ((mutex)->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex))
# define LLL_MUTEX_LOCK_OPTIMIZED(mutex) lll_mutex_lock_optimized (mutex)
# define LLL_MUTEX_TRYLOCK(mutex) \
  lll_trylock ((mutex)->__data.__lock)
# define LLL_ROBUST_MUTEX_LOCK_MODIFIER 0
# define LLL_MUTEX_LOCK_ELISION(mutex) \
  lll_lock_elision ((mutex)->__data.__lock, (mutex)->__data.__elision, \
                    PTHREAD_MUTEX_PSHARED (mutex))
# define LLL_MUTEX_TRYLOCK_ELISION(mutex) \
  lll_trylock_elision ((mutex)->__data.__lock, (mutex)->__data.__elision, \
                       PTHREAD_MUTEX_PSHARED (mutex))
# define PTHREAD_MUTEX_LOCK ___pthread_mutex_lock
# define PTHREAD_MUTEX_VERSIONS 1
#endif

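/* LLL_MUTEX_READ_LOCK is used by the adaptive spin loop below: polling
   the lock word with a plain relaxed load keeps the cache line in
   shared state, whereas spinning on trylock's atomic read-modify-write
   would bounce the line between waiting threads.  */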
#ifndef LLL_MUTEX_READ_LOCK
# define LLL_MUTEX_READ_LOCK(mutex) \
  atomic_load_relaxed (&(mutex)->__data.__lock)
#endif

static int __pthread_mutex_lock_full (pthread_mutex_t *mutex)
     __attribute_noinline__;

int
PTHREAD_MUTEX_LOCK (pthread_mutex_t *mutex)
{
  /* See concurrency notes regarding mutex type which is loaded from __kind
     in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h.  */
  unsigned int type = PTHREAD_MUTEX_TYPE_ELISION (mutex);

  LIBC_PROBE (mutex_entry, 1, mutex);

  if (__builtin_expect (type & ~(PTHREAD_MUTEX_KIND_MASK_NP
                                 | PTHREAD_MUTEX_ELISION_FLAGS_NP), 0))
    return __pthread_mutex_lock_full (mutex);

  if (__glibc_likely (type == PTHREAD_MUTEX_TIMED_NP))
    {
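      /* On targets with lock elision support, FORCE_ELISION may upgrade
         this default-type mutex to the elision type on the fly and
         branch to the elision path below; elsewhere it expands to a
         no-op.  */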
      FORCE_ELISION (mutex, goto elision);
    simple:
      /* Normal mutex.  */
      LLL_MUTEX_LOCK_OPTIMIZED (mutex);
      assert (mutex->__data.__owner == 0);
    }
#if ENABLE_ELISION_SUPPORT
  else if (__glibc_likely (type == PTHREAD_MUTEX_TIMED_ELISION_NP))
    {
    elision: __attribute__ ((unused))
      /* This case can never happen on a system without elision, as the
         mutex type initialization functions do not allow the elision
         flags to be set.  */
      /* Don't record the owner or users for the elision case.  This is
         a tail call.  */
      return LLL_MUTEX_LOCK_ELISION (mutex);
    }
#endif
  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
                             == PTHREAD_MUTEX_RECURSIVE_NP, 1))
    {
      /* Recursive mutex.  */
      pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;

          return 0;
        }

      /* We have to get the mutex.  */
      LLL_MUTEX_LOCK_OPTIMIZED (mutex);

      assert (mutex->__data.__owner == 0);
      mutex->__data.__count = 1;
    }
  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
                             == PTHREAD_MUTEX_ADAPTIVE_NP, 1))
    {
      if (LLL_MUTEX_TRYLOCK (mutex) != 0)
        {
          int cnt = 0;
          int max_cnt = MIN (max_adaptive_count (),
                             mutex->__data.__spins * 2 + 10);
          int spin_count, exp_backoff = 1;
          unsigned int jitter = get_jitter ();
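          /* Note: the jitter mask below is a cheap modulo that works
             only while exp_backoff stays a power of two; it starts at 1
             and get_next_backoff is presumably defined to grow it by
             doubling.  */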
          do
            {
              /* In each iteration, the spin count is the exponential
                 backoff plus random jitter drawn from
                 [0, exp_backoff-1].  */
              spin_count = exp_backoff + (jitter & (exp_backoff - 1));
              cnt += spin_count;
              if (cnt >= max_cnt)
                {
                  /* If cnt exceeds the maximum spin count, stop
                     spinning and block on the futex wait queue.  */
                  LLL_MUTEX_LOCK (mutex);
                  break;
                }
              do
                atomic_spin_nop ();
              while (--spin_count > 0);
              /* Prepare for the next iteration.  */
              exp_backoff = get_next_backoff (exp_backoff);
            }
          while (LLL_MUTEX_READ_LOCK (mutex) != 0
                 || LLL_MUTEX_TRYLOCK (mutex) != 0);

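          /* Update the spin estimate as an exponentially weighted moving
             average: move __spins 1/8 of the way toward the number of
             iterations this acquisition actually needed.  */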
          mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
        }
      assert (mutex->__data.__owner == 0);
    }
  else
    {
      pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
      assert (PTHREAD_MUTEX_TYPE (mutex) == PTHREAD_MUTEX_ERRORCHECK_NP);
      /* Check whether we already hold the mutex.  */
      if (__glibc_unlikely (mutex->__data.__owner == id))
        return EDEADLK;
      goto simple;
    }

  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  /* Record the ownership.  */
  mutex->__data.__owner = id;
#ifndef NO_INCR
  ++mutex->__data.__nusers;
#endif

  LIBC_PROBE (mutex_acquired, 1, mutex);

  return 0;
}
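
/* Slow path for the less common mutex types: robust,
   priority-inheritance (PI), and priority-protection (PP) mutexes.  */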
static int
__pthread_mutex_lock_full (pthread_mutex_t *mutex)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  switch (PTHREAD_MUTEX_TYPE (mutex))
    {
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);
      /* We need to set op_pending before starting the operation.  Also
         see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");
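      /* The empty asm with a "memory" clobber is only a compiler
         barrier, not a fence; that is presumably sufficient here because
         the kernel reads the robust list from this thread's own context
         when the thread exits.  */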

      oldval = mutex->__data.__lock;
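      /* For robust mutexes the lock word encodes the owner's TID in
         FUTEX_TID_MASK together with the FUTEX_WAITERS and
         FUTEX_OWNER_DIED flag bits; the kernel sets FUTEX_OWNER_DIED
         when the owner exits without unlocking.  */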
      /* This is set to FUTEX_WAITERS iff we might have shared the
         FUTEX_WAITERS flag with other threads, and therefore need to keep it
         set to avoid lost wake-ups.  We have the same requirement in the
         simple mutex algorithm.
         We start with value zero for a normal mutex, and FUTEX_WAITERS if we
         are building the special case mutexes for use from within condition
         variables.  */
      unsigned int assume_other_futex_waiters = LLL_ROBUST_MUTEX_LOCK_MODIFIER;
      while (1)
        {
          /* Try to acquire the lock through a CAS from 0 (not acquired) to
             our TID | assume_other_futex_waiters.  */
          if (__glibc_likely (oldval == 0))
            {
              oldval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                    id | assume_other_futex_waiters, 0);
              if (__glibc_likely (oldval == 0))
                break;
            }

          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id;
#ifdef NO_INCR
              /* We are not taking assume_other_futex_waiters into account
                 here simply because we'll set FUTEX_WAITERS anyway.  */
              newval |= FUTEX_WAITERS;
#else
              newval |= (oldval & FUTEX_WAITERS) | assume_other_futex_waiters;
#endif

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);

              if (newval != oldval)
                {
                  oldval = newval;
                  continue;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              /* We must not enqueue the mutex before we have acquired it.
                 Also see comments at ENQUEUE_MUTEX.  */
              __asm ("" ::: "memory");
              ENQUEUE_MUTEX (mutex);
              /* We need to clear op_pending after we enqueue the mutex.  */
              __asm ("" ::: "memory");
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fell through
                 to the end of the function, __nusers would be
                 incremented, which is not correct because the old
                 owner has to be discounted.  If we are not supposed
                 to increment __nusers we actually have to decrement
                 it here.  */
#ifdef NO_INCR
              --mutex->__data.__nusers;
#endif

              return EOWNERDEAD;
            }

          /* Check whether we already hold the mutex.  */
          if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  /* We do not need to ensure ordering wrt another memory
                     access.  Also see comments at ENQUEUE_MUTEX.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  /* We do not need to ensure ordering wrt another memory
                     access.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  return 0;
                }
            }

          /* We cannot acquire the mutex, nor has its owner died.  Thus,
             try to block using futexes.  Set FUTEX_WAITERS if necessary
             so that other threads are aware that there are potentially
             threads blocked on the futex.  Restart if oldval changed in
             the meantime.  */
          if ((oldval & FUTEX_WAITERS) == 0)
            {
              int val = atomic_compare_and_exchange_val_acq
                (&mutex->__data.__lock, oldval | FUTEX_WAITERS, oldval);
              if (val != oldval)
                {
                  oldval = val;
                  continue;
                }
              oldval |= FUTEX_WAITERS;
            }

          /* It is now possible that we share the FUTEX_WAITERS flag with
             another thread; therefore, update assume_other_futex_waiters so
             that we do not forget about this when handling other cases
             above and thus do not cause lost wake-ups.  */
          assume_other_futex_waiters |= FUTEX_WAITERS;

          /* Block using the futex and reload the current lock value.  */
          futex_wait ((unsigned int *) &mutex->__data.__lock, oldval,
                      PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
          oldval = mutex->__data.__lock;
        }

      /* We have acquired the mutex; check whether it is still
         consistent.  */
      if (__builtin_expect (mutex->__data.__owner
                            == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
        {
          /* This mutex is now not recoverable.  */
          mutex->__data.__count = 0;
          int private = PTHREAD_ROBUST_MUTEX_PSHARED (mutex);
          lll_unlock (mutex->__data.__lock, private);
          /* FIXME This violates the mutex destruction requirements.  See
             __pthread_mutex_unlock_full.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          return ENOTRECOVERABLE;
        }

      mutex->__data.__count = 1;
      /* We must not enqueue the mutex before we have acquired it.
         Also see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");
      ENQUEUE_MUTEX (mutex);
      /* We need to clear op_pending after we enqueue the mutex.  */
      __asm ("" ::: "memory");
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;

      /* The PI support requires the Linux futex system call.  If that's not
         available, pthread_mutex_init should never have allowed the type to
         be set.  So it will get the default case for an invalid type.  */
#ifdef __NR_futex
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind, robust;
        {
          /* See concurrency notes regarding __kind in struct __pthread_mutex_s
             in sysdeps/nptl/bits/thread-shared-types.h.  */
          int mutex_kind = atomic_load_relaxed (&(mutex->__data.__kind));
          kind = mutex_kind & PTHREAD_MUTEX_KIND_MASK_NP;
          robust = mutex_kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
        }

        if (robust)
          {
            /* Note: robust PI futexes are signaled by setting bit 0.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                           (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                     | 1));
            /* We need to set op_pending before starting the operation.  Also
               see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
          }

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                /* We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        int newval = id;
# ifdef NO_INCR
        newval |= FUTEX_WAITERS;
# endif
        oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                      newval, 0);

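        /* If the CAS fails, somebody owns the mutex and FUTEX_LOCK_PI in
           the kernel takes over: it queues us, lends our priority to the
           owner identified by the TID stored in the lock word, and
           assigns us the lock once it is released.  */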
        if (oldval != 0)
          {
            /* The mutex is locked.  The kernel will now take care of
               everything.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            int e = __futex_lock_pi64 (&mutex->__data.__lock, 0 /* unused */,
                                       NULL, private);
            if (e == ESRCH || e == EDEADLK)
              {
                assert (e != EDEADLK
                        || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
                            && kind != PTHREAD_MUTEX_RECURSIVE_NP));
                /* ESRCH can happen only for non-robust PI mutexes where
                   the owner of the lock died.  */
                assert (e != ESRCH || !robust);

                /* Delay the thread indefinitely.  */
                while (1)
                  __futex_abstimed_wait64 (&(unsigned int){0}, 0,
                                           0 /* ignored */, NULL, private);
              }

            oldval = mutex->__data.__lock;

            assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
          }

        if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
          {
            atomic_fetch_and_acquire (&mutex->__data.__lock,
                                      ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            /* We must not enqueue the mutex before we have acquired it.
               Also see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
            ENQUEUE_MUTEX_PI (mutex);
            /* We need to clear op_pending after we enqueue the mutex.  */
            __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fell through
               to the end of the function, __nusers would be
               incremented, which is not correct because the old owner
               has to be discounted.  If we are not supposed to
               increment __nusers we actually have to decrement it here.  */
# ifdef NO_INCR
            --mutex->__data.__nusers;
# endif

            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            futex_unlock_pi ((unsigned int *) &mutex->__data.__lock,
                             PTHREAD_ROBUST_MUTEX_PSHARED (mutex));

            /* To the kernel, this will be visible after the kernel has
               acquired the mutex in the syscall.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        mutex->__data.__count = 1;
        if (robust)
          {
            /* We must not enqueue the mutex before we have acquired it.
               Also see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
            ENQUEUE_MUTEX_PI (mutex);
            /* We need to clear op_pending after we enqueue the mutex.  */
            __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }
      }
      break;
#endif  /* __NR_futex.  */

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        /* See concurrency notes regarding __kind in struct __pthread_mutex_s
           in sysdeps/nptl/bits/thread-shared-types.h.  */
        int kind = atomic_load_relaxed (&(mutex->__data.__kind))
          & PTHREAD_MUTEX_KIND_MASK_NP;

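        /* For priority-protected mutexes the lock word stores the
           current priority ceiling in PTHREAD_MUTEX_PRIO_CEILING_MASK;
           the low bits encode the state: ceilval means unlocked,
           ceilval | 1 locked without waiters, and ceilval | 2 locked
           with potential waiters.  */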
        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

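        /* Acquisition protocol: first raise this thread's priority to
           the ceiling via __pthread_tpp_change_priority, then CAS the
           lock word from the unlocked state (ceilval).  If another
           thread changed the ceiling in the meantime, the outer loop
           retries with the new value.  */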
        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                          >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return EINVAL;
              }

            int retval = __pthread_tpp_change_priority (oldprio, ceiling);
            if (retval)
              return retval;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
#ifdef NO_INCR
                                                     ceilval | 2,
#else
                                                     ceilval | 1,
#endif
                                                     ceilval);

            if (oldval == ceilval)
              break;

            do
              {
                oldval
                  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                         ceilval | 2,
                                                         ceilval | 1);

                if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
                  break;

                if (oldval != ceilval)
                  futex_wait ((unsigned int *) &mutex->__data.__lock,
                              ceilval | 2,
                              PTHREAD_MUTEX_PSHARED (mutex));
              }
            while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        ceilval | 2, ceilval)
                   != ceilval);
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        assert (mutex->__data.__owner == 0);
        mutex->__data.__count = 1;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  /* Record the ownership.  */
  mutex->__data.__owner = id;
#ifndef NO_INCR
  ++mutex->__data.__nusers;
#endif

  LIBC_PROBE (mutex_acquired, 1, mutex);

  return 0;
}
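/* Export the internal name ___pthread_mutex_lock under the public and
   compatibility symbol names; pthread_mutex_lock itself moved from
   libpthread to libc in glibc 2.34, hence the compat symbol below.  */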
#if PTHREAD_MUTEX_VERSIONS
libc_hidden_ver (___pthread_mutex_lock, __pthread_mutex_lock)
# ifndef SHARED
strong_alias (___pthread_mutex_lock, __pthread_mutex_lock)
# endif
versioned_symbol (libpthread, ___pthread_mutex_lock, pthread_mutex_lock,
                  GLIBC_2_0);

# if OTHER_SHLIB_COMPAT (libpthread, GLIBC_2_0, GLIBC_2_34)
compat_symbol (libpthread, ___pthread_mutex_lock, __pthread_mutex_lock,
               GLIBC_2_0);
# endif
#endif /* PTHREAD_MUTEX_VERSIONS */


#ifdef NO_INCR
void
__pthread_mutex_cond_lock_adjust (pthread_mutex_t *mutex)
{
  /* See concurrency notes regarding __kind in struct __pthread_mutex_s
     in sysdeps/nptl/bits/thread-shared-types.h.  */
  int mutex_kind = atomic_load_relaxed (&(mutex->__data.__kind));
  assert ((mutex_kind & PTHREAD_MUTEX_PRIO_INHERIT_NP) != 0);
  assert ((mutex_kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0);
  assert ((mutex_kind & PTHREAD_MUTEX_PSHARED_BIT) == 0);

  /* Record the ownership.  */
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  mutex->__data.__owner = id;

  if (mutex_kind == PTHREAD_MUTEX_PI_RECURSIVE_NP)
    ++mutex->__data.__count;
}
#endif