]>
Commit | Line | Data |
---|---|---|
581c785b | 1 | /* Copyright (C) 2002-2022 Free Software Foundation, Inc. |
76a50749 | 2 | This file is part of the GNU C Library. |
76a50749 UD |
3 | |
4 | The GNU C Library is free software; you can redistribute it and/or | |
5 | modify it under the terms of the GNU Lesser General Public | |
6 | License as published by the Free Software Foundation; either | |
7 | version 2.1 of the License, or (at your option) any later version. | |
8 | ||
9 | The GNU C Library is distributed in the hope that it will be useful, | |
10 | but WITHOUT ANY WARRANTY; without even the implied warranty of | |
11 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
12 | Lesser General Public License for more details. | |
13 | ||
14 | You should have received a copy of the GNU Lesser General Public | |
59ba27a6 | 15 | License along with the GNU C Library; if not, see |
5a82c748 | 16 | <https://www.gnu.org/licenses/>. */ |
76a50749 | 17 | |
3892d906 | 18 | #include <assert.h> |
76a50749 | 19 | #include <errno.h> |
1bcfb5a5 | 20 | #include <stdlib.h> |
df47504c | 21 | #include <unistd.h> |
ca06321d | 22 | #include <sys/param.h> |
b894c2ea | 23 | #include <not-cancel.h> |
76a50749 | 24 | #include "pthreadP.h" |
4eb984d3 | 25 | #include <atomic.h> |
6b1472eb | 26 | #include <futex-internal.h> |
5acf7263 | 27 | #include <stap-probe.h> |
27a44822 | 28 | #include <shlib-compat.h> |
76a50749 | 29 | |
/* Some of the following definitions differ when pthread_mutex_cond_lock.c
   includes this file.  */
#ifndef LLL_MUTEX_LOCK
/* lll_lock with single-thread optimization.  */
static inline void
lll_mutex_lock_optimized (pthread_mutex_t *mutex)
{
  /* The single-threaded optimization is only valid for private
     mutexes.  For process-shared mutexes, the mutex could be in a
     shared mapping, so synchronization with another process is needed
     even without any threads.  If the lock is already marked as
     acquired, POSIX requires that pthread_mutex_lock deadlocks for
     normal mutexes, so skip the optimization in that case as
     well.  */
  int private = PTHREAD_MUTEX_PSHARED (mutex);
  if (private == LLL_PRIVATE && SINGLE_THREAD_P && mutex->__data.__lock == 0)
    mutex->__data.__lock = 1;
  else
    lll_lock (mutex->__data.__lock, private);
}

/* Unconditional low-level lock, without the single-thread short cut.  */
# define LLL_MUTEX_LOCK(mutex) \
  lll_lock ((mutex)->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex))
# define LLL_MUTEX_LOCK_OPTIMIZED(mutex) lll_mutex_lock_optimized (mutex)
# define LLL_MUTEX_TRYLOCK(mutex) \
  lll_trylock ((mutex)->__data.__lock)
/* No FUTEX_WAITERS is assumed up front for the plain lock; the
   pthread_mutex_cond_lock.c inclusion overrides this (see the comment
   at assume_other_futex_waiters below).  */
# define LLL_ROBUST_MUTEX_LOCK_MODIFIER 0
# define LLL_MUTEX_LOCK_ELISION(mutex) \
  lll_lock_elision ((mutex)->__data.__lock, (mutex)->__data.__elision, \
		    PTHREAD_MUTEX_PSHARED (mutex))
# define LLL_MUTEX_TRYLOCK_ELISION(mutex) \
  lll_trylock_elision((mutex)->__data.__lock, (mutex)->__data.__elision, \
		      PTHREAD_MUTEX_PSHARED (mutex))
# define PTHREAD_MUTEX_LOCK ___pthread_mutex_lock
# define PTHREAD_MUTEX_VERSIONS 1
#endif
66 | ||
#ifndef LLL_MUTEX_READ_LOCK
/* Relaxed load of the lock word; used by the adaptive spin loop to
   poll the lock without generating atomic read-modify-write traffic.  */
# define LLL_MUTEX_READ_LOCK(mutex) \
  atomic_load_relaxed (&(mutex)->__data.__lock)
#endif
71 | ||
/* Slow path for the less common mutex kinds (robust, PI, PP); kept out
   of line (__attribute_noinline__) so the fast path below stays small.  */
static int __pthread_mutex_lock_full (pthread_mutex_t *mutex)
     __attribute_noinline__;

/* Lock MUTEX.  Handles the common kinds (normal/timed, elided,
   recursive, adaptive, error-checking) inline and dispatches every
   other kind to __pthread_mutex_lock_full.  Returns 0 on success or a
   POSIX error code (EAGAIN, EDEADLK, ...).  */
int
PTHREAD_MUTEX_LOCK (pthread_mutex_t *mutex)
{
  /* See concurrency notes regarding mutex type which is loaded from __kind
     in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h.  */
  unsigned int type = PTHREAD_MUTEX_TYPE_ELISION (mutex);

  LIBC_PROBE (mutex_entry, 1, mutex);

  /* Any flag outside the plain kind/elision bits selects one of the
     robust/PI/PP variants handled by the slow path.  */
  if (__builtin_expect (type & ~(PTHREAD_MUTEX_KIND_MASK_NP
				 | PTHREAD_MUTEX_ELISION_FLAGS_NP), 0))
    return __pthread_mutex_lock_full (mutex);

  if (__glibc_likely (type == PTHREAD_MUTEX_TIMED_NP))
    {
      FORCE_ELISION (mutex, goto elision);
    simple:
      /* Normal mutex.  */
      LLL_MUTEX_LOCK_OPTIMIZED (mutex);
      assert (mutex->__data.__owner == 0);
    }
#if ENABLE_ELISION_SUPPORT
  else if (__glibc_likely (type == PTHREAD_MUTEX_TIMED_ELISION_NP))
    {
  elision: __attribute__((unused))
      /* This case can never happen on a system without elision,
         as the mutex type initialization functions will not
	 allow to set the elision flags.  */
      /* Don't record owner or users for elision case.  This is a
         tail call.  */
      return LLL_MUTEX_LOCK_ELISION (mutex);
    }
#endif
  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
			     == PTHREAD_MUTEX_RECURSIVE_NP, 1))
    {
      /* Recursive mutex.  */
      pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
	{
	  /* Just bump the counter.  */
	  if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
	    /* Overflow of the counter.  */
	    return EAGAIN;

	  ++mutex->__data.__count;

	  return 0;
	}

      /* We have to get the mutex.  */
      LLL_MUTEX_LOCK_OPTIMIZED (mutex);

      assert (mutex->__data.__owner == 0);
      mutex->__data.__count = 1;
    }
  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
			     == PTHREAD_MUTEX_ADAPTIVE_NP, 1))
    {
      /* Adaptive mutex: if the trylock fails, spin for a bounded number
	 of iterations (scaled by the running average in __spins) before
	 falling back to blocking in LLL_MUTEX_LOCK.  */
      if (LLL_MUTEX_TRYLOCK (mutex) != 0)
	{
	  int cnt = 0;
	  int max_cnt = MIN (max_adaptive_count (),
			     mutex->__data.__spins * 2 + 10);
	  do
	    {
	      if (cnt++ >= max_cnt)
		{
		  LLL_MUTEX_LOCK (mutex);
		  break;
		}
	      atomic_spin_nop ();
	    }
	  /* Poll with a relaxed load first to avoid hammering the lock
	     word with failed trylock CASes.  */
	  while (LLL_MUTEX_READ_LOCK (mutex) != 0
		 || LLL_MUTEX_TRYLOCK (mutex) != 0);

	  /* Update the exponential moving average of spins needed.  */
	  mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
	}
      assert (mutex->__data.__owner == 0);
    }
  else
    {
      pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
      assert (PTHREAD_MUTEX_TYPE (mutex) == PTHREAD_MUTEX_ERRORCHECK_NP);
      /* Check whether we already hold the mutex.  */
      if (__glibc_unlikely (mutex->__data.__owner == id))
	return EDEADLK;
      goto simple;
    }

  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  /* Record the ownership.  */
  mutex->__data.__owner = id;
#ifndef NO_INCR
  ++mutex->__data.__nusers;
#endif

  LIBC_PROBE (mutex_acquired, 1, mutex);

  return 0;
}
179 | ||
/* Slow path of pthread_mutex_lock, handling the robust,
   priority-inheritance (PI) and priority-protected (PP) mutex kinds.
   Returns 0 on success, or EDEADLK, EAGAIN, EINVAL, EOWNERDEAD,
   ENOTRECOVERABLE, or an error from the TPP machinery.  */
static int
__pthread_mutex_lock_full (pthread_mutex_t *mutex)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  switch (PTHREAD_MUTEX_TYPE (mutex))
    {
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
		     &mutex->__data.__list.__next);
      /* We need to set op_pending before starting the operation.  Also
	 see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");

      oldval = mutex->__data.__lock;
      /* This is set to FUTEX_WAITERS iff we might have shared the
	 FUTEX_WAITERS flag with other threads, and therefore need to keep it
	 set to avoid lost wake-ups.  We have the same requirement in the
	 simple mutex algorithm.
	 We start with value zero for a normal mutex, and FUTEX_WAITERS if we
	 are building the special case mutexes for use from within condition
	 variables.  */
      unsigned int assume_other_futex_waiters = LLL_ROBUST_MUTEX_LOCK_MODIFIER;
      while (1)
	{
	  /* Try to acquire the lock through a CAS from 0 (not acquired) to
	     our TID | assume_other_futex_waiters.  */
	  if (__glibc_likely (oldval == 0))
	    {
	      oldval
	        = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
	            id | assume_other_futex_waiters, 0);
	      if (__glibc_likely (oldval == 0))
		break;
	    }

	  if ((oldval & FUTEX_OWNER_DIED) != 0)
	    {
	      /* The previous owner died.  Try locking the mutex.  */
	      int newval = id;
#ifdef NO_INCR
	      /* We are not taking assume_other_futex_waiters into accoount
		 here simply because we'll set FUTEX_WAITERS anyway.  */
	      newval |= FUTEX_WAITERS;
#else
	      newval |= (oldval & FUTEX_WAITERS) | assume_other_futex_waiters;
#endif

	      newval
		= atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
						       newval, oldval);

	      if (newval != oldval)
		{
		  /* Lock word changed under us; re-evaluate from the top.  */
		  oldval = newval;
		  continue;
		}

	      /* We got the mutex.  */
	      mutex->__data.__count = 1;
	      /* But it is inconsistent unless marked otherwise.  */
	      mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

	      /* We must not enqueue the mutex before we have acquired it.
		 Also see comments at ENQUEUE_MUTEX.  */
	      __asm ("" ::: "memory");
	      ENQUEUE_MUTEX (mutex);
	      /* We need to clear op_pending after we enqueue the mutex.  */
	      __asm ("" ::: "memory");
	      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

	      /* Note that we deliberately exit here.  If we fall
		 through to the end of the function __nusers would be
		 incremented which is not correct because the old
		 owner has to be discounted.  If we are not supposed
		 to increment __nusers we actually have to decrement
		 it here.  */
#ifdef NO_INCR
	      --mutex->__data.__nusers;
#endif

	      return EOWNERDEAD;
	    }

	  /* Check whether we already hold the mutex.  */
	  if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
	    {
	      int kind = PTHREAD_MUTEX_TYPE (mutex);
	      if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
		{
		  /* We do not need to ensure ordering wrt another memory
		     access.  Also see comments at ENQUEUE_MUTEX.  */
		  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
				 NULL);
		  return EDEADLK;
		}

	      if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
		{
		  /* We do not need to ensure ordering wrt another memory
		     access.  */
		  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
				 NULL);

		  /* Just bump the counter.  */
		  if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
		    /* Overflow of the counter.  */
		    return EAGAIN;

		  ++mutex->__data.__count;

		  return 0;
		}
	    }

	  /* We cannot acquire the mutex nor has its owner died.  Thus, try
	     to block using futexes.  Set FUTEX_WAITERS if necessary so that
	     other threads are aware that there are potentially threads
	     blocked on the futex.  Restart if oldval changed in the
	     meantime.  */
	  if ((oldval & FUTEX_WAITERS) == 0)
	    {
	      int val = atomic_compare_and_exchange_val_acq
		(&mutex->__data.__lock, oldval | FUTEX_WAITERS, oldval);
	      if (val != oldval)
		{
		  oldval = val;
		  continue;
		}
	      oldval |= FUTEX_WAITERS;
	    }

	  /* It is now possible that we share the FUTEX_WAITERS flag with
	     another thread; therefore, update assume_other_futex_waiters so
	     that we do not forget about this when handling other cases
	     above and thus do not cause lost wake-ups.  */
	  assume_other_futex_waiters |= FUTEX_WAITERS;

	  /* Block using the futex and reload current lock value.  */
	  futex_wait ((unsigned int *) &mutex->__data.__lock, oldval,
		      PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
	  oldval = mutex->__data.__lock;
	}

      /* We have acquired the mutex; check if it is still consistent.  */
      if (__builtin_expect (mutex->__data.__owner
			    == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
	{
	  /* This mutex is now not recoverable.  */
	  mutex->__data.__count = 0;
	  int private = PTHREAD_ROBUST_MUTEX_PSHARED (mutex);
	  lll_unlock (mutex->__data.__lock, private);
	  /* FIXME This violates the mutex destruction requirements.  See
	     __pthread_mutex_unlock_full.  */
	  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
	  return ENOTRECOVERABLE;
	}

      mutex->__data.__count = 1;
      /* We must not enqueue the mutex before we have acquired it.
	 Also see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");
      ENQUEUE_MUTEX (mutex);
      /* We need to clear op_pending after we enqueue the mutex.  */
      __asm ("" ::: "memory");
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;

      /* The PI support requires the Linux futex system call.  If that's not
	 available, pthread_mutex_init should never have allowed the type to
	 be set.  So it will get the default case for an invalid type.  */
#ifdef __NR_futex
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
	int kind, robust;
	{
	  /* See concurrency notes regarding __kind in struct __pthread_mutex_s
	     in sysdeps/nptl/bits/thread-shared-types.h.  */
	  int mutex_kind = atomic_load_relaxed (&(mutex->__data.__kind));
	  kind = mutex_kind & PTHREAD_MUTEX_KIND_MASK_NP;
	  robust = mutex_kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
	}

	if (robust)
	  {
	    /* Note: robust PI futexes are signaled by setting bit 0.  */
	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
			   (void *) (((uintptr_t) &mutex->__data.__list.__next)
				     | 1));
	    /* We need to set op_pending before starting the operation.  Also
	       see comments at ENQUEUE_MUTEX.  */
	    __asm ("" ::: "memory");
	  }

	oldval = mutex->__data.__lock;

	/* Check whether we already hold the mutex.  */
	if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
	  {
	    if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
	      {
		/* We do not need to ensure ordering wrt another memory
		   access.  */
		THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
		return EDEADLK;
	      }

	    if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
	      {
		/* We do not need to ensure ordering wrt another memory
		   access.  */
		THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

		/* Just bump the counter.  */
		if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
		  /* Overflow of the counter.  */
		  return EAGAIN;

		++mutex->__data.__count;

		return 0;
	      }
	  }

	/* Try the uncontended fast path: CAS 0 -> our TID.  */
	int newval = id;
# ifdef NO_INCR
	newval |= FUTEX_WAITERS;
# endif
	oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
						      newval, 0);

	if (oldval != 0)
	  {
	    /* The mutex is locked.  The kernel will now take care of
	       everything.  */
	    int private = (robust
			   ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
			   : PTHREAD_MUTEX_PSHARED (mutex));
	    int e = __futex_lock_pi64 (&mutex->__data.__lock, 0 /* ununsed */,
				       NULL, private);
	    if (e == ESRCH || e == EDEADLK)
	      {
		assert (e != EDEADLK
			|| (kind != PTHREAD_MUTEX_ERRORCHECK_NP
			    && kind != PTHREAD_MUTEX_RECURSIVE_NP));
		/* ESRCH can happen only for non-robust PI mutexes where
		   the owner of the lock died.  */
		assert (e != ESRCH || !robust);

		/* Delay the thread indefinitely.  */
		while (1)
		  __futex_abstimed_wait64 (&(unsigned int){0}, 0,
					   0 /* ignored */, NULL, private);
	      }

	    oldval = mutex->__data.__lock;

	    assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
	  }

	if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
	  {
	    atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

	    /* We got the mutex.  */
	    mutex->__data.__count = 1;
	    /* But it is inconsistent unless marked otherwise.  */
	    mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

	    /* We must not enqueue the mutex before we have acquired it.
	       Also see comments at ENQUEUE_MUTEX.  */
	    __asm ("" ::: "memory");
	    ENQUEUE_MUTEX_PI (mutex);
	    /* We need to clear op_pending after we enqueue the mutex.  */
	    __asm ("" ::: "memory");
	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

	    /* Note that we deliberately exit here.  If we fall
	       through to the end of the function __nusers would be
	       incremented which is not correct because the old owner
	       has to be discounted.  If we are not supposed to
	       increment __nusers we actually have to decrement it here.  */
# ifdef NO_INCR
	    --mutex->__data.__nusers;
# endif

	    return EOWNERDEAD;
	  }

	if (robust
	    && __builtin_expect (mutex->__data.__owner
				 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
	  {
	    /* This mutex is now not recoverable.  */
	    mutex->__data.__count = 0;

	    futex_unlock_pi ((unsigned int *) &mutex->__data.__lock,
			     PTHREAD_ROBUST_MUTEX_PSHARED (mutex));

	    /* To the kernel, this will be visible after the kernel has
	       acquired the mutex in the syscall.  */
	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
	    return ENOTRECOVERABLE;
	  }

	mutex->__data.__count = 1;
	if (robust)
	  {
	    /* We must not enqueue the mutex before we have acquired it.
	       Also see comments at ENQUEUE_MUTEX.  */
	    __asm ("" ::: "memory");
	    ENQUEUE_MUTEX_PI (mutex);
	    /* We need to clear op_pending after we enqueue the mutex.  */
	    __asm ("" ::: "memory");
	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
	  }
      }
      break;
#endif  /* __NR_futex.  */

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
	/* See concurrency notes regarding __kind in struct __pthread_mutex_s
	   in sysdeps/nptl/bits/thread-shared-types.h.  */
	int kind = atomic_load_relaxed (&(mutex->__data.__kind))
	  & PTHREAD_MUTEX_KIND_MASK_NP;

	oldval = mutex->__data.__lock;

	/* Check whether we already hold the mutex.  */
	if (mutex->__data.__owner == id)
	  {
	    if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
	      return EDEADLK;

	    if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
	      {
		/* Just bump the counter.  */
		if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
		  /* Overflow of the counter.  */
		  return EAGAIN;

		++mutex->__data.__count;

		return 0;
	      }
	  }

	/* Priority protection: raise our priority to the mutex's
	   ceiling (encoded in the lock word) before acquiring, and
	   retry if the ceiling changes concurrently.  */
	int oldprio = -1, ceilval;
	do
	  {
	    int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
			  >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

	    if (__pthread_current_priority () > ceiling)
	      {
		/* Caller's priority exceeds the ceiling; undo any
		   earlier TPP boost before failing.  */
		if (oldprio != -1)
		  __pthread_tpp_change_priority (oldprio, -1);
		return EINVAL;
	      }

	    int retval = __pthread_tpp_change_priority (oldprio, ceiling);
	    if (retval)
	      return retval;

	    ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
	    oldprio = ceiling;

	    /* Low two bits of the lock word: 0 = free, 1 = locked,
	       2 = locked with waiters.  */
	    oldval
	      = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
#ifdef NO_INCR
						     ceilval | 2,
#else
						     ceilval | 1,
#endif
						     ceilval);

	    if (oldval == ceilval)
	      break;

	    do
	      {
		oldval
		  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
							 ceilval | 2,
							 ceilval | 1);

		if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
		  break;

		if (oldval != ceilval)
		  futex_wait ((unsigned int * ) &mutex->__data.__lock,
			      ceilval | 2,
			      PTHREAD_MUTEX_PSHARED (mutex));
	      }
	    while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
							ceilval | 2, ceilval)
		   != ceilval);
	  }
	while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

	assert (mutex->__data.__owner == 0);
	mutex->__data.__count = 1;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  /* Record the ownership.  */
  mutex->__data.__owner = id;
#ifndef NO_INCR
  ++mutex->__data.__nusers;
#endif

  LIBC_PROBE (mutex_acquired, 1, mutex);

  return 0;
}
#if PTHREAD_MUTEX_VERSIONS
/* Export the internal ___pthread_mutex_lock under its public name and
   the pre-2.34 libpthread compatibility symbols.  */
libc_hidden_ver (___pthread_mutex_lock, __pthread_mutex_lock)
# ifndef SHARED
strong_alias (___pthread_mutex_lock, __pthread_mutex_lock)
# endif
versioned_symbol (libpthread, ___pthread_mutex_lock, pthread_mutex_lock,
		  GLIBC_2_0);

# if OTHER_SHLIB_COMPAT (libpthread, GLIBC_2_0, GLIBC_2_34)
compat_symbol (libpthread, ___pthread_mutex_lock, __pthread_mutex_lock,
	       GLIBC_2_0);
# endif
#endif /* PTHREAD_MUTEX_VERSIONS */
b0948ffd UD |
629 | |
630 | ||
#ifdef NO_INCR
/* Record ownership bookkeeping for a PI mutex that was acquired on the
   caller's behalf without going through the locking code above.  Only
   built in the NO_INCR (pthread_mutex_cond_lock.c) compilation of this
   file; NOTE(review): presumably invoked from the condition-variable
   implementation -- confirm against the callers.  */
void
__pthread_mutex_cond_lock_adjust (pthread_mutex_t *mutex)
{
  /* See concurrency notes regarding __kind in struct __pthread_mutex_s
     in sysdeps/nptl/bits/thread-shared-types.h.  */
  int kind = atomic_load_relaxed (&(mutex->__data.__kind));

  /* Only private, non-robust, priority-inheritance mutexes are
     acceptable here.  */
  assert ((kind & PTHREAD_MUTEX_PRIO_INHERIT_NP) != 0);
  assert ((kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0);
  assert ((kind & PTHREAD_MUTEX_PSHARED_BIT) == 0);

  /* Record the ownership.  */
  mutex->__data.__owner = THREAD_GETMEM (THREAD_SELF, tid);

  if (kind == PTHREAD_MUTEX_PI_RECURSIVE_NP)
    ++mutex->__data.__count;
}
#endif