/* Copyright (C) 2002-2013 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#include <not-cancel.h>
#include "pthreadP.h"
#include <lowlevellock.h>
#include <stap-probe.h>

#ifndef lll_lock_elision
#define lll_lock_elision(lock, try_lock, private) ({ \
      lll_lock (lock, private); 0; })
#endif

#ifndef lll_trylock_elision
#define lll_trylock_elision(a,t) lll_trylock(a)
#endif
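
/* On configurations without lock elision the fallback definitions above
   make the elision entry points degrade to the plain low-level lock
   operations, so the code below can use them unconditionally.
   Architectures that support elision (e.g. via hardware transactional
   memory) provide their own definitions.  */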

#ifndef LLL_MUTEX_LOCK
# define LLL_MUTEX_LOCK(mutex) \
  lll_lock ((mutex)->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex))
# define LLL_MUTEX_TRYLOCK(mutex) \
  lll_trylock ((mutex)->__data.__lock)
# define LLL_ROBUST_MUTEX_LOCK(mutex, id) \
  lll_robust_lock ((mutex)->__data.__lock, id, \
                   PTHREAD_ROBUST_MUTEX_PSHARED (mutex))
# define LLL_MUTEX_LOCK_ELISION(mutex) \
  lll_lock_elision ((mutex)->__data.__lock, (mutex)->__data.__elision, \
                    PTHREAD_MUTEX_PSHARED (mutex))
# define LLL_MUTEX_TRYLOCK_ELISION(mutex) \
  lll_trylock_elision ((mutex)->__data.__lock, (mutex)->__data.__elision, \
                       PTHREAD_MUTEX_PSHARED (mutex))
#endif
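
/* The LLL_MUTEX_* macros above are deliberately overridable: this file
   is also compiled with alternative definitions (and with NO_INCR
   defined) to produce internal variants of the lock function, e.g. the
   condition-variable internal lock built from pthread_mutex_cond_lock.c;
   hence the NO_INCR conditionals further down.  */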

#ifndef FORCE_ELISION
#define FORCE_ELISION(m, s)
#endif

static int __pthread_mutex_lock_full (pthread_mutex_t *mutex)
     __attribute_noinline__;

int
__pthread_mutex_lock (mutex)
     pthread_mutex_t *mutex;
{
  assert (sizeof (mutex->__size) >= sizeof (mutex->__data));

  unsigned int type = PTHREAD_MUTEX_TYPE_ELISION (mutex);

  LIBC_PROBE (mutex_entry, 1, mutex);

  if (__builtin_expect (type & ~(PTHREAD_MUTEX_KIND_MASK_NP
                                 | PTHREAD_MUTEX_ELISION_FLAGS_NP), 0))
    return __pthread_mutex_lock_full (mutex);

  if (__builtin_expect (type == PTHREAD_MUTEX_TIMED_NP, 1))
    {
      FORCE_ELISION (mutex, goto elision);
    simple:
      /* Normal mutex.  */
      LLL_MUTEX_LOCK (mutex);
      assert (mutex->__data.__owner == 0);
    }
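  /* FORCE_ELISION above is a no-op by default (see the empty definition
     near the top of the file); on targets that implement elision it may
     divert a default (timed) mutex to the elision path on this first
     lock attempt.  The `simple' label is the plain, non-elided
     acquisition path; the error-checking case and the non-SMP adaptive
     case below also jump to it.  */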
#ifdef HAVE_ELISION
  else if (__builtin_expect (type == PTHREAD_MUTEX_TIMED_ELISION_NP, 1))
    {
  elision: __attribute__((unused))
      /* This case can never happen on a system without elision, as the
         mutex type initialization functions do not allow the elision
         flags to be set.  */
      /* Don't record owner or users for the elision case.  This is a
         tail call.  */
      return LLL_MUTEX_LOCK_ELISION (mutex);
    }
#endif
  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
                             == PTHREAD_MUTEX_RECURSIVE_NP, 1))
    {
      /* Recursive mutex.  */
      pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;

          return 0;
        }

      /* We have to get the mutex.  */
      LLL_MUTEX_LOCK (mutex);

      assert (mutex->__data.__owner == 0);
      mutex->__data.__count = 1;
    }
  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
                             == PTHREAD_MUTEX_ADAPTIVE_NP, 1))
    {
      if (! __is_smp)
        goto simple;

      if (LLL_MUTEX_TRYLOCK (mutex) != 0)
        {
          int cnt = 0;
          int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
                             mutex->__data.__spins * 2 + 10);
          do
            {
              if (cnt++ >= max_cnt)
                {
                  LLL_MUTEX_LOCK (mutex);
                  break;
                }

#ifdef BUSY_WAIT_NOP
              BUSY_WAIT_NOP;
#endif
            }
          while (LLL_MUTEX_TRYLOCK (mutex) != 0);

          mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
        }
      assert (mutex->__data.__owner == 0);
    }
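  /* The adaptive branch above spins with trylock for a bounded number of
     iterations before falling back to a blocking LLL_MUTEX_LOCK.  The
     bound is MIN (MAX_ADAPTIVE_COUNT, 2 * __spins + 10), and __spins is
     updated with an exponentially weighted moving average of the number
     of iterations that were actually needed:
     __spins += (cnt - __spins) / 8.  */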
  else
    {
      pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
      assert (PTHREAD_MUTEX_TYPE (mutex) == PTHREAD_MUTEX_ERRORCHECK_NP);
      /* Check whether we already hold the mutex.  */
      if (__builtin_expect (mutex->__data.__owner == id, 0))
        return EDEADLK;
      goto simple;
    }

  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  /* Record the ownership.  */
  mutex->__data.__owner = id;
#ifndef NO_INCR
  ++mutex->__data.__nusers;
#endif

  LIBC_PROBE (mutex_acquired, 1, mutex);

  return 0;
}

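/* Slow path: __pthread_mutex_lock_full handles the mutex protocols kept
   out of the fast path above, namely robust mutexes, priority-inheritance
   (PI) mutexes and priority-protected (PP, priority-ceiling) mutexes.
   It is reached whenever the type word carries flags beyond the basic
   kind and elision bits.  */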
static int
__pthread_mutex_lock_full (pthread_mutex_t *mutex)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  switch (PTHREAD_MUTEX_TYPE (mutex))
    {
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);

      oldval = mutex->__data.__lock;
      do
        {
        again:
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id;
#ifdef NO_INCR
              newval |= FUTEX_WAITERS;
#else
              newval |= (oldval & FUTEX_WAITERS);
#endif

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);

              if (newval != oldval)
                {
                  oldval = newval;
                  goto again;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              ENQUEUE_MUTEX (mutex);
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fell through
                 to the end of the function, __nusers would be incremented,
                 which is not correct because the old owner has to be
                 discounted.  If we are not supposed to increment __nusers,
                 we actually have to decrement it here.  */
#ifdef NO_INCR
              --mutex->__data.__nusers;
#endif

              return EOWNERDEAD;
            }

          /* Check whether we already hold the mutex.  */
          if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  return 0;
                }
            }

          oldval = LLL_ROBUST_MUTEX_LOCK (mutex, id);

          if (__builtin_expect (mutex->__data.__owner
                                == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
            {
              /* This mutex is now not recoverable.  */
              mutex->__data.__count = 0;
              lll_unlock (mutex->__data.__lock,
                          PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return ENOTRECOVERABLE;
            }
        }
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      mutex->__data.__count = 1;
      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;
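
      /* The robust cases above follow the kernel's robust-futex protocol:
         list_op_pending is set before the mutex is acquired or enqueued
         and cleared again afterwards, so that if this thread dies in
         between, the kernel can still find the lock when it walks the
         robust list and set FUTEX_OWNER_DIED.  A lock whose previous
         owner died is handed to the caller with EOWNERDEAD and stays
         marked PTHREAD_MUTEX_INCONSISTENT until pthread_mutex_consistent
         is called.  */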

    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
        int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

        if (robust)
          /* Note: robust PI futexes are signaled by setting bit 0.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                         (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                   | 1));

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        int newval = id;
#ifdef NO_INCR
        newval |= FUTEX_WAITERS;
#endif
        oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                      newval, 0);

        if (oldval != 0)
          {
            /* The mutex is locked.  The kernel will now take care of
               everything.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            INTERNAL_SYSCALL_DECL (__err);
            int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                                      __lll_private_flag (FUTEX_LOCK_PI,
                                                          private), 1, 0);

            if (INTERNAL_SYSCALL_ERROR_P (e, __err)
                && (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
                    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK))
              {
                assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
                        || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
                            && kind != PTHREAD_MUTEX_RECURSIVE_NP));
                /* ESRCH can happen only for non-robust PI mutexes where
                   the owner of the lock died.  */
                assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH || !robust);

                /* Delay the thread indefinitely.  */
                while (1)
                  pause_not_cancel ();
              }

            oldval = mutex->__data.__lock;

            assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
          }

        if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fell through
               to the end of the function, __nusers would be incremented,
               which is not correct because the old owner has to be
               discounted.  If we are not supposed to increment __nusers,
               we actually have to decrement it here.  */
#ifdef NO_INCR
            --mutex->__data.__nusers;
#endif

            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            INTERNAL_SYSCALL_DECL (__err);
            INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                              __lll_private_flag (FUTEX_UNLOCK_PI,
                                                  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
                              0, 0);

            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        mutex->__data.__count = 1;
        if (robust)
          {
            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }
      }
      break;
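
      /* For the PI cases above the lock word holds the owner's TID, so an
         uncontended acquisition is a single compare-and-swap from 0 to the
         caller's id.  Under contention the FUTEX_LOCK_PI operation lets
         the kernel queue the waiter and apply priority inheritance to the
         current owner; FUTEX_OWNER_DIED handling mirrors the plain robust
         case above.  */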

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                          >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return EINVAL;
              }

            int retval = __pthread_tpp_change_priority (oldprio, ceiling);
            if (retval)
              return retval;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
#ifdef NO_INCR
                                                     ceilval | 2,
#else
                                                     ceilval | 1,
#endif
                                                     ceilval);

            if (oldval == ceilval)
              break;

            do
              {
                oldval
                  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                         ceilval | 2,
                                                         ceilval | 1);

                if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
                  break;

                if (oldval != ceilval)
                  lll_futex_wait (&mutex->__data.__lock, ceilval | 2,
                                  PTHREAD_MUTEX_PSHARED (mutex));
              }
            while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        ceilval | 2, ceilval)
                   != ceilval);
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        assert (mutex->__data.__owner == 0);
        mutex->__data.__count = 1;
      }
      break;
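
      /* For the PP (priority-ceiling) cases above the ceiling is kept in
         the lock word itself (PTHREAD_MUTEX_PRIO_CEILING_MASK) and the low
         bits encode unlocked (0), locked (1) and locked with waiters (2).
         Before taking the lock the thread raises its own priority to the
         ceiling via __pthread_tpp_change_priority; EINVAL is returned if
         the caller's priority already exceeds the ceiling.  */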

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  /* Record the ownership.  */
  mutex->__data.__owner = id;
#ifndef NO_INCR
  ++mutex->__data.__nusers;
#endif

  LIBC_PROBE (mutex_acquired, 1, mutex);

  return 0;
}
#ifndef __pthread_mutex_lock
strong_alias (__pthread_mutex_lock, pthread_mutex_lock)
hidden_def (__pthread_mutex_lock)
#endif


#ifdef NO_INCR
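/* In the NO_INCR (condition-variable) build of this file, this helper
   fixes up the user-level bookkeeping for a PI mutex that the kernel
   has already acquired on this thread's behalf, e.g. after a requeue-PI
   operation in the condvar code: only __owner and, for recursive PI
   mutexes, __count need to be adjusted.  */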
void
__pthread_mutex_cond_lock_adjust (mutex)
     pthread_mutex_t *mutex;
{
  assert ((mutex->__data.__kind & PTHREAD_MUTEX_PRIO_INHERIT_NP) != 0);
  assert ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0);
  assert ((mutex->__data.__kind & PTHREAD_MUTEX_PSHARED_BIT) == 0);

  /* Record the ownership.  */
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  mutex->__data.__owner = id;

  if (mutex->__data.__kind == PTHREAD_MUTEX_PI_RECURSIVE_NP)
    ++mutex->__data.__count;
}
#endif
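
/* Illustrative only (not part of the library build): the strong_alias
   above exports __pthread_mutex_lock as the public pthread_mutex_lock,
   so a typical caller looks like the sketch below.  The error-return
   conventions (EDEADLK, EAGAIN, EOWNERDEAD, ...) are the ones
   implemented in this file.

     #include <pthread.h>
     #include <stdio.h>

     static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
     static long counter;

     static void *
     worker (void *arg)
     {
       (void) arg;
       for (int i = 0; i < 100000; ++i)
         {
           pthread_mutex_lock (&m);    // returns 0 on success
           ++counter;                  // critical section
           pthread_mutex_unlock (&m);
         }
       return NULL;
     }

     int
     main (void)
     {
       pthread_t t1, t2;
       pthread_create (&t1, NULL, worker, NULL);
       pthread_create (&t2, NULL, worker, NULL);
       pthread_join (t1, NULL);
       pthread_join (t2, NULL);
       printf ("%ld\n", counter);      // prints 200000
       return 0;
     }
*/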